/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so that we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
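
/* These entries back the target_clones function attribute.  A user-side
   sketch (illustrative, not code from this file):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod_func (long a, long b) { return a % b; }

   The generated dispatcher then selects the power9 clone at run time when
   __builtin_cpu_supports ("arch_3_00") is true.  */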
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the three
   register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
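
/* For instance, a GPR entry for DImode would typically end up with
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET set, meaning the
   mode is valid in GPRs with both reg+reg and reg+offset addressing.  (The
   specific DImode bits here are illustrative; the actual masks are computed
   when the target is set up.)  */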
/* Per-mode addressing information: the reload insn codes to use and, for
   each register type, a mask of the valid addressing forms.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
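
/* A typical caller checks one of these predicates before accepting an
   auto-increment address; an illustrative sketch (not code from this file):

     if (GET_CODE (XEXP (mem, 0)) == PRE_INC
	 && !mode_supports_pre_incdec_p (GET_MODE (mem)))
       return false;   // reject the address

   GET_CODE, XEXP, and GET_MODE are the standard RTL accessors.  */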
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
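
/* The PARALLEL cases above arise, for example, from store-multiple and
   register save/restore patterns, which expand to a PARALLEL of several
   memory SETs; single_set rejects those shapes, and the generic
   store_data_bypass_p would assert on them.  (Store-multiple is given as
   an illustration; any PARALLEL of SETs takes this path.)  */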
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
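
/* As a worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000 >> 0 == 0x80000000, i.e. %v0 in the most significant bit,
   while ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is
   0x80000000 >> 31 == 0x1, i.e. %v31 in the least significant bit.  */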
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
1700 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1702 #undef TARGET_FOLD_BUILTIN
1703 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1704 #undef TARGET_GIMPLE_FOLD_BUILTIN
1705 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1707 #undef TARGET_EXPAND_BUILTIN
1708 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1710 #undef TARGET_MANGLE_TYPE
1711 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1713 #undef TARGET_INIT_LIBFUNCS
1714 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1716 #if TARGET_MACHO
1717 #undef TARGET_BINDS_LOCAL_P
1718 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1719 #endif
1721 #undef TARGET_MS_BITFIELD_LAYOUT_P
1722 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1724 #undef TARGET_ASM_OUTPUT_MI_THUNK
1725 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1727 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1730 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1731 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1733 #undef TARGET_REGISTER_MOVE_COST
1734 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1735 #undef TARGET_MEMORY_MOVE_COST
1736 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1737 #undef TARGET_CANNOT_COPY_INSN_P
1738 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1739 #undef TARGET_RTX_COSTS
1740 #define TARGET_RTX_COSTS rs6000_rtx_costs
1741 #undef TARGET_ADDRESS_COST
1742 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1743 #undef TARGET_INSN_COST
1744 #define TARGET_INSN_COST rs6000_insn_cost
1746 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1747 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1749 #undef TARGET_PROMOTE_FUNCTION_MODE
1750 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1752 #undef TARGET_RETURN_IN_MEMORY
1753 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1755 #undef TARGET_RETURN_IN_MSB
1756 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1758 #undef TARGET_SETUP_INCOMING_VARARGS
1759 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1761 /* Always strict argument naming on rs6000. */
1762 #undef TARGET_STRICT_ARGUMENT_NAMING
1763 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1765 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_SPLIT_COMPLEX_ARG
1767 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1768 #undef TARGET_MUST_PASS_IN_STACK
1769 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1770 #undef TARGET_PASS_BY_REFERENCE
1771 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1772 #undef TARGET_ARG_PARTIAL_BYTES
1773 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1774 #undef TARGET_FUNCTION_ARG_ADVANCE
1775 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1776 #undef TARGET_FUNCTION_ARG
1777 #define TARGET_FUNCTION_ARG rs6000_function_arg
1778 #undef TARGET_FUNCTION_ARG_PADDING
1779 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1780 #undef TARGET_FUNCTION_ARG_BOUNDARY
1781 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1783 #undef TARGET_BUILD_BUILTIN_VA_LIST
1784 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1786 #undef TARGET_EXPAND_BUILTIN_VA_START
1787 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1789 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1790 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1792 #undef TARGET_EH_RETURN_FILTER_MODE
1793 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1795 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1796 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1798 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1799 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1801 #undef TARGET_FLOATN_MODE
1802 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1804 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1805 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1807 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1808 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1810 #undef TARGET_MD_ASM_ADJUST
1811 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1813 #undef TARGET_OPTION_OVERRIDE
1814 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1816 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1817 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1818 rs6000_builtin_vectorized_function
1820 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1821 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1822 rs6000_builtin_md_vectorized_function
1824 #undef TARGET_STACK_PROTECT_GUARD
1825 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1827 #if !TARGET_MACHO
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1830 #endif
1832 #ifdef HAVE_AS_TLS
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1835 #endif
1837 /* Use a 32-bit anchor range. This leads to sequences like:
1839 addis tmp,anchor,high
1840 add dest,tmp,low
1842 where tmp itself acts as an anchor, and can be shared between
1843 accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
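/* Written as -0x7fffffff - 1 rather than -0x80000000: the literal 0x80000000
   does not fit in a signed int, so negating it would happen in an unsigned
   type. */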
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1978 /* Processor table. */
1979 struct rs6000_ptt
1981 const char *const name; /* Canonical processor name. */
1982 const enum processor_type processor; /* Processor type enum value. */
1983 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1986 static struct rs6000_ptt const processor_target_table[] =
1988 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1989 #include "rs6000-cpus.def"
1990 #undef RS6000_CPU
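/* Each RS6000_CPU line in rs6000-cpus.def becomes one table entry; for
   example, the "power9" line expands here to { "power9", PROCESSOR_POWER9,
   <its ISA flag mask> }. */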
1993 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1994 name is invalid. */
1996 static int
1997 rs6000_cpu_name_lookup (const char *name)
1999 size_t i;
2001 if (name != NULL)
2003 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2004 if (! strcmp (name, processor_target_table[i].name))
2005 return (int)i;
2008 return -1;
2012 /* Return number of consecutive hard regs needed starting at reg REGNO
2013 to hold something of mode MODE.
2014 This is ordinarily the length in words of a value of mode MODE
2015 but can be less for certain modes in special long registers.
2017 POWER and PowerPC GPRs hold 32 bits worth;
2018 PowerPC64 GPRs and FPRs hold 64 bits worth. */
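/* For example, IBM extended double (IFmode, 16 bytes) needs two FPRs, while
   a 16-byte vector mode fits in a single Altivec or VSX register. */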
2020 static int
2021 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2023 unsigned HOST_WIDE_INT reg_size;
2025 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2026 128-bit floating point that can go in vector registers, which has VSX
2027 memory addressing. */
2028 if (FP_REGNO_P (regno))
2029 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2030 ? UNITS_PER_VSX_WORD
2031 : UNITS_PER_FP_WORD);
2033 else if (ALTIVEC_REGNO_P (regno))
2034 reg_size = UNITS_PER_ALTIVEC_WORD;
2036 else
2037 reg_size = UNITS_PER_WORD;
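/* Round up, e.g. a 16-byte mode held in 8-byte registers needs
   (16 + 8 - 1) / 8 == 2 registers. */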
2039 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2042 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2043 MODE. */
2044 static int
2045 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2047 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2049 if (COMPLEX_MODE_P (mode))
2050 mode = GET_MODE_INNER (mode);
2052 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2053 register pairs, and PTImode is used where we need to deal with such
2054 operations. Don't allow quad words in the argument or frame
2055 pointer registers, just registers 0..31. */
2056 if (mode == PTImode)
2057 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2058 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2059 && ((regno & 1) == 0));
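/* For example, PTImode may live in r4/r5 (first register even) but not in
   r3/r4. */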
2061 /* The VSX registers that overlap the FPR registers are wider than the FPRs
2062 on non-VSX implementations. Don't allow an item to be split between a FP
2063 register and an Altivec register. Allow TImode in all VSX registers if the
2064 user asked for it. */
2065 if (TARGET_VSX && VSX_REGNO_P (regno)
2066 && (VECTOR_MEM_VSX_P (mode)
2067 || FLOAT128_VECTOR_P (mode)
2068 || reg_addr[mode].scalar_in_vmx_p
2069 || mode == TImode
2070 || (TARGET_VADDUQM && mode == V1TImode)))
2072 if (FP_REGNO_P (regno))
2073 return FP_REGNO_P (last_regno);
2075 if (ALTIVEC_REGNO_P (regno))
2077 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2078 return 0;
2080 return ALTIVEC_REGNO_P (last_regno);
2084 /* The GPRs can hold any mode, but values bigger than one register
2085 cannot go past R31. */
2086 if (INT_REGNO_P (regno))
2087 return INT_REGNO_P (last_regno);
2089 /* The float registers (except for VSX vector modes) can only hold floating
2090 modes and DImode. */
2091 if (FP_REGNO_P (regno))
2093 if (FLOAT128_VECTOR_P (mode))
2094 return false;
2096 if (SCALAR_FLOAT_MODE_P (mode)
2097 && (mode != TDmode || (regno % 2) == 0)
2098 && FP_REGNO_P (last_regno))
2099 return 1;
2101 if (GET_MODE_CLASS (mode) == MODE_INT)
2103 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2104 return 1;
2106 if (TARGET_P8_VECTOR && (mode == SImode))
2107 return 1;
2109 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2110 return 1;
2113 return 0;
2116 /* The CR register can only hold CC modes. */
2117 if (CR_REGNO_P (regno))
2118 return GET_MODE_CLASS (mode) == MODE_CC;
2120 if (CA_REGNO_P (regno))
2121 return mode == Pmode || mode == SImode;
2123 /* AltiVec modes can go only in AltiVec registers. */
2124 if (ALTIVEC_REGNO_P (regno))
2125 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2126 || mode == V1TImode);
2128 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2129 registers, and it must be able to fit within the register set. */
2131 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2134 /* Implement TARGET_HARD_REGNO_NREGS. */
2136 static unsigned int
2137 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2139 return rs6000_hard_regno_nregs[mode][regno];
2142 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2144 static bool
2145 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2147 return rs6000_hard_regno_mode_ok_p[mode][regno];
2150 /* Implement TARGET_MODES_TIEABLE_P.
2152 PTImode cannot tie with other modes because PTImode is restricted to even
2153 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2154 57744).
2156 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2157 128-bit floating point on VSX systems ties with other vectors. */
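/* For example, V2DFmode ties with V4SImode (both are Altivec/VSX vector
   modes), while DFmode does not tie with DImode. */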
2159 static bool
2160 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2162 if (mode1 == PTImode)
2163 return mode2 == PTImode;
2164 if (mode2 == PTImode)
2165 return false;
2167 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2168 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2169 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2170 return false;
2172 if (SCALAR_FLOAT_MODE_P (mode1))
2173 return SCALAR_FLOAT_MODE_P (mode2);
2174 if (SCALAR_FLOAT_MODE_P (mode2))
2175 return false;
2177 if (GET_MODE_CLASS (mode1) == MODE_CC)
2178 return GET_MODE_CLASS (mode2) == MODE_CC;
2179 if (GET_MODE_CLASS (mode2) == MODE_CC)
2180 return false;
2182 return true;
2185 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
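/* For example, in 32-bit code with -mpowerpc64 only the low 32 bits of a GPR
   are saved across calls, so a DImode value held in a GPR is partially
   clobbered. */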
2187 static bool
2188 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2190 if (TARGET_32BIT
2191 && TARGET_POWERPC64
2192 && GET_MODE_SIZE (mode) > 4
2193 && INT_REGNO_P (regno))
2194 return true;
2196 if (TARGET_VSX
2197 && FP_REGNO_P (regno)
2198 && GET_MODE_SIZE (mode) > 8
2199 && !FLOAT128_2REG_P (mode))
2200 return true;
2202 return false;
2205 /* Print interesting facts about registers. */
2206 static void
2207 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2209 int r, m;
2211 for (r = first_regno; r <= last_regno; ++r)
2213 const char *comma = "";
2214 int len;
2216 if (first_regno == last_regno)
2217 fprintf (stderr, "%s:\t", reg_name);
2218 else
2219 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2221 len = 8;
2222 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2223 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2225 if (len > 70)
2227 fprintf (stderr, ",\n\t");
2228 len = 8;
2229 comma = "";
2232 if (rs6000_hard_regno_nregs[m][r] > 1)
2233 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2234 rs6000_hard_regno_nregs[m][r]);
2235 else
2236 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2238 comma = ", ";
2241 if (call_used_regs[r])
2243 if (len > 70)
2245 fprintf (stderr, ",\n\t");
2246 len = 8;
2247 comma = "";
2250 len += fprintf (stderr, "%s%s", comma, "call-used");
2251 comma = ", ";
2254 if (fixed_regs[r])
2256 if (len > 70)
2258 fprintf (stderr, ",\n\t");
2259 len = 8;
2260 comma = "";
2263 len += fprintf (stderr, "%s%s", comma, "fixed");
2264 comma = ", ";
2267 if (len > 70)
2269 fprintf (stderr, ",\n\t");
2270 comma = "";
2273 len += fprintf (stderr, "%sreg-class = %s", comma,
2274 reg_class_names[(int)rs6000_regno_regclass[r]]);
2275 comma = ", ";
2277 if (len > 70)
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2283 fprintf (stderr, "%sregno = %d\n", comma, r);
2287 static const char *
2288 rs6000_debug_vector_unit (enum rs6000_vector v)
2290 const char *ret;
2292 switch (v)
2294 case VECTOR_NONE: ret = "none"; break;
2295 case VECTOR_ALTIVEC: ret = "altivec"; break;
2296 case VECTOR_VSX: ret = "vsx"; break;
2297 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2298 case VECTOR_OTHER: ret = "other"; break;
2299 default: ret = "unknown"; break;
2302 return ret;
2305 /* Inner function printing just the address mask for a particular reload
2306 register class. */
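/* For example, a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET prints as "vio", or as "v io   " when KEEP_SPACES pads
   the unused positions. */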
2307 DEBUG_FUNCTION char *
2308 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2310 static char ret[8];
2311 char *p = ret;
2313 if ((mask & RELOAD_REG_VALID) != 0)
2314 *p++ = 'v';
2315 else if (keep_spaces)
2316 *p++ = ' ';
2318 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2319 *p++ = 'm';
2320 else if (keep_spaces)
2321 *p++ = ' ';
2323 if ((mask & RELOAD_REG_INDEXED) != 0)
2324 *p++ = 'i';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2328 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2329 *p++ = 'O';
2330 else if ((mask & RELOAD_REG_OFFSET) != 0)
2331 *p++ = 'o';
2332 else if (keep_spaces)
2333 *p++ = ' ';
2335 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2336 *p++ = '+';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2341 *p++ = '+';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2345 if ((mask & RELOAD_REG_AND_M16) != 0)
2346 *p++ = '&';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2350 *p = '\0';
2352 return ret;
2355 /* Print the address masks in a human readable fashion. */
2356 DEBUG_FUNCTION void
2357 rs6000_debug_print_mode (ssize_t m)
2359 ssize_t rc;
2360 int spaces = 0;
2361 bool fuse_extra_p;
2363 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2364 for (rc = 0; rc < N_RELOAD_REG; rc++)
2365 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2366 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2368 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2369 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2370 fprintf (stderr, " Reload=%c%c",
2371 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2372 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2373 else
2374 spaces += sizeof (" Reload=sl") - 1;
2376 if (reg_addr[m].scalar_in_vmx_p)
2378 fprintf (stderr, "%*s Upper=y", spaces, "");
2379 spaces = 0;
2381 else
2382 spaces += sizeof (" Upper=y") - 1;
2384 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2385 || reg_addr[m].fused_toc);
2386 if (!fuse_extra_p)
2388 for (rc = 0; rc < N_RELOAD_REG; rc++)
2390 if (rc != RELOAD_REG_ANY)
2392 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2394 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2395 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2396 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2398 fuse_extra_p = true;
2399 break;
2405 if (fuse_extra_p)
2407 fprintf (stderr, "%*s Fuse:", spaces, "");
2408 spaces = 0;
2410 for (rc = 0; rc < N_RELOAD_REG; rc++)
2412 if (rc != RELOAD_REG_ANY)
2414 char load, store;
2416 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2417 load = 'l';
2418 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2419 load = 'L';
2420 else
2421 load = '-';
2423 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2424 store = 's';
2425 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2426 store = 'S';
2427 else
2428 store = '-';
2430 if (load == '-' && store == '-')
2431 spaces += 5;
2432 else
2434 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2435 reload_reg_map[rc].name[0], load, store);
2436 spaces = 0;
2441 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2443 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2444 spaces = 0;
2446 else
2447 spaces += sizeof (" P8gpr") - 1;
2449 if (reg_addr[m].fused_toc)
2451 fprintf (stderr, "%*sToc", (spaces + 1), "");
2452 spaces = 0;
2454 else
2455 spaces += sizeof (" Toc") - 1;
2457 else
2458 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2460 if (rs6000_vector_unit[m] != VECTOR_NONE
2461 || rs6000_vector_mem[m] != VECTOR_NONE)
2463 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2464 spaces, "",
2465 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2466 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2469 fputs ("\n", stderr);
2472 #define DEBUG_FMT_ID "%-32s= "
2473 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2474 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2475 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
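/* For example, fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2") prints the key
   left-justified in a 32-column field, followed by "= ELFv2". */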
2477 /* Print various interesting information with -mdebug=reg. */
2478 static void
2479 rs6000_debug_reg_global (void)
2481 static const char *const tf[2] = { "false", "true" };
2482 const char *nl = (const char *)0;
2483 int m;
2484 size_t m1, m2, v;
2485 char costly_num[20];
2486 char nop_num[20];
2487 char flags_buffer[40];
2488 const char *costly_str;
2489 const char *nop_str;
2490 const char *trace_str;
2491 const char *abi_str;
2492 const char *cmodel_str;
2493 struct cl_target_option cl_opts;
2495 /* Modes we want tieable information on. */
2496 static const machine_mode print_tieable_modes[] = {
2497 QImode,
2498 HImode,
2499 SImode,
2500 DImode,
2501 TImode,
2502 PTImode,
2503 SFmode,
2504 DFmode,
2505 TFmode,
2506 IFmode,
2507 KFmode,
2508 SDmode,
2509 DDmode,
2510 TDmode,
2511 V16QImode,
2512 V8HImode,
2513 V4SImode,
2514 V2DImode,
2515 V1TImode,
2516 V32QImode,
2517 V16HImode,
2518 V8SImode,
2519 V4DImode,
2520 V2TImode,
2521 V4SFmode,
2522 V2DFmode,
2523 V8SFmode,
2524 V4DFmode,
2525 CCmode,
2526 CCUNSmode,
2527 CCEQmode,
2530 /* Virtual regs we are interested in. */
2531 static const struct {
2532 int regno; /* register number. */
2533 const char *name; /* register name. */
2534 } virtual_regs[] = {
2535 { STACK_POINTER_REGNUM, "stack pointer:" },
2536 { TOC_REGNUM, "toc: " },
2537 { STATIC_CHAIN_REGNUM, "static chain: " },
2538 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2539 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2540 { ARG_POINTER_REGNUM, "arg pointer: " },
2541 { FRAME_POINTER_REGNUM, "frame pointer:" },
2542 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2543 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2544 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2545 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2546 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2547 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2548 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2549 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2550 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2553 fputs ("\nHard register information:\n", stderr);
2554 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2555 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2556 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2557 LAST_ALTIVEC_REGNO,
2558 "vs");
2559 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2560 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2561 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2562 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2563 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2564 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2566 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2567 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2568 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2570 fprintf (stderr,
2571 "\n"
2572 "d reg_class = %s\n"
2573 "f reg_class = %s\n"
2574 "v reg_class = %s\n"
2575 "wa reg_class = %s\n"
2576 "wb reg_class = %s\n"
2577 "wd reg_class = %s\n"
2578 "we reg_class = %s\n"
2579 "wf reg_class = %s\n"
2580 "wg reg_class = %s\n"
2581 "wh reg_class = %s\n"
2582 "wi reg_class = %s\n"
2583 "wj reg_class = %s\n"
2584 "wk reg_class = %s\n"
2585 "wl reg_class = %s\n"
2586 "wm reg_class = %s\n"
2587 "wo reg_class = %s\n"
2588 "wp reg_class = %s\n"
2589 "wq reg_class = %s\n"
2590 "wr reg_class = %s\n"
2591 "ws reg_class = %s\n"
2592 "wt reg_class = %s\n"
2593 "wu reg_class = %s\n"
2594 "wv reg_class = %s\n"
2595 "ww reg_class = %s\n"
2596 "wx reg_class = %s\n"
2597 "wy reg_class = %s\n"
2598 "wz reg_class = %s\n"
2599 "wA reg_class = %s\n"
2600 "wH reg_class = %s\n"
2601 "wI reg_class = %s\n"
2602 "wJ reg_class = %s\n"
2603 "wK reg_class = %s\n"
2604 "\n",
2605 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2606 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2607 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2608 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2609 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2610 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2611 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2612 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2613 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2614 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2615 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2616 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2617 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2618 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2638 nl = "\n";
2639 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2640 rs6000_debug_print_mode (m);
2642 fputs ("\n", stderr);
2644 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2646 machine_mode mode1 = print_tieable_modes[m1];
2647 bool first_time = true;
2649 nl = (const char *)0;
2650 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2652 machine_mode mode2 = print_tieable_modes[m2];
2653 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2655 if (first_time)
2657 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2658 nl = "\n";
2659 first_time = false;
2662 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2666 if (!first_time)
2667 fputs ("\n", stderr);
2670 if (nl)
2671 fputs (nl, stderr);
2673 if (rs6000_recip_control)
2675 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2677 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2678 if (rs6000_recip_bits[m])
2680 fprintf (stderr,
2681 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2682 GET_MODE_NAME (m),
2683 (RS6000_RECIP_AUTO_RE_P (m)
2684 ? "auto"
2685 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2686 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2687 ? "auto"
2688 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2691 fputs ("\n", stderr);
2694 if (rs6000_cpu_index >= 0)
2696 const char *name = processor_target_table[rs6000_cpu_index].name;
2697 HOST_WIDE_INT flags
2698 = processor_target_table[rs6000_cpu_index].target_enable;
2700 sprintf (flags_buffer, "-mcpu=%s flags", name);
2701 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2703 else
2704 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2706 if (rs6000_tune_index >= 0)
2708 const char *name = processor_target_table[rs6000_tune_index].name;
2709 HOST_WIDE_INT flags
2710 = processor_target_table[rs6000_tune_index].target_enable;
2712 sprintf (flags_buffer, "-mtune=%s flags", name);
2713 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2715 else
2716 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2718 cl_target_option_save (&cl_opts, &global_options);
2719 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2720 rs6000_isa_flags);
2722 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2723 rs6000_isa_flags_explicit);
2725 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2726 rs6000_builtin_mask);
2728 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2730 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2731 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2733 switch (rs6000_sched_costly_dep)
2735 case max_dep_latency:
2736 costly_str = "max_dep_latency";
2737 break;
2739 case no_dep_costly:
2740 costly_str = "no_dep_costly";
2741 break;
2743 case all_deps_costly:
2744 costly_str = "all_deps_costly";
2745 break;
2747 case true_store_to_load_dep_costly:
2748 costly_str = "true_store_to_load_dep_costly";
2749 break;
2751 case store_to_load_dep_costly:
2752 costly_str = "store_to_load_dep_costly";
2753 break;
2755 default:
2756 costly_str = costly_num;
2757 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2758 break;
2761 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2763 switch (rs6000_sched_insert_nops)
2765 case sched_finish_regroup_exact:
2766 nop_str = "sched_finish_regroup_exact";
2767 break;
2769 case sched_finish_pad_groups:
2770 nop_str = "sched_finish_pad_groups";
2771 break;
2773 case sched_finish_none:
2774 nop_str = "sched_finish_none";
2775 break;
2777 default:
2778 nop_str = nop_num;
2779 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2780 break;
2783 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2785 switch (rs6000_sdata)
2787 default:
2788 case SDATA_NONE:
2789 break;
2791 case SDATA_DATA:
2792 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2793 break;
2795 case SDATA_SYSV:
2796 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2797 break;
2799 case SDATA_EABI:
2800 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2801 break;
2805 switch (rs6000_traceback)
2807 case traceback_default: trace_str = "default"; break;
2808 case traceback_none: trace_str = "none"; break;
2809 case traceback_part: trace_str = "part"; break;
2810 case traceback_full: trace_str = "full"; break;
2811 default: trace_str = "unknown"; break;
2814 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2816 switch (rs6000_current_cmodel)
2818 case CMODEL_SMALL: cmodel_str = "small"; break;
2819 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2820 case CMODEL_LARGE: cmodel_str = "large"; break;
2821 default: cmodel_str = "unknown"; break;
2824 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2826 switch (rs6000_current_abi)
2828 case ABI_NONE: abi_str = "none"; break;
2829 case ABI_AIX: abi_str = "aix"; break;
2830 case ABI_ELFv2: abi_str = "ELFv2"; break;
2831 case ABI_V4: abi_str = "V4"; break;
2832 case ABI_DARWIN: abi_str = "darwin"; break;
2833 default: abi_str = "unknown"; break;
2836 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2838 if (rs6000_altivec_abi)
2839 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2841 if (rs6000_darwin64_abi)
2842 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2844 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2845 (TARGET_SOFT_FLOAT ? "true" : "false"));
2847 if (TARGET_LINK_STACK)
2848 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2850 if (TARGET_P8_FUSION)
2852 char options[80];
2854 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2855 if (TARGET_TOC_FUSION)
2856 strcat (options, ", toc");
2858 if (TARGET_P8_FUSION_SIGN)
2859 strcat (options, ", sign");
2861 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2864 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2865 TARGET_SECURE_PLT ? "secure" : "bss");
2866 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2867 aix_struct_return ? "aix" : "sysv");
2868 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2869 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2870 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2871 tf[!!rs6000_align_branch_targets]);
2872 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2873 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2874 rs6000_long_double_type_size);
2875 if (rs6000_long_double_type_size == 128)
2877 fprintf (stderr, DEBUG_FMT_S, "long double type",
2878 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2879 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2880 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2882 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2883 (int)rs6000_sched_restricted_insns_priority);
2884 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2885 (int)END_BUILTINS);
2886 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2887 (int)RS6000_BUILTIN_COUNT);
2889 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2890 (int)TARGET_FLOAT128_ENABLE_TYPE);
2892 if (TARGET_VSX)
2893 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2894 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2896 if (TARGET_DIRECT_MOVE_128)
2897 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2898 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2902 /* Update the addr mask bits in reg_addr to help secondary reload and the
2903 legitimate address support figure out the appropriate addressing to
2904 use. */
2906 static void
2907 rs6000_setup_reg_addr_masks (void)
2909 ssize_t rc, reg, m, nregs;
2910 addr_mask_type any_addr_mask, addr_mask;
2912 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2914 machine_mode m2 = (machine_mode) m;
2915 bool complex_p = false;
2916 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2917 size_t msize;
2919 if (COMPLEX_MODE_P (m2))
2921 complex_p = true;
2922 m2 = GET_MODE_INNER (m2);
2925 msize = GET_MODE_SIZE (m2);
2927 /* SDmode is special in that we want to access it only via REG+REG
2928 addressing on power7 and above, since we want to use the LFIWZX and
2929 STFIWX instructions to load it. */
2930 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2932 any_addr_mask = 0;
2933 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2935 addr_mask = 0;
2936 reg = reload_reg_map[rc].reg;
2938 /* Can mode values go in the GPR/FPR/Altivec registers? */
2939 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2941 bool small_int_vsx_p = (small_int_p
2942 && (rc == RELOAD_REG_FPR
2943 || rc == RELOAD_REG_VMX));
2945 nregs = rs6000_hard_regno_nregs[m][reg];
2946 addr_mask |= RELOAD_REG_VALID;
2948 /* Indicate if the mode takes more than 1 physical register. If
2949 it takes a single register, indicate it can do REG+REG
2950 addressing. Small integers in VSX registers can only do
2951 REG+REG addressing. */
2952 if (small_int_vsx_p)
2953 addr_mask |= RELOAD_REG_INDEXED;
2954 else if (nregs > 1 || m == BLKmode || complex_p)
2955 addr_mask |= RELOAD_REG_MULTIPLE;
2956 else
2957 addr_mask |= RELOAD_REG_INDEXED;
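/* For example, on 64-bit a 16-byte IFmode value spans two GPRs or FPRs, so
   it gets RELOAD_REG_MULTIPLE rather than RELOAD_REG_INDEXED. */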
2959 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2960 addressing. If we allow scalars into Altivec registers,
2961 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2963 For VSX systems, we don't allow update addressing for
2964 DFmode/SFmode if those registers can go in both the
2965 traditional floating point registers and Altivec registers.
2966 The load/store instructions for the Altivec registers do not
2967 have update forms. If we allowed update addressing, it seems
2968 to break IV-OPT code using floating point if the index type is
2969 int instead of long (PR target/81550 and target/84042). */
2971 if (TARGET_UPDATE
2972 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2973 && msize <= 8
2974 && !VECTOR_MODE_P (m2)
2975 && !FLOAT128_VECTOR_P (m2)
2976 && !complex_p
2977 && (m != E_DFmode || !TARGET_VSX)
2978 && (m != E_SFmode || !TARGET_P8_VECTOR)
2979 && !small_int_vsx_p)
2981 addr_mask |= RELOAD_REG_PRE_INCDEC;
2983 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2984 we don't allow PRE_MODIFY for some multi-register
2985 operations. */
2986 switch (m)
2988 default:
2989 addr_mask |= RELOAD_REG_PRE_MODIFY;
2990 break;
2992 case E_DImode:
2993 if (TARGET_POWERPC64)
2994 addr_mask |= RELOAD_REG_PRE_MODIFY;
2995 break;
2997 case E_DFmode:
2998 case E_DDmode:
2999 if (TARGET_HARD_FLOAT)
3000 addr_mask |= RELOAD_REG_PRE_MODIFY;
3001 break;
3006 /* GPR and FPR registers can do REG+OFFSET addressing, except
3007 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3008 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3009 if ((addr_mask != 0) && !indexed_only_p
3010 && msize <= 8
3011 && (rc == RELOAD_REG_GPR
3012 || ((msize == 8 || m2 == SFmode)
3013 && (rc == RELOAD_REG_FPR
3014 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3015 addr_mask |= RELOAD_REG_OFFSET;
3017 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3018 instructions are enabled. The offset for 128-bit VSX registers is
3019 only 12 bits. While GPRs can handle the full offset range, VSX
3020 registers can only handle the restricted range. */
3021 else if ((addr_mask != 0) && !indexed_only_p
3022 && msize == 16 && TARGET_P9_VECTOR
3023 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3024 || (m2 == TImode && TARGET_VSX)))
3026 addr_mask |= RELOAD_REG_OFFSET;
3027 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3028 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3031 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3032 addressing on 128-bit types. */
3033 if (rc == RELOAD_REG_VMX && msize == 16
3034 && (addr_mask & RELOAD_REG_VALID) != 0)
3035 addr_mask |= RELOAD_REG_AND_M16;
3037 reg_addr[m].addr_mask[rc] = addr_mask;
3038 any_addr_mask |= addr_mask;
3041 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
3046 /* Initialize the various global tables that are based on register size. */
3047 static void
3048 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3050 ssize_t r, m, c;
3051 int align64;
3052 int align32;
3054 /* Precalculate REGNO_REG_CLASS. */
3055 rs6000_regno_regclass[0] = GENERAL_REGS;
3056 for (r = 1; r < 32; ++r)
3057 rs6000_regno_regclass[r] = BASE_REGS;
3059 for (r = 32; r < 64; ++r)
3060 rs6000_regno_regclass[r] = FLOAT_REGS;
3062 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3063 rs6000_regno_regclass[r] = NO_REGS;
3065 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3066 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3068 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3069 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3070 rs6000_regno_regclass[r] = CR_REGS;
3072 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3073 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3074 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3075 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3076 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3077 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3078 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3079 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3080 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3081 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3083 /* Precalculate register class to simpler reload register class. We don't
3084 need all of the register classes that are combinations of different
3085 classes, just the simple ones that have constraint letters. */
3086 for (c = 0; c < N_REG_CLASSES; c++)
3087 reg_class_to_reg_type[c] = NO_REG_TYPE;
3089 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3090 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3091 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3092 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3093 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3094 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3095 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3096 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3097 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3098 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3100 if (TARGET_VSX)
3102 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3103 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3105 else
3107 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3108 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3111 /* Precalculate the valid memory formats as well as the vector information;
3112 this must be set up before the rs6000_hard_regno_nregs_internal calls
3113 below. */
3114 gcc_assert ((int)VECTOR_NONE == 0);
3115 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3116 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3118 gcc_assert ((int)CODE_FOR_nothing == 0);
3119 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3121 gcc_assert ((int)NO_REGS == 0);
3122 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3124 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3125 controls whether the compiler uses the native alignment or keeps 128-bit alignment. */
3126 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3128 align64 = 64;
3129 align32 = 32;
3131 else
3133 align64 = 128;
3134 align32 = 128;
3137 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3138 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3139 if (TARGET_FLOAT128_TYPE)
3141 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3142 rs6000_vector_align[KFmode] = 128;
3144 if (FLOAT128_IEEE_P (TFmode))
3146 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3147 rs6000_vector_align[TFmode] = 128;
3151 /* V2DF mode, VSX only. */
3152 if (TARGET_VSX)
3154 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3155 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3156 rs6000_vector_align[V2DFmode] = align64;
3159 /* V4SF mode, either VSX or Altivec. */
3160 if (TARGET_VSX)
3162 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3163 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3164 rs6000_vector_align[V4SFmode] = align32;
3166 else if (TARGET_ALTIVEC)
3168 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3169 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3170 rs6000_vector_align[V4SFmode] = align32;
3173 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3174 and stores. */
3175 if (TARGET_ALTIVEC)
3177 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3178 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3179 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3180 rs6000_vector_align[V4SImode] = align32;
3181 rs6000_vector_align[V8HImode] = align32;
3182 rs6000_vector_align[V16QImode] = align32;
3184 if (TARGET_VSX)
3186 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3187 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3188 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3190 else
3192 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3193 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3194 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3198 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit. Allow
3199 it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3200 if (TARGET_VSX)
3202 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3203 rs6000_vector_unit[V2DImode]
3204 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3205 rs6000_vector_align[V2DImode] = align64;
3207 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3208 rs6000_vector_unit[V1TImode]
3209 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3210 rs6000_vector_align[V1TImode] = 128;
3213 /* DFmode, see if we want to use the VSX unit. Memory is handled
3214 differently, so don't set rs6000_vector_mem. */
3215 if (TARGET_VSX)
3217 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3218 rs6000_vector_align[DFmode] = 64;
3221 /* SFmode, see if we want to use the VSX unit. */
3222 if (TARGET_P8_VECTOR)
3224 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3225 rs6000_vector_align[SFmode] = 32;
3228 /* Allow TImode in VSX register and set the VSX memory macros. */
3229 if (TARGET_VSX)
3231 rs6000_vector_mem[TImode] = VECTOR_VSX;
3232 rs6000_vector_align[TImode] = align64;
3235 /* Register class constraints for the constraints that depend on compile
3236 switches. When the VSX code was added, different constraints were added
3237 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3238 of the VSX registers are used. The register classes for scalar floating
3239 point types is set, based on whether we allow that type into the upper
3240 (Altivec) registers. GCC has register classes to target the Altivec
3241 registers for load/store operations, to select using a VSX memory
3242 operation instead of the traditional floating point operation. The
3243 constraints are:
3245 d - Register class to use with traditional DFmode instructions.
3246 f - Register class to use with traditional SFmode instructions.
3247 v - Altivec register.
3248 wa - Any VSX register.
3249 wc - Reserved to represent individual CR bits (used in LLVM).
3250 wd - Preferred register class for V2DFmode.
3251 wf - Preferred register class for V4SFmode.
3252 wg - Float register for power6x move insns.
3253 wh - FP register for direct move instructions.
3254 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3255 wj - FP or VSX register to hold 64-bit integers for direct moves.
3256 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3257 wl - Float register if we can do 32-bit signed int loads.
3258 wm - VSX register for ISA 2.07 direct move operations.
3259 wn - always NO_REGS.
3260 wr - GPR if 64-bit mode is permitted.
3261 ws - Register class to do ISA 2.06 DF operations.
3262 wt - VSX register for TImode in VSX registers.
3263 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3264 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3265 ww - Register class to do SF conversions in with VSX operations.
3266 wx - Float register if we can do 32-bit int stores.
3267 wy - Register class to do ISA 2.07 SF operations.
3268 wz - Float register if we can do 32-bit unsigned int loads.
3269 wH - Altivec register if SImode is allowed in VSX registers.
3270 wI - VSX register if SImode is allowed in VSX registers.
3271 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3272 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3274 if (TARGET_HARD_FLOAT)
3276 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3277 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3280 if (TARGET_VSX)
3282 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3283 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3284 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3285 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3286 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3287 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3288 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3291 /* Add conditional constraints based on various options, to allow us to
3292 collapse multiple insn patterns. */
3293 if (TARGET_ALTIVEC)
3294 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3296 if (TARGET_MFPGPR) /* DFmode */
3297 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3299 if (TARGET_LFIWAX)
3300 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3302 if (TARGET_DIRECT_MOVE)
3304 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3305 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3306 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3307 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3308 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3309 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3312 if (TARGET_POWERPC64)
3314 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3315 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3318 if (TARGET_P8_VECTOR) /* SFmode */
3320 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3321 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3322 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3324 else if (TARGET_VSX)
3325 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3327 if (TARGET_STFIWX)
3328 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3330 if (TARGET_LFIWZX)
3331 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3333 if (TARGET_FLOAT128_TYPE)
3335 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3336 if (FLOAT128_IEEE_P (TFmode))
3337 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3340 if (TARGET_P9_VECTOR)
3342 /* Support for new D-form instructions. */
3343 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3345 /* Support for ISA 3.0 (power9) vectors. */
3346 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3349 /* Support for new direct moves (ISA 3.0 + 64bit). */
3350 if (TARGET_DIRECT_MOVE_128)
3351 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3353 /* Support small integers in VSX registers. */
3354 if (TARGET_P8_VECTOR)
3356 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3357 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3358 if (TARGET_P9_VECTOR)
3360 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3361 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3365 /* Set up the reload helper and direct move functions. */
3366 if (TARGET_VSX || TARGET_ALTIVEC)
3368 if (TARGET_64BIT)
3370 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3371 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3372 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3373 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3374 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3375 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3376 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3377 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3378 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3379 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3380 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3381 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3382 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3383 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3384 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3385 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3386 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3387 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3388 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3389 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3391 if (FLOAT128_VECTOR_P (KFmode))
3393 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3394 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3397 if (FLOAT128_VECTOR_P (TFmode))
3399 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3400 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3403 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3404 available. */
3405 if (TARGET_NO_SDMODE_STACK)
3407 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3408 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3411 if (TARGET_VSX)
3413 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3414 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3417 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3419 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3420 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3421 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3422 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3423 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3424 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3425 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3426 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3427 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3429 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3430 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3431 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3432 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3433 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3434 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3435 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3436 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3437 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3439 if (FLOAT128_VECTOR_P (KFmode))
3441 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3442 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3445 if (FLOAT128_VECTOR_P (TFmode))
3447 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3448 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3452 else
3454 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3455 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3456 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3457 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3458 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3459 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3460 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3461 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3462 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3463 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3464 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3465 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3466 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3467 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3468 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3469 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3470 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3471 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3472 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3473 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3475 if (FLOAT128_VECTOR_P (KFmode))
3477 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3478 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3481 if (FLOAT128_IEEE_P (TFmode))
3483 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3484 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3487 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3488 available. */
3489 if (TARGET_NO_SDMODE_STACK)
3491 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3492 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3495 if (TARGET_VSX)
3497 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3498 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3501 if (TARGET_DIRECT_MOVE)
3503 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3504 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3505 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3509 reg_addr[DFmode].scalar_in_vmx_p = true;
3510 reg_addr[DImode].scalar_in_vmx_p = true;
3512 if (TARGET_P8_VECTOR)
3514 reg_addr[SFmode].scalar_in_vmx_p = true;
3515 reg_addr[SImode].scalar_in_vmx_p = true;
3517 if (TARGET_P9_VECTOR)
3519 reg_addr[HImode].scalar_in_vmx_p = true;
3520 reg_addr[QImode].scalar_in_vmx_p = true;
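/* scalar_in_vmx_p records that scalars of the given mode may live in the
   traditional AltiVec (VMX) half of the VSX register file; per the guards
   above, SFmode/SImode need ISA 2.07 and HImode/QImode need ISA 3.0.  */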
3525 /* Set up the fusion operations. */
3526 if (TARGET_P8_FUSION)
3528 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3529 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3530 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3531 if (TARGET_64BIT)
3532 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3535 if (TARGET_P9_FUSION)
3537 struct fuse_insns {
3538 enum machine_mode mode; /* mode of the fused type. */
3539 enum machine_mode pmode; /* pointer mode. */
3540 enum rs6000_reload_reg_type rtype; /* register type. */
3541 enum insn_code load; /* load insn. */
3542 enum insn_code store; /* store insn. */
3545 static const struct fuse_insns addis_insns[] = {
3546 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3547 CODE_FOR_fusion_vsx_di_sf_load,
3548 CODE_FOR_fusion_vsx_di_sf_store },
3550 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3551 CODE_FOR_fusion_vsx_si_sf_load,
3552 CODE_FOR_fusion_vsx_si_sf_store },
3554 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3555 CODE_FOR_fusion_vsx_di_df_load,
3556 CODE_FOR_fusion_vsx_di_df_store },
3558 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3559 CODE_FOR_fusion_vsx_si_df_load,
3560 CODE_FOR_fusion_vsx_si_df_store },
3562 { E_DImode, E_DImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_di_di_load,
3564 CODE_FOR_fusion_vsx_di_di_store },
3566 { E_DImode, E_SImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_si_di_load,
3568 CODE_FOR_fusion_vsx_si_di_store },
3570 { E_QImode, E_DImode, RELOAD_REG_GPR,
3571 CODE_FOR_fusion_gpr_di_qi_load,
3572 CODE_FOR_fusion_gpr_di_qi_store },
3574 { E_QImode, E_SImode, RELOAD_REG_GPR,
3575 CODE_FOR_fusion_gpr_si_qi_load,
3576 CODE_FOR_fusion_gpr_si_qi_store },
3578 { E_HImode, E_DImode, RELOAD_REG_GPR,
3579 CODE_FOR_fusion_gpr_di_hi_load,
3580 CODE_FOR_fusion_gpr_di_hi_store },
3582 { E_HImode, E_SImode, RELOAD_REG_GPR,
3583 CODE_FOR_fusion_gpr_si_hi_load,
3584 CODE_FOR_fusion_gpr_si_hi_store },
3586 { E_SImode, E_DImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_di_si_load,
3588 CODE_FOR_fusion_gpr_di_si_store },
3590 { E_SImode, E_SImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_si_si_load,
3592 CODE_FOR_fusion_gpr_si_si_store },
3594 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_di_sf_load,
3596 CODE_FOR_fusion_gpr_di_sf_store },
3598 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_si_sf_load,
3600 CODE_FOR_fusion_gpr_si_sf_store },
3602 { E_DImode, E_DImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_di_di_load,
3604 CODE_FOR_fusion_gpr_di_di_store },
3606 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_di_df_load,
3608 CODE_FOR_fusion_gpr_di_df_store },
3611 machine_mode cur_pmode = Pmode;
3612 size_t i;
3614 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3616 machine_mode xmode = addis_insns[i].mode;
3617 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3619 if (addis_insns[i].pmode != cur_pmode)
3620 continue;
3622 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3623 continue;
3625 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3626 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3628 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3630 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3631 = addis_insns[i].load;
3632 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3633 = addis_insns[i].store;
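/* The pairs registered above let the fusion support keep an addis that
   builds the high part of an address adjacent to the dependent memory op,
   e.g. (a hypothetical TOC-relative access, for illustration only):

     addis 9,2,sym@toc@ha
     lwz   9,sym@toc@l(9)

   Power9 cores can treat such a pair as a single fused operation.  */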
3638 /* Note for which types we support fusing a TOC setup plus a memory insn.
3639 We only do fused TOCs for the medium/large code models. */
3640 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3641 && (TARGET_CMODEL != CMODEL_SMALL))
3643 reg_addr[QImode].fused_toc = true;
3644 reg_addr[HImode].fused_toc = true;
3645 reg_addr[SImode].fused_toc = true;
3646 reg_addr[DImode].fused_toc = true;
3647 if (TARGET_HARD_FLOAT)
3649 reg_addr[SFmode].fused_toc = true;
3650 reg_addr[DFmode].fused_toc = true;
3654 /* Precalculate HARD_REGNO_NREGS. */
3655 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3656 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3657 rs6000_hard_regno_nregs[m][r]
3658 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3660 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3661 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3662 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3663 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3664 rs6000_hard_regno_mode_ok_p[m][r] = true;
3666 /* Precalculate CLASS_MAX_NREGS sizes. */
3667 for (c = 0; c < LIM_REG_CLASSES; ++c)
3669 int reg_size;
3671 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3672 reg_size = UNITS_PER_VSX_WORD;
3674 else if (c == ALTIVEC_REGS)
3675 reg_size = UNITS_PER_ALTIVEC_WORD;
3677 else if (c == FLOAT_REGS)
3678 reg_size = UNITS_PER_FP_WORD;
3680 else
3681 reg_size = UNITS_PER_WORD;
3683 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3685 machine_mode m2 = (machine_mode)m;
3686 int reg_size2 = reg_size;
3688 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3689 in VSX. */
3690 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3691 reg_size2 = UNITS_PER_FP_WORD;
3693 rs6000_class_max_nregs[m][c]
3694 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
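/* Worked example of the computation above: V2DFmode is 16 bytes, so in a
   VSX class with reg_size = UNITS_PER_VSX_WORD = 16 it takes
   (16 + 16 - 1) / 16 = 1 register, while IBM extended double (a
   FLOAT128_2REG_P mode) is forced to reg_size2 = UNITS_PER_FP_WORD = 8
   and takes (16 + 8 - 1) / 8 = 2 registers.  */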
3698 /* Calculate which modes to automatically generate code to use the
3699 reciprocal divide and square root instructions. In the future, possibly
3700 automatically generate the instructions even if the user did not specify
3701 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3702 not accurate enough. */
3703 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3704 if (TARGET_FRES)
3705 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3706 if (TARGET_FRE)
3707 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3708 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3709 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3710 if (VECTOR_UNIT_VSX_P (V2DFmode))
3711 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3713 if (TARGET_FRSQRTES)
3714 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3715 if (TARGET_FRSQRTE)
3716 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3717 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3718 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3719 if (VECTOR_UNIT_VSX_P (V2DFmode))
3720 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3722 if (rs6000_recip_control)
3724 if (!flag_finite_math_only)
3725 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3726 "-ffast-math");
3727 if (flag_trapping_math)
3728 warning (0, "%qs requires %qs or %qs", "-mrecip",
3729 "-fno-trapping-math", "-ffast-math");
3730 if (!flag_reciprocal_math)
3731 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3732 "-ffast-math");
3733 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3735 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3736 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3737 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3739 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3740 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3741 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3743 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3744 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3745 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3747 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3748 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3749 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3751 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3752 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3753 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3755 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3756 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3757 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3759 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3760 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3761 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3763 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3764 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3765 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
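/* A user who wants the AUTO_* bits set would compile with something like
   (hypothetical invocation):

     gcc -O3 -ffast-math -mrecip=all foo.c

   -ffast-math supplies the -ffinite-math-only, -fno-trapping-math and
   -freciprocal-math settings tested above, and -mrecip=all fills in
   rs6000_recip_control.  */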
3769 /* Update the addr mask bits in reg_addr to help secondary reload and the
3770 GO_IF_LEGITIMATE_ADDRESS support figure out the appropriate addressing to
3771 use. */
3772 rs6000_setup_reg_addr_masks ();
3774 if (global_init_p || TARGET_DEBUG_TARGET)
3776 if (TARGET_DEBUG_REG)
3777 rs6000_debug_reg_global ();
3779 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3780 fprintf (stderr,
3781 "SImode variable mult cost = %d\n"
3782 "SImode constant mult cost = %d\n"
3783 "SImode short constant mult cost = %d\n"
3784 "DImode multipliciation cost = %d\n"
3785 "SImode division cost = %d\n"
3786 "DImode division cost = %d\n"
3787 "Simple fp operation cost = %d\n"
3788 "DFmode multiplication cost = %d\n"
3789 "SFmode division cost = %d\n"
3790 "DFmode division cost = %d\n"
3791 "cache line size = %d\n"
3792 "l1 cache size = %d\n"
3793 "l2 cache size = %d\n"
3794 "simultaneous prefetches = %d\n"
3795 "\n",
3796 rs6000_cost->mulsi,
3797 rs6000_cost->mulsi_const,
3798 rs6000_cost->mulsi_const9,
3799 rs6000_cost->muldi,
3800 rs6000_cost->divsi,
3801 rs6000_cost->divdi,
3802 rs6000_cost->fp,
3803 rs6000_cost->dmul,
3804 rs6000_cost->sdiv,
3805 rs6000_cost->ddiv,
3806 rs6000_cost->cache_line_size,
3807 rs6000_cost->l1_cache_size,
3808 rs6000_cost->l2_cache_size,
3809 rs6000_cost->simultaneous_prefetches);
3813 #if TARGET_MACHO
3814 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3816 static void
3817 darwin_rs6000_override_options (void)
3819 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3820 off. */
3821 rs6000_altivec_abi = 1;
3822 TARGET_ALTIVEC_VRSAVE = 1;
3823 rs6000_current_abi = ABI_DARWIN;
3825 if (DEFAULT_ABI == ABI_DARWIN
3826 && TARGET_64BIT)
3827 darwin_one_byte_bool = 1;
3829 if (TARGET_64BIT && ! TARGET_POWERPC64)
3831 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3832 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3834 if (flag_mkernel)
3836 rs6000_default_long_calls = 1;
3837 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3840 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3841 Altivec. */
3842 if (!flag_mkernel && !flag_apple_kext
3843 && TARGET_64BIT
3844 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3845 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3847 /* Unless the user (not the configurer) has explicitly overridden it with
3848 -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4 unless targeting
3849 the kernel. */
3850 if (!flag_mkernel
3851 && !flag_apple_kext
3852 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3853 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3854 && ! global_options_set.x_rs6000_cpu_index)
3856 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3859 #endif
3861 /* If not otherwise specified by a target, make 'long double' equivalent to
3862 'double'. */
3864 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3865 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3866 #endif
3868 /* Return the builtin mask of the various options used that could affect which
3869 builtins were used. In the past we used target_flags, but we've run out of
3870 bits, and some options are no longer in target_flags. */
3872 HOST_WIDE_INT
3873 rs6000_builtin_mask_calculate (void)
3875 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3876 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3877 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3878 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3879 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3880 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3881 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3882 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3883 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3884 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3885 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3886 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3887 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3888 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3889 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3890 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3891 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3892 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3893 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3894 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3895 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3896 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
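/* A (hypothetical) consumer would test individual RS6000_BTM_* bits
   before expanding a builtin, e.g.:

     HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
     bool have_p9_vec = (mask & RS6000_BTM_P9_VECTOR) != 0;

   The answer can differ between functions compiled with different target
   pragmas or attributes, since those change rs6000_isa_flags.  */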
3899 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3900 to clobber the XER[CA] bit because clobbering that bit without telling
3901 the compiler worked just fine with versions of GCC before GCC 5, and
3902 breaking a lot of older code in ways that are hard to track down is
3903 not such a great idea. */
3905 static rtx_insn *
3906 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3907 vec<const char *> &/*constraints*/,
3908 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3910 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3911 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3912 return NULL;
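/* For example, this (hypothetical) user asm updates XER[CA] without
   declaring it:

     static inline long
     dec_and_carry (long in)
     {
       long out;
       __asm__ ("addic %0,%1,-1" : "=r" (out) : "r" (in));
       return out;
     }

   addic writes the carry bit, so the implicit clobber added above keeps
   GCC from carrying CA-dependent state across the asm.  */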
3915 /* Override command line options.
3917 Combine build-specific configuration information with options
3918 specified on the command line to set various state variables which
3919 influence code generation, optimization, and expansion of built-in
3920 functions. Ensure that command-line configuration preferences are
3921 compatible with each other and with the build configuration; issue
3922 warnings while adjusting configuration or error messages while
3923 rejecting configuration.
3925 Upon entry to this function:
3927 This function is called once at the beginning of
3928 compilation, and then again at the start and end of compiling
3929 each section of code that has a different configuration, as
3930 indicated, for example, by adding the
3932 __attribute__((__target__("cpu=power9")))
3934 qualifier to a function definition or, for example, by bracketing
3935 code between
3937 #pragma GCC target("altivec")
3941 #pragma GCC reset_options
3943 directives. Parameter global_init_p is true for the initial
3944 invocation, which initializes global variables, and false for all
3945 subsequent invocations.
3948 Various global state information is assumed to be valid. This
3949 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3950 default CPU specified at build configure time, TARGET_DEFAULT,
3951 representing the default set of option flags for the default
3952 target, and global_options_set.x_rs6000_isa_flags, representing
3953 which options were requested on the command line.
3955 Upon return from this function:
3957 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3958 was set by name on the command line. Additionally, if certain
3959 attributes are automatically enabled or disabled by this function
3960 in order to assure compatibility between options and
3961 configuration, the flags associated with those attributes are
3962 also set. By setting these "explicit bits", we avoid the risk
3963 that other code might accidentally overwrite these particular
3964 attributes with "default values".
3966 The various bits of rs6000_isa_flags are set to indicate the
3967 target options that have been selected for the most current
3968 compilation efforts. This has the effect of also turning on the
3969 associated TARGET_XXX values since these are macros which are
3970 generally defined to test the corresponding bit of the
3971 rs6000_isa_flags variable.
3973 The variable rs6000_builtin_mask is set to represent the target
3974 options for the most current compilation efforts, consistent with
3975 the current contents of rs6000_isa_flags. This variable controls
3976 expansion of built-in functions.
3978 Various other global variables and fields of global structures
3979 (over 50 in all) are initialized to reflect the desired options
3980 for the most current compilation efforts. */
3982 static bool
3983 rs6000_option_override_internal (bool global_init_p)
3985 bool ret = true;
3987 HOST_WIDE_INT set_masks;
3988 HOST_WIDE_INT ignore_masks;
3989 int cpu_index = -1;
3990 int tune_index;
3991 struct cl_target_option *main_target_opt
3992 = ((global_init_p || target_option_default_node == NULL)
3993 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3995 /* Print defaults. */
3996 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3997 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3999 /* Remember the explicit arguments. */
4000 if (global_init_p)
4001 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4003 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4004 library functions, so warn about it. The flag may be useful for
4005 performance studies from time to time though, so don't disable it
4006 entirely. */
4007 if (global_options_set.x_rs6000_alignment_flags
4008 && rs6000_alignment_flags == MASK_ALIGN_POWER
4009 && DEFAULT_ABI == ABI_DARWIN
4010 && TARGET_64BIT)
4011 warning (0, "%qs is not supported for 64-bit Darwin;"
4012 " it is incompatible with the installed C and C++ libraries",
4013 "-malign-power");
4015 /* Numerous experiments show that IRA-based loop pressure
4016 calculation works better for RTL loop invariant motion on targets
4017 with enough (>= 32) registers. It is an expensive optimization,
4018 so it is enabled only when optimizing for peak performance. */
4019 if (optimize >= 3 && global_init_p
4020 && !global_options_set.x_flag_ira_loop_pressure)
4021 flag_ira_loop_pressure = 1;
4023 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4024 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4025 options were already specified. */
4026 if (flag_sanitize & SANITIZE_USER_ADDRESS
4027 && !global_options_set.x_flag_asynchronous_unwind_tables)
4028 flag_asynchronous_unwind_tables = 1;
4030 /* Set the pointer size. */
4031 if (TARGET_64BIT)
4033 rs6000_pmode = DImode;
4034 rs6000_pointer_size = 64;
4036 else
4038 rs6000_pmode = SImode;
4039 rs6000_pointer_size = 32;
4042 /* Some OSs don't support saving the high part of 64-bit registers on context
4043 switch. Other OSs don't support saving Altivec registers. On those OSs,
4044 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4045 if the user wants either, the user must explicitly specify them and we
4046 won't interfere with the user's specification. */
4048 set_masks = POWERPC_MASKS;
4049 #ifdef OS_MISSING_POWERPC64
4050 if (OS_MISSING_POWERPC64)
4051 set_masks &= ~OPTION_MASK_POWERPC64;
4052 #endif
4053 #ifdef OS_MISSING_ALTIVEC
4054 if (OS_MISSING_ALTIVEC)
4055 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4056 | OTHER_VSX_VECTOR_MASKS);
4057 #endif
4059 /* Don't override by the processor default if given explicitly. */
4060 set_masks &= ~rs6000_isa_flags_explicit;
4062 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4063 the cpu in a target attribute or pragma, but did not specify a tuning
4064 option, use the cpu for the tuning option rather than the option specified
4065 with -mtune on the command line. Process a '--with-cpu' configuration
4066 request as an implicit --cpu. */
4067 if (rs6000_cpu_index >= 0)
4068 cpu_index = rs6000_cpu_index;
4069 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4070 cpu_index = main_target_opt->x_rs6000_cpu_index;
4071 else if (OPTION_TARGET_CPU_DEFAULT)
4072 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4074 if (cpu_index >= 0)
4076 const char *unavailable_cpu = NULL;
4077 switch (processor_target_table[cpu_index].processor)
4079 #ifndef HAVE_AS_POWER9
4080 case PROCESSOR_POWER9:
4081 unavailable_cpu = "power9";
4082 break;
4083 #endif
4084 #ifndef HAVE_AS_POWER8
4085 case PROCESSOR_POWER8:
4086 unavailable_cpu = "power8";
4087 break;
4088 #endif
4089 #ifndef HAVE_AS_POPCNTD
4090 case PROCESSOR_POWER7:
4091 unavailable_cpu = "power7";
4092 break;
4093 #endif
4094 #ifndef HAVE_AS_DFP
4095 case PROCESSOR_POWER6:
4096 unavailable_cpu = "power6";
4097 break;
4098 #endif
4099 #ifndef HAVE_AS_POPCNTB
4100 case PROCESSOR_POWER5:
4101 unavailable_cpu = "power5";
4102 break;
4103 #endif
4104 default:
4105 break;
4107 if (unavailable_cpu)
4109 cpu_index = -1;
4110 warning (0, "will not generate %qs instructions because "
4111 "assembler lacks %qs support", unavailable_cpu,
4112 unavailable_cpu);
4116 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4117 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4118 with those from the cpu, except for options that were explicitly set. If
4119 we don't have a cpu, do not override the target bits set in
4120 TARGET_DEFAULT. */
4121 if (cpu_index >= 0)
4123 rs6000_cpu_index = cpu_index;
4124 rs6000_isa_flags &= ~set_masks;
4125 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4126 & set_masks);
4128 else
4130 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4131 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4132 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4133 to using rs6000_isa_flags, we need to do the initialization here.
4135 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4136 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4137 HOST_WIDE_INT flags;
4138 if (TARGET_DEFAULT)
4139 flags = TARGET_DEFAULT;
4140 else
4142 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4143 const char *default_cpu = (!TARGET_POWERPC64
4144 ? "powerpc"
4145 : (BYTES_BIG_ENDIAN
4146 ? "powerpc64"
4147 : "powerpc64le"));
4148 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4149 flags = processor_target_table[default_cpu_index].target_enable;
4151 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
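/* Example of the interaction above: a compiler configured with
   --with-cpu=power8 but invoked with -mno-vsx arrives here with cpu_index
   selecting power8. The power8 ISA bits are merged through set_masks,
   from which OPTION_MASK_VSX was already removed because it was explicit,
   so VSX remains off.  */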
4154 if (rs6000_tune_index >= 0)
4155 tune_index = rs6000_tune_index;
4156 else if (cpu_index >= 0)
4157 rs6000_tune_index = tune_index = cpu_index;
4158 else
4160 size_t i;
4161 enum processor_type tune_proc
4162 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4164 tune_index = -1;
4165 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4166 if (processor_target_table[i].processor == tune_proc)
4168 tune_index = i;
4169 break;
4173 if (cpu_index >= 0)
4174 rs6000_cpu = processor_target_table[cpu_index].processor;
4175 else
4176 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
4178 gcc_assert (tune_index >= 0);
4179 rs6000_tune = processor_target_table[tune_index].processor;
4181 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4182 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4183 || rs6000_cpu == PROCESSOR_PPCE5500)
4185 if (TARGET_ALTIVEC)
4186 error ("AltiVec not supported in this target");
4189 /* If we are optimizing big endian systems for space, use the load/store
4190 multiple instructions. */
4191 if (BYTES_BIG_ENDIAN && optimize_size)
4192 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
4194 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
4195 because the hardware doesn't support the instructions used in little
4196 endian mode, and they cause an alignment trap. The 750 does not cause an
4197 alignment trap (except when the target is unaligned). */
4199 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
4201 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4202 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4203 warning (0, "%qs is not supported on little endian systems",
4204 "-mmultiple");
4207 /* If little-endian, default to -mstrict-align on older processors.
4208 Testing for htm matches power8 and later. */
4209 if (!BYTES_BIG_ENDIAN
4210 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4211 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4213 if (!rs6000_fold_gimple)
4214 fprintf (stderr,
4215 "gimple folding of rs6000 builtins has been disabled.\n");
4217 /* Add some warnings for VSX. */
4218 if (TARGET_VSX)
4220 const char *msg = NULL;
4221 if (!TARGET_HARD_FLOAT)
4223 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4224 msg = N_("-mvsx requires hardware floating point");
4225 else
4227 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4228 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4231 else if (TARGET_AVOID_XFORM > 0)
4232 msg = N_("-mvsx needs indexed addressing");
4233 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4234 & OPTION_MASK_ALTIVEC))
4236 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4237 msg = N_("-mvsx and -mno-altivec are incompatible");
4238 else
4239 msg = N_("-mno-altivec disables vsx");
4242 if (msg)
4244 warning (0, msg);
4245 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4246 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4250 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4251 the -mcpu setting to enable options that conflict. */
4252 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4253 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4254 | OPTION_MASK_ALTIVEC
4255 | OPTION_MASK_VSX)) != 0)
4256 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4257 | OPTION_MASK_DIRECT_MOVE)
4258 & ~rs6000_isa_flags_explicit);
4260 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4261 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4263 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4264 off all of the options that depend on those flags. */
4265 ignore_masks = rs6000_disable_incompatible_switches ();
4267 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4268 unless the user explicitly used the -mno-<option> to disable the code. */
4269 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4270 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4271 else if (TARGET_P9_MINMAX)
4273 if (cpu_index >= 0)
4275 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4277 /* legacy behavior: allow -mcpu=power9 with certain
4278 capabilities explicitly disabled. */
4279 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4281 else
4282 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4283 "for <xxx> less than power9", "-mcpu");
4285 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4286 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4287 & rs6000_isa_flags_explicit))
4288 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4289 were explicitly cleared. */
4290 error ("%qs incompatible with explicitly disabled options",
4291 "-mpower9-minmax");
4292 else
4293 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4295 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4296 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4297 else if (TARGET_VSX)
4298 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4299 else if (TARGET_POPCNTD)
4300 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4301 else if (TARGET_DFP)
4302 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4303 else if (TARGET_CMPB)
4304 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4305 else if (TARGET_FPRND)
4306 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4307 else if (TARGET_POPCNTB)
4308 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4309 else if (TARGET_ALTIVEC)
4310 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
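/* The ISA_*_MASKS sets are cumulative, so e.g. a bare -mdfp lands in the
   ISA_2_5_MASKS_SERVER arm and also picks up the older ISA 2.4/2.2
   options, minus anything the user explicitly disabled (ignore_masks).  */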
4312 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4314 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4315 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4316 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4319 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4321 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4322 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4323 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4326 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4328 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4329 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4330 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4333 if (TARGET_P8_VECTOR && !TARGET_VSX)
4335 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4336 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4337 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4338 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4340 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4341 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4342 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4344 else
4346 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4347 not explicit. */
4348 rs6000_isa_flags |= OPTION_MASK_VSX;
4349 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4353 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4355 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4356 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4357 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4360 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4361 silently turn off quad memory mode. */
4362 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4364 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4365 warning (0, N_("-mquad-memory requires 64-bit mode"));
4367 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4368 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4370 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4371 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4374 /* Non-atomic quad memory load/store instructions are disabled for little
4375 endian, since the words are reversed, but atomic operations can still be
4376 done by swapping the words. */
4377 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4379 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4380 warning (0, N_("-mquad-memory is not available in little endian "
4381 "mode"));
4383 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4386 /* Assume that if the user asked for normal quad memory instructions, they
4387 want the atomic versions as well, unless they explicitly told us not to
4388 use quad word atomic instructions. */
4389 if (TARGET_QUAD_MEMORY
4390 && !TARGET_QUAD_MEMORY_ATOMIC
4391 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4392 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4394 /* If we can shrink-wrap the TOC register save separately, then use
4395 -msave-toc-indirect unless explicitly disabled. */
4396 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4397 && flag_shrink_wrap_separate
4398 && optimize_function_for_speed_p (cfun))
4399 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4401 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4402 generating power8 instructions. */
4403 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4404 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4405 & OPTION_MASK_P8_FUSION);
4407 /* Setting additional fusion flags turns on base fusion. */
4408 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4410 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4412 if (TARGET_P8_FUSION_SIGN)
4413 error ("%qs requires %qs", "-mpower8-fusion-sign",
4414 "-mpower8-fusion");
4416 if (TARGET_TOC_FUSION)
4417 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4419 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4421 else
4422 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4425 /* Power9 fusion is a superset of power8 fusion. */
4426 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4428 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4430 /* We prefer to not mention undocumented options in
4431 error messages. However, if users have managed to select
4432 power9-fusion without selecting power8-fusion, they
4433 already know about undocumented flags. */
4434 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4435 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4437 else
4438 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4441 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4442 generating power9 instructions. */
4443 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4444 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4445 & OPTION_MASK_P9_FUSION);
4447 /* Power8 does not fuse sign extended loads with the addis. If we are
4448 optimizing at high levels for speed, convert a sign extended load into a
4449 zero extending load, and an explicit sign extension. */
4450 if (TARGET_P8_FUSION
4451 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4452 && optimize_function_for_speed_p (cfun)
4453 && optimize >= 3)
4454 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4456 /* TOC fusion requires 64-bit and medium/large code model. */
4457 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4459 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4460 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4461 warning (0, N_("-mtoc-fusion requires 64-bit"));
4464 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4466 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4467 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4468 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4471 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4472 model. */
4473 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4474 && (TARGET_CMODEL != CMODEL_SMALL)
4475 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4476 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4478 /* ISA 3.0 vector instructions include ISA 2.07. */
4479 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4481 /* We prefer to not mention undocumented options in
4482 error messages. However, if users have managed to select
4483 power9-vector without selecting power8-vector, they
4484 already know about undocumented flags. */
4485 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4486 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4487 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4488 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4490 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4491 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4492 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4494 else
4496 /* OPTION_MASK_P9_VECTOR is explicit and
4497 OPTION_MASK_P8_VECTOR is not explicit. */
4498 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4499 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4503 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07 support.
4504 If we only have ISA 2.06 support, and the user did not specify the
4505 switch, leave it set to -1 so the movmisalign patterns are enabled, but
4506 we don't enable the full vectorization support. */
4507 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4508 TARGET_ALLOW_MOVMISALIGN = 1;
4510 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4512 if (TARGET_ALLOW_MOVMISALIGN > 0
4513 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4514 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4516 TARGET_ALLOW_MOVMISALIGN = 0;
4519 /* Determine when unaligned vector accesses are permitted, and when
4520 they are preferred over masked Altivec loads. Note that if
4521 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4522 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4523 not true. */
4524 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4526 if (!TARGET_VSX)
4528 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4529 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4531 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4534 else if (!TARGET_ALLOW_MOVMISALIGN)
4536 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4537 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4538 "-mallow-movmisalign");
4540 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4544 /* Set long double size before the IEEE 128-bit tests. */
4545 if (!global_options_set.x_rs6000_long_double_type_size)
4547 if (main_target_opt != NULL
4548 && (main_target_opt->x_rs6000_long_double_type_size
4549 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4550 error ("target attribute or pragma changes long double size");
4551 else
4552 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4555 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4556 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4557 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4558 those systems will not pick up this default. Warn if the user changes the
4559 default unless either the user used the -Wno-psabi option, or the compiler
4560 was built to enable multilibs to switch between the two long double
4561 types. */
4562 if (!global_options_set.x_rs6000_ieeequad)
4563 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4565 else if (!TARGET_IEEEQUAD_MULTILIB
4566 && rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
4567 && TARGET_LONG_DOUBLE_128)
4569 static bool warned_change_long_double;
4570 if (!warned_change_long_double)
4572 warned_change_long_double = true;
4573 if (TARGET_IEEEQUAD)
4574 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4575 else
4576 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4580 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4581 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4582 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4583 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4584 the keyword and the type. */
4585 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4587 /* IEEE 128-bit floating point requires VSX support. */
4588 if (TARGET_FLOAT128_KEYWORD)
4590 if (!TARGET_VSX)
4592 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4593 error ("%qs requires VSX support", "-mfloat128");
4595 TARGET_FLOAT128_TYPE = 0;
4596 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4597 | OPTION_MASK_FLOAT128_HW);
4599 else if (!TARGET_FLOAT128_TYPE)
4601 TARGET_FLOAT128_TYPE = 1;
4602 warning (0, "The -mfloat128 option may not be fully supported");
4606 /* Enable the __float128 keyword under Linux by default. */
4607 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4608 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4609 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4611 /* If we are supporting the float128 type and have full ISA 3.0 support,
4612 enable -mfloat128-hardware by default. However, don't enable the
4613 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4614 because sometimes the compiler wants to put things in an integer
4615 container, and if we don't have __int128 support, it is impossible. */
4616 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4617 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4618 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4619 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4621 if (TARGET_FLOAT128_HW
4622 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4624 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4625 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4627 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4630 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4632 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4633 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4635 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
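/* Net effect on a 64-bit Linux VSX system with full ISA 3.0 support:
   TARGET_FLOAT128_TYPE, the __float128 keyword and -mfloat128-hardware
   are all on by default, so a (hypothetical) snippet like

     __float128 scale (__float128 x) { return x * 2.0q; }

   compiles to hardware instructions without extra switches.  */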
4638 /* Print the options after updating the defaults. */
4639 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4640 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4642 /* E500mc does "better" if we inline more aggressively. Respect the
4643 user's opinion, though. */
4644 if (rs6000_block_move_inline_limit == 0
4645 && (rs6000_tune == PROCESSOR_PPCE500MC
4646 || rs6000_tune == PROCESSOR_PPCE500MC64
4647 || rs6000_tune == PROCESSOR_PPCE5500
4648 || rs6000_tune == PROCESSOR_PPCE6500))
4649 rs6000_block_move_inline_limit = 128;
4651 /* store_one_arg depends on expand_block_move to handle at least the
4652 size of reg_parm_stack_space. */
4653 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4654 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4656 if (global_init_p)
4658 /* If the appropriate debug option is enabled, replace the target hooks
4659 with debug versions that call the real version and then prints
4660 debugging information. */
4661 if (TARGET_DEBUG_COST)
4663 targetm.rtx_costs = rs6000_debug_rtx_costs;
4664 targetm.address_cost = rs6000_debug_address_cost;
4665 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4668 if (TARGET_DEBUG_ADDR)
4670 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4671 targetm.legitimize_address = rs6000_debug_legitimize_address;
4672 rs6000_secondary_reload_class_ptr
4673 = rs6000_debug_secondary_reload_class;
4674 targetm.secondary_memory_needed
4675 = rs6000_debug_secondary_memory_needed;
4676 targetm.can_change_mode_class
4677 = rs6000_debug_can_change_mode_class;
4678 rs6000_preferred_reload_class_ptr
4679 = rs6000_debug_preferred_reload_class;
4680 rs6000_legitimize_reload_address_ptr
4681 = rs6000_debug_legitimize_reload_address;
4682 rs6000_mode_dependent_address_ptr
4683 = rs6000_debug_mode_dependent_address;
4686 if (rs6000_veclibabi_name)
4688 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4689 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4690 else
4692 error ("unknown vectorization library ABI type (%qs) for "
4693 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4694 ret = false;
4699 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4700 target attribute or pragma which automatically enables both options,
4701 unless the altivec ABI was set. This is set by default for 64-bit, but
4702 not for 32-bit. */
4703 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4705 TARGET_FLOAT128_TYPE = 0;
4706 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4707 | OPTION_MASK_FLOAT128_KEYWORD)
4708 & ~rs6000_isa_flags_explicit);
4711 /* Enable Altivec ABI for AIX -maltivec. */
4712 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4714 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4715 error ("target attribute or pragma changes AltiVec ABI");
4716 else
4717 rs6000_altivec_abi = 1;
4720 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4721 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4722 be explicitly overridden in either case. */
4723 if (TARGET_ELF)
4725 if (!global_options_set.x_rs6000_altivec_abi
4726 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4728 if (main_target_opt != NULL &&
4729 !main_target_opt->x_rs6000_altivec_abi)
4730 error ("target attribute or pragma changes AltiVec ABI");
4731 else
4732 rs6000_altivec_abi = 1;
4736 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4737 So far, the only darwin64 targets are also MACH-O. */
4738 if (TARGET_MACHO
4739 && DEFAULT_ABI == ABI_DARWIN
4740 && TARGET_64BIT)
4742 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4743 error ("target attribute or pragma changes darwin64 ABI");
4744 else
4746 rs6000_darwin64_abi = 1;
4747 /* Default to natural alignment, for better performance. */
4748 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4752 /* Place FP constants in the constant pool instead of TOC
4753 if section anchors enabled. */
4754 if (flag_section_anchors
4755 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4756 TARGET_NO_FP_IN_TOC = 1;
4758 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4759 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4761 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4762 SUBTARGET_OVERRIDE_OPTIONS;
4763 #endif
4764 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4765 SUBSUBTARGET_OVERRIDE_OPTIONS;
4766 #endif
4767 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4768 SUB3TARGET_OVERRIDE_OPTIONS;
4769 #endif
4771 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4772 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4774 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4775 && rs6000_tune != PROCESSOR_POWER5
4776 && rs6000_tune != PROCESSOR_POWER6
4777 && rs6000_tune != PROCESSOR_POWER7
4778 && rs6000_tune != PROCESSOR_POWER8
4779 && rs6000_tune != PROCESSOR_POWER9
4780 && rs6000_tune != PROCESSOR_PPCA2
4781 && rs6000_tune != PROCESSOR_CELL
4782 && rs6000_tune != PROCESSOR_PPC476);
4783 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4784 || rs6000_tune == PROCESSOR_POWER5
4785 || rs6000_tune == PROCESSOR_POWER7
4786 || rs6000_tune == PROCESSOR_POWER8);
4787 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4788 || rs6000_tune == PROCESSOR_POWER5
4789 || rs6000_tune == PROCESSOR_POWER6
4790 || rs6000_tune == PROCESSOR_POWER7
4791 || rs6000_tune == PROCESSOR_POWER8
4792 || rs6000_tune == PROCESSOR_POWER9
4793 || rs6000_tune == PROCESSOR_PPCE500MC
4794 || rs6000_tune == PROCESSOR_PPCE500MC64
4795 || rs6000_tune == PROCESSOR_PPCE5500
4796 || rs6000_tune == PROCESSOR_PPCE6500);
4798 /* Allow debug switches to override the above settings. These are set to -1
4799 in rs6000.opt to indicate the user hasn't directly set the switch. */
4800 if (TARGET_ALWAYS_HINT >= 0)
4801 rs6000_always_hint = TARGET_ALWAYS_HINT;
4803 if (TARGET_SCHED_GROUPS >= 0)
4804 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4806 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4807 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4809 rs6000_sched_restricted_insns_priority
4810 = (rs6000_sched_groups ? 1 : 0);
4812 /* Handle -msched-costly-dep option. */
4813 rs6000_sched_costly_dep
4814 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4816 if (rs6000_sched_costly_dep_str)
4818 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4819 rs6000_sched_costly_dep = no_dep_costly;
4820 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4821 rs6000_sched_costly_dep = all_deps_costly;
4822 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4823 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4824 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4825 rs6000_sched_costly_dep = store_to_load_dep_costly;
4826 else
4827 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4828 atoi (rs6000_sched_costly_dep_str));
4831 /* Handle -minsert-sched-nops option. */
4832 rs6000_sched_insert_nops
4833 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4835 if (rs6000_sched_insert_nops_str)
4837 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4838 rs6000_sched_insert_nops = sched_finish_none;
4839 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4840 rs6000_sched_insert_nops = sched_finish_pad_groups;
4841 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4842 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4843 else
4844 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4845 atoi (rs6000_sched_insert_nops_str));
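/* Typical (hypothetical) uses of the two scheduling switches parsed
   above:

     -msched-costly-dep=true_store_to_load -minsert-sched-nops=regroup_exact

   A string that matches none of the keywords is handed to atoi, so plain
   numeric arguments such as -msched-costly-dep=20 are also accepted.  */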
4848 /* Handle the stack protector options. */
4849 if (!global_options_set.x_rs6000_stack_protector_guard)
4850 #ifdef TARGET_THREAD_SSP_OFFSET
4851 rs6000_stack_protector_guard = SSP_TLS;
4852 #else
4853 rs6000_stack_protector_guard = SSP_GLOBAL;
4854 #endif
4856 #ifdef TARGET_THREAD_SSP_OFFSET
4857 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4858 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4859 #endif
4861 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4863 char *endp;
4864 const char *str = rs6000_stack_protector_guard_offset_str;
4866 errno = 0;
4867 long offset = strtol (str, &endp, 0);
4868 if (!*str || *endp || errno)
4869 error ("%qs is not a valid number in %qs", str,
4870 "-mstack-protector-guard-offset=");
4872 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4873 || (TARGET_64BIT && (offset & 3)))
4874 error ("%qs is not a valid offset in %qs", str,
4875 "-mstack-protector-guard-offset=");
4877 rs6000_stack_protector_guard_offset = offset;
4880 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4882 const char *str = rs6000_stack_protector_guard_reg_str;
4883 int reg = decode_reg_name (str);
4885 if (!IN_RANGE (reg, 1, 31))
4886 error ("%qs is not a valid base register in %qs", str,
4887 "-mstack-protector-guard-reg=");
4889 rs6000_stack_protector_guard_reg = reg;
4892 if (rs6000_stack_protector_guard == SSP_TLS
4893 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4894 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
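/* A (hypothetical) TLS-based guard configuration would look like:

     -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
	-mstack-protector-guard-offset=0x28

   The offset string is parsed with strtol above, must fit in a signed
   16-bit displacement, and on 64-bit must have the low two bits clear.  */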
4896 if (global_init_p)
4898 #ifdef TARGET_REGNAMES
4899 /* If the user desires alternate register names, copy in the
4900 alternate names now. */
4901 if (TARGET_REGNAMES)
4902 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4903 #endif
4905 /* Set aix_struct_return last, after the ABI is determined.
4906 If -maix-struct-return or -msvr4-struct-return was explicitly
4907 used, don't override with the ABI default. */
4908 if (!global_options_set.x_aix_struct_return)
4909 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4911 #if 0
4912 /* IBM XL compiler defaults to unsigned bitfields. */
4913 if (TARGET_XL_COMPAT)
4914 flag_signed_bitfields = 0;
4915 #endif
4917 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4918 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4920 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4922 /* We can only guarantee the availability of DI pseudo-ops when
4923 assembling for 64-bit targets. */
4924 if (!TARGET_64BIT)
4926 targetm.asm_out.aligned_op.di = NULL;
4927 targetm.asm_out.unaligned_op.di = NULL;
4931 /* Set branch target alignment, if not optimizing for size. */
4932 if (!optimize_size)
4934 /* Cell wants branch targets aligned to 8 bytes for dual issue. Titan
4935 wants 8-byte alignment to avoid misprediction by the branch predictor. */
4936 if (rs6000_tune == PROCESSOR_TITAN
4937 || rs6000_tune == PROCESSOR_CELL)
4939 if (align_functions <= 0)
4940 align_functions = 8;
4941 if (align_jumps <= 0)
4942 align_jumps = 8;
4943 if (align_loops <= 0)
4944 align_loops = 8;
4946 if (rs6000_align_branch_targets)
4948 if (align_functions <= 0)
4949 align_functions = 16;
4950 if (align_jumps <= 0)
4951 align_jumps = 16;
4952 if (align_loops <= 0)
4954 can_override_loop_align = 1;
4955 align_loops = 16;
4958 if (align_jumps_max_skip <= 0)
4959 align_jumps_max_skip = 15;
4960 if (align_loops_max_skip <= 0)
4961 align_loops_max_skip = 15;
4964 /* Arrange to save and restore machine status around nested functions. */
4965 init_machine_status = rs6000_init_machine_status;
4967 /* We should always be splitting complex arguments, but we can't break
4968 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4969 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4970 targetm.calls.split_complex_arg = NULL;
4972 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4973 if (DEFAULT_ABI == ABI_AIX)
4974 targetm.calls.custom_function_descriptors = 0;
4977 /* Initialize rs6000_cost with the appropriate target costs. */
4978 if (optimize_size)
4979 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4980 else
4981 switch (rs6000_tune)
4983 case PROCESSOR_RS64A:
4984 rs6000_cost = &rs64a_cost;
4985 break;
4987 case PROCESSOR_MPCCORE:
4988 rs6000_cost = &mpccore_cost;
4989 break;
4991 case PROCESSOR_PPC403:
4992 rs6000_cost = &ppc403_cost;
4993 break;
4995 case PROCESSOR_PPC405:
4996 rs6000_cost = &ppc405_cost;
4997 break;
4999 case PROCESSOR_PPC440:
5000 rs6000_cost = &ppc440_cost;
5001 break;
5003 case PROCESSOR_PPC476:
5004 rs6000_cost = &ppc476_cost;
5005 break;
5007 case PROCESSOR_PPC601:
5008 rs6000_cost = &ppc601_cost;
5009 break;
5011 case PROCESSOR_PPC603:
5012 rs6000_cost = &ppc603_cost;
5013 break;
5015 case PROCESSOR_PPC604:
5016 rs6000_cost = &ppc604_cost;
5017 break;
5019 case PROCESSOR_PPC604e:
5020 rs6000_cost = &ppc604e_cost;
5021 break;
5023 case PROCESSOR_PPC620:
5024 rs6000_cost = &ppc620_cost;
5025 break;
5027 case PROCESSOR_PPC630:
5028 rs6000_cost = &ppc630_cost;
5029 break;
5031 case PROCESSOR_CELL:
5032 rs6000_cost = &ppccell_cost;
5033 break;
5035 case PROCESSOR_PPC750:
5036 case PROCESSOR_PPC7400:
5037 rs6000_cost = &ppc750_cost;
5038 break;
5040 case PROCESSOR_PPC7450:
5041 rs6000_cost = &ppc7450_cost;
5042 break;
5044 case PROCESSOR_PPC8540:
5045 case PROCESSOR_PPC8548:
5046 rs6000_cost = &ppc8540_cost;
5047 break;
5049 case PROCESSOR_PPCE300C2:
5050 case PROCESSOR_PPCE300C3:
5051 rs6000_cost = &ppce300c2c3_cost;
5052 break;
5054 case PROCESSOR_PPCE500MC:
5055 rs6000_cost = &ppce500mc_cost;
5056 break;
5058 case PROCESSOR_PPCE500MC64:
5059 rs6000_cost = &ppce500mc64_cost;
5060 break;
5062 case PROCESSOR_PPCE5500:
5063 rs6000_cost = &ppce5500_cost;
5064 break;
5066 case PROCESSOR_PPCE6500:
5067 rs6000_cost = &ppce6500_cost;
5068 break;
5070 case PROCESSOR_TITAN:
5071 rs6000_cost = &titan_cost;
5072 break;
5074 case PROCESSOR_POWER4:
5075 case PROCESSOR_POWER5:
5076 rs6000_cost = &power4_cost;
5077 break;
5079 case PROCESSOR_POWER6:
5080 rs6000_cost = &power6_cost;
5081 break;
5083 case PROCESSOR_POWER7:
5084 rs6000_cost = &power7_cost;
5085 break;
5087 case PROCESSOR_POWER8:
5088 rs6000_cost = &power8_cost;
5089 break;
5091 case PROCESSOR_POWER9:
5092 rs6000_cost = &power9_cost;
5093 break;
5095 case PROCESSOR_PPCA2:
5096 rs6000_cost = &ppca2_cost;
5097 break;
5099 default:
5100 gcc_unreachable ();
5103 if (global_init_p)
5105 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5106 rs6000_cost->simultaneous_prefetches,
5107 global_options.x_param_values,
5108 global_options_set.x_param_values);
5109 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5110 global_options.x_param_values,
5111 global_options_set.x_param_values);
5112 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5113 rs6000_cost->cache_line_size,
5114 global_options.x_param_values,
5115 global_options_set.x_param_values);
5116 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5117 global_options.x_param_values,
5118 global_options_set.x_param_values);
5120 /* Increase loop peeling limits based on performance analysis. */
5121 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5122 global_options.x_param_values,
5123 global_options_set.x_param_values);
5124 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5125 global_options.x_param_values,
5126 global_options_set.x_param_values);
5128 /* Use the 'model' -fsched-pressure algorithm by default. */
5129 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5130 SCHED_PRESSURE_MODEL,
5131 global_options.x_param_values,
5132 global_options_set.x_param_values);
5134 /* If using typedef char *va_list, signal that
5135 __builtin_va_start (&ap, 0) can be optimized to
5136 ap = __builtin_next_arg (0). */
5137 if (DEFAULT_ABI != ABI_V4)
5138 targetm.expand_builtin_va_start = NULL;
5141 /* If not explicitly specified via option, decide whether to generate indexed
5142 load/store instructions. A value of -1 indicates that the
5143 initial value of this variable has not been overwritten. During
5144 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5145 if (TARGET_AVOID_XFORM == -1)
5146 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5147 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5148 need indexed accesses and the type used is the scalar type of the element
5149 being loaded or stored. */
5150 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
5151 && !TARGET_ALTIVEC);
5153 /* Set the -mrecip options. */
5154 if (rs6000_recip_name)
5156 char *p = ASTRDUP (rs6000_recip_name);
5157 char *q;
5158 unsigned int mask, i;
5159 bool invert;
5161 while ((q = strtok (p, ",")) != NULL)
5163 p = NULL;
5164 if (*q == '!')
5166 invert = true;
5167 q++;
5169 else
5170 invert = false;
5172 if (!strcmp (q, "default"))
5173 mask = ((TARGET_RECIP_PRECISION)
5174 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5175 else
5177 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5178 if (!strcmp (q, recip_options[i].string))
5180 mask = recip_options[i].mask;
5181 break;
5184 if (i == ARRAY_SIZE (recip_options))
5186 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5187 invert = false;
5188 mask = 0;
5189 ret = false;
5193 if (invert)
5194 rs6000_recip_control &= ~mask;
5195 else
5196 rs6000_recip_control |= mask;
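
/* Editor's sketch (not part of GCC; strtok and printf are available via the
   headers pulled in through system.h above): a minimal illustration of the
   comma-tokenizing pattern used by the -mrecip parsing above, run on a
   hypothetical option string.  */
static void
demo_recip_parse (void)
{
  char buf[] = "div,!rsqrtf";	/* hypothetical -mrecip= value */
  for (char *q = strtok (buf, ","); q != NULL; q = strtok (NULL, ","))
    {
      bool invert = (*q == '!');	/* a leading '!' clears the mask bits */
      printf ("%s option %s\n", invert ? "clear" : "set", q + invert);
    }
}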
5200 /* Set the builtin mask from the various options used, since those options
5201 can affect which builtins are enabled. In the past we used target_flags,
5202 but we've run out of bits, and some options are no longer in target_flags. */
5203 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5204 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5205 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5206 rs6000_builtin_mask);
5208 /* Initialize all of the registers. */
5209 rs6000_init_hard_regno_mode_ok (global_init_p);
5211 /* Save the initial options in case the user does function specific options */
5212 if (global_init_p)
5213 target_option_default_node = target_option_current_node
5214 = build_target_option_node (&global_options);
5216 /* If not explicitly specified via option, decide whether to generate the
5217 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5218 if (TARGET_LINK_STACK == -1)
5219 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
5221 /* Deprecate use of -mno-speculate-indirect-jumps. */
5222 if (!rs6000_speculate_indirect_jumps)
5223 warning (0, "%qs is deprecated and not recommended in any circumstances",
5224 "-mno-speculate-indirect-jumps");
5226 return ret;
5229 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5230 define the target cpu type. */
5232 static void
5233 rs6000_option_override (void)
5235 (void) rs6000_option_override_internal (true);
5239 /* Implement targetm.vectorize.builtin_mask_for_load. */
5240 static tree
5241 rs6000_builtin_mask_for_load (void)
5243 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5244 if ((TARGET_ALTIVEC && !TARGET_VSX)
5245 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5246 return altivec_builtin_mask_for_load;
5247 else
5248 return 0;
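
/* Editor's note (not part of GCC): the mask returned above feeds the
   classic AltiVec realignment scheme, in which lvsl derives a permute
   control vector from the misaligned address and vperm then merges two
   aligned quadword loads into the desired unaligned value.  Targets with
   efficient unaligned VSX loads skip the scheme, hence the 0 return.  */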
5251 /* Implement LOOP_ALIGN. */
5253 rs6000_loop_align (rtx label)
5255 basic_block bb;
5256 int ninsns;
5258 /* Don't override loop alignment if -falign-loops was specified. */
5259 if (!can_override_loop_align)
5260 return align_loops_log;
5262 bb = BLOCK_FOR_INSN (label);
5263 ninsns = num_loop_insns(bb->loop_father);
5265 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5266 if (ninsns > 4 && ninsns <= 8
5267 && (rs6000_tune == PROCESSOR_POWER4
5268 || rs6000_tune == PROCESSOR_POWER5
5269 || rs6000_tune == PROCESSOR_POWER6
5270 || rs6000_tune == PROCESSOR_POWER7
5271 || rs6000_tune == PROCESSOR_POWER8))
5272 return 5;
5273 else
5274 return align_loops_log;
5277 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5278 static int
5279 rs6000_loop_align_max_skip (rtx_insn *label)
5281 return (1 << rs6000_loop_align (label)) - 1;
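
/* Editor's note (not part of GCC): both hooks above trade in log2 units.
   Returning 5 from rs6000_loop_align requests 1 << 5 = 32-byte alignment,
   and rs6000_loop_align_max_skip then permits up to (1 << 5) - 1 = 31
   bytes of padding to reach that boundary.  */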
5284 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5285 after applying N loop iterations. This routine does not determine
5286 how many iterations are required to reach the desired alignment. */
5288 static bool
5289 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5291 if (is_packed)
5292 return false;
5294 if (TARGET_32BIT)
5296 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5297 return true;
5299 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5300 return true;
5302 return false;
5304 else
5306 if (TARGET_MACHO)
5307 return false;
5309 /* Assume that all other types are naturally aligned. CHECKME! */
5310 return true;
5314 /* Return true if the vector misalignment factor is supported by the
5315 target. */
5316 static bool
5317 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5318 const_tree type,
5319 int misalignment,
5320 bool is_packed)
5322 if (TARGET_VSX)
5324 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5325 return true;
5327 /* Return false if the movmisalign pattern is not supported for this mode. */
5328 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5329 return false;
5331 if (misalignment == -1)
5333 /* Misalignment factor is unknown at compile time but we know
5334 it's word aligned. */
5335 if (rs6000_vector_alignment_reachable (type, is_packed))
5337 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5339 if (element_size == 64 || element_size == 32)
5340 return true;
5343 return false;
5346 /* VSX supports word-aligned vectors. */
5347 if (misalignment % 4 == 0)
5348 return true;
5350 return false;
5353 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5354 static int
5355 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5356 tree vectype, int misalign)
5358 unsigned elements;
5359 tree elem_type;
5361 switch (type_of_cost)
5363 case scalar_stmt:
5364 case scalar_load:
5365 case scalar_store:
5366 case vector_stmt:
5367 case vector_load:
5368 case vector_store:
5369 case vec_to_scalar:
5370 case scalar_to_vec:
5371 case cond_branch_not_taken:
5372 return 1;
5374 case vec_perm:
5375 if (TARGET_VSX)
5376 return 3;
5377 else
5378 return 1;
5380 case vec_promote_demote:
5381 if (TARGET_VSX)
5382 return 4;
5383 else
5384 return 1;
5386 case cond_branch_taken:
5387 return 3;
5389 case unaligned_load:
5390 case vector_gather_load:
5391 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5392 return 1;
5394 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5396 elements = TYPE_VECTOR_SUBPARTS (vectype);
5397 if (elements == 2)
5398 /* Double word aligned. */
5399 return 2;
5401 if (elements == 4)
5403 switch (misalign)
5405 case 8:
5406 /* Double word aligned. */
5407 return 2;
5409 case -1:
5410 /* Unknown misalignment. */
5411 case 4:
5412 case 12:
5413 /* Word aligned. */
5414 return 22;
5416 default:
5417 gcc_unreachable ();
5422 if (TARGET_ALTIVEC)
5423 /* Misaligned loads are not supported. */
5424 gcc_unreachable ();
5426 return 2;
5428 case unaligned_store:
5429 case vector_scatter_store:
5430 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5431 return 1;
5433 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5435 elements = TYPE_VECTOR_SUBPARTS (vectype);
5436 if (elements == 2)
5437 /* Double word aligned. */
5438 return 2;
5440 if (elements == 4)
5442 switch (misalign)
5444 case 8:
5445 /* Double word aligned. */
5446 return 2;
5448 case -1:
5449 /* Unknown misalignment. */
5450 case 4:
5451 case 12:
5452 /* Word aligned. */
5453 return 23;
5455 default:
5456 gcc_unreachable ();
5461 if (TARGET_ALTIVEC)
5462 /* Misaligned stores are not supported. */
5463 gcc_unreachable ();
5465 return 2;
5467 case vec_construct:
5468 /* This is a rough approximation assuming non-constant elements
5469 constructed into a vector via element insertion. FIXME:
5470 vec_construct is not granular enough for uniformly good
5471 decisions. If the initialization is a splat, this is
5472 cheaper than we estimate. Improve this someday. */
5473 elem_type = TREE_TYPE (vectype);
5474 /* 32-bit vectors loaded into registers are stored as double
5475 precision, so we need 2 permutes, 2 converts, and 1 merge
5476 to construct a vector of short floats from them. */
5477 if (SCALAR_FLOAT_TYPE_P (elem_type)
5478 && TYPE_PRECISION (elem_type) == 32)
5479 return 5;
5480 /* On POWER9, integer vector types are built up in GPRs and then
5481 use a direct move (2 cycles). For POWER8 this is even worse,
5482 as we need two direct moves and a merge, and the direct moves
5483 are five cycles. */
5484 else if (INTEGRAL_TYPE_P (elem_type))
5486 if (TARGET_P9_VECTOR)
5487 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5488 else
5489 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5491 else
5492 /* V2DFmode doesn't need a direct move. */
5493 return 2;
5495 default:
5496 gcc_unreachable ();
5500 /* Implement targetm.vectorize.preferred_simd_mode. */
5502 static machine_mode
5503 rs6000_preferred_simd_mode (scalar_mode mode)
5505 if (TARGET_VSX)
5506 switch (mode)
5508 case E_DFmode:
5509 return V2DFmode;
5510 default:;
5512 if (TARGET_ALTIVEC || TARGET_VSX)
5513 switch (mode)
5515 case E_SFmode:
5516 return V4SFmode;
5517 case E_TImode:
5518 return V1TImode;
5519 case E_DImode:
5520 return V2DImode;
5521 case E_SImode:
5522 return V4SImode;
5523 case E_HImode:
5524 return V8HImode;
5525 case E_QImode:
5526 return V16QImode;
5527 default:;
5529 return word_mode;
5532 typedef struct _rs6000_cost_data
5534 struct loop *loop_info;
5535 unsigned cost[3];
5536 } rs6000_cost_data;
5538 /* Test for likely overcommitment of vector hardware resources. If a
5539 loop iteration is relatively large, and too large a percentage of
5540 instructions in the loop are vectorized, the cost model may not
5541 adequately reflect delays from unavailable vector resources.
5542 Penalize the loop body cost for this case. */
5544 static void
5545 rs6000_density_test (rs6000_cost_data *data)
5547 const int DENSITY_PCT_THRESHOLD = 85;
5548 const int DENSITY_SIZE_THRESHOLD = 70;
5549 const int DENSITY_PENALTY = 10;
5550 struct loop *loop = data->loop_info;
5551 basic_block *bbs = get_loop_body (loop);
5552 int nbbs = loop->num_nodes;
5553 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5554 int i, density_pct;
5556 for (i = 0; i < nbbs; i++)
5558 basic_block bb = bbs[i];
5559 gimple_stmt_iterator gsi;
5561 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5563 gimple *stmt = gsi_stmt (gsi);
5564 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5566 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5567 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5568 not_vec_cost++;
5572 free (bbs);
5573 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5575 if (density_pct > DENSITY_PCT_THRESHOLD
5576 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5578 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5579 if (dump_enabled_p ())
5580 dump_printf_loc (MSG_NOTE, vect_location,
5581 "density %d%%, cost %d exceeds threshold, penalizing "
5582 "loop body cost by %d%%", density_pct,
5583 vec_cost + not_vec_cost, DENSITY_PENALTY);
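
/* Editor's sketch (not part of GCC): the penalty arithmetic above, run on
   sample numbers.  With vec_cost = 90 and not_vec_cost = 10, density is
   90% > 85% and size 100 > 70, so the body cost becomes
   90 * (100 + 10) / 100 = 99.  */
static int
demo_density_penalty (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    vec_cost = vec_cost * (100 + 10) / 100;	/* penalized body cost */
  return vec_cost;
}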
5587 /* Implement targetm.vectorize.init_cost. */
5589 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5590 instruction is needed by the vectorization. */
5591 static bool rs6000_vect_nonmem;
5593 static void *
5594 rs6000_init_cost (struct loop *loop_info)
5596 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5597 data->loop_info = loop_info;
5598 data->cost[vect_prologue] = 0;
5599 data->cost[vect_body] = 0;
5600 data->cost[vect_epilogue] = 0;
5601 rs6000_vect_nonmem = false;
5602 return data;
5605 /* Implement targetm.vectorize.add_stmt_cost. */
5607 static unsigned
5608 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5609 struct _stmt_vec_info *stmt_info, int misalign,
5610 enum vect_cost_model_location where)
5612 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5613 unsigned retval = 0;
5615 if (flag_vect_cost_model)
5617 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5618 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5619 misalign);
5620 /* Statements in an inner loop relative to the loop being
5621 vectorized are weighted more heavily. The value here is
5622 arbitrary and could potentially be improved with analysis. */
5623 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5624 count *= 50; /* FIXME. */
5626 retval = (unsigned) (count * stmt_cost);
5627 cost_data->cost[where] += retval;
5629 /* Check whether we're doing something other than just a copy loop.
5630 Not all such loops may be profitably vectorized; see
5631 rs6000_finish_cost. */
5632 if ((kind == vec_to_scalar || kind == vec_perm
5633 || kind == vec_promote_demote || kind == vec_construct
5634 || kind == scalar_to_vec)
5635 || (where == vect_body && kind == vector_stmt))
5636 rs6000_vect_nonmem = true;
5639 return retval;
5642 /* Implement targetm.vectorize.finish_cost. */
5644 static void
5645 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5646 unsigned *body_cost, unsigned *epilogue_cost)
5648 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5650 if (cost_data->loop_info)
5651 rs6000_density_test (cost_data);
5653 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5654 that require versioning for any reason. The vectorization is at
5655 best a wash inside the loop, and the versioning checks make
5656 profitability highly unlikely and potentially quite harmful. */
5657 if (cost_data->loop_info)
5659 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5660 if (!rs6000_vect_nonmem
5661 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5662 && LOOP_REQUIRES_VERSIONING (vec_info))
5663 cost_data->cost[vect_body] += 10000;
5666 *prologue_cost = cost_data->cost[vect_prologue];
5667 *body_cost = cost_data->cost[vect_body];
5668 *epilogue_cost = cost_data->cost[vect_epilogue];
5671 /* Implement targetm.vectorize.destroy_cost_data. */
5673 static void
5674 rs6000_destroy_cost_data (void *data)
5676 free (data);
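
/* Editor's sketch (not part of GCC): the approximate order in which the
   middle-end vectorizer drives the four cost hooks above; the real driver
   lives in the tree-vect* passes, and this stub only shows the protocol.  */
static unsigned
demo_cost_protocol (struct loop *loop_info)
{
  void *data = rs6000_init_cost (loop_info);
  /* Pretend the loop body contains one vector statement with unknown
     misalignment (-1).  */
  rs6000_add_stmt_cost (data, 1, vector_stmt, NULL, -1, vect_body);
  unsigned prologue, body, epilogue;
  rs6000_finish_cost (data, &prologue, &body, &epilogue);
  rs6000_destroy_cost_data (data);
  return prologue + body + epilogue;
}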
5679 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5680 library with vectorized intrinsics. */
5682 static tree
5683 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5684 tree type_in)
5686 char name[32];
5687 const char *suffix = NULL;
5688 tree fntype, new_fndecl, bdecl = NULL_TREE;
5689 int n_args = 1;
5690 const char *bname;
5691 machine_mode el_mode, in_mode;
5692 int n, in_n;
5694 /* Libmass is suitable only for unsafe math, as it does not correctly
5695 support parts of IEEE arithmetic, such as denormals, with the required
5696 precision. Only support it if we have VSX to use the simd d2 or f4
5697 functions. XXX: Add variable length support. */
5698 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5699 return NULL_TREE;
5701 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5702 n = TYPE_VECTOR_SUBPARTS (type_out);
5703 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5704 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5705 if (el_mode != in_mode
5706 || n != in_n)
5707 return NULL_TREE;
5709 switch (fn)
5711 CASE_CFN_ATAN2:
5712 CASE_CFN_HYPOT:
5713 CASE_CFN_POW:
5714 n_args = 2;
5715 gcc_fallthrough ();
5717 CASE_CFN_ACOS:
5718 CASE_CFN_ACOSH:
5719 CASE_CFN_ASIN:
5720 CASE_CFN_ASINH:
5721 CASE_CFN_ATAN:
5722 CASE_CFN_ATANH:
5723 CASE_CFN_CBRT:
5724 CASE_CFN_COS:
5725 CASE_CFN_COSH:
5726 CASE_CFN_ERF:
5727 CASE_CFN_ERFC:
5728 CASE_CFN_EXP2:
5729 CASE_CFN_EXP:
5730 CASE_CFN_EXPM1:
5731 CASE_CFN_LGAMMA:
5732 CASE_CFN_LOG10:
5733 CASE_CFN_LOG1P:
5734 CASE_CFN_LOG2:
5735 CASE_CFN_LOG:
5736 CASE_CFN_SIN:
5737 CASE_CFN_SINH:
5738 CASE_CFN_SQRT:
5739 CASE_CFN_TAN:
5740 CASE_CFN_TANH:
5741 if (el_mode == DFmode && n == 2)
5743 bdecl = mathfn_built_in (double_type_node, fn);
5744 suffix = "d2"; /* pow -> powd2 */
5746 else if (el_mode == SFmode && n == 4)
5748 bdecl = mathfn_built_in (float_type_node, fn);
5749 suffix = "4"; /* powf -> powf4 */
5751 else
5752 return NULL_TREE;
5753 if (!bdecl)
5754 return NULL_TREE;
5755 break;
5757 default:
5758 return NULL_TREE;
5761 gcc_assert (suffix != NULL);
5762 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5763 if (!bname)
5764 return NULL_TREE;
5766 strcpy (name, bname + sizeof ("__builtin_") - 1);
5767 strcat (name, suffix);
5769 if (n_args == 1)
5770 fntype = build_function_type_list (type_out, type_in, NULL);
5771 else if (n_args == 2)
5772 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5773 else
5774 gcc_unreachable ();
5776 /* Build a function declaration for the vectorized function. */
5777 new_fndecl = build_decl (BUILTINS_LOCATION,
5778 FUNCTION_DECL, get_identifier (name), fntype);
5779 TREE_PUBLIC (new_fndecl) = 1;
5780 DECL_EXTERNAL (new_fndecl) = 1;
5781 DECL_IS_NOVOPS (new_fndecl) = 1;
5782 TREE_READONLY (new_fndecl) = 1;
5784 return new_fndecl;
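
/* Editor's note (worked example for the MASS handler above, not part of
   GCC): vectorizing a V2DFmode pow call strips the "__builtin_" prefix
   from "__builtin_pow", appends "d2", and declares an external function
   "powd2" of type V2DF (V2DF, V2DF), per the "pow -> powd2" naming noted
   in the code above.  */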
5787 /* Returns a function decl for a vectorized version of the builtin function
5788 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5789 if it is not available. */
5791 static tree
5792 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5793 tree type_in)
5795 machine_mode in_mode, out_mode;
5796 int in_n, out_n;
5798 if (TARGET_DEBUG_BUILTIN)
5799 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5800 combined_fn_name (combined_fn (fn)),
5801 GET_MODE_NAME (TYPE_MODE (type_out)),
5802 GET_MODE_NAME (TYPE_MODE (type_in)));
5804 if (TREE_CODE (type_out) != VECTOR_TYPE
5805 || TREE_CODE (type_in) != VECTOR_TYPE)
5806 return NULL_TREE;
5808 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5809 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5810 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5811 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5813 switch (fn)
5815 CASE_CFN_COPYSIGN:
5816 if (VECTOR_UNIT_VSX_P (V2DFmode)
5817 && out_mode == DFmode && out_n == 2
5818 && in_mode == DFmode && in_n == 2)
5819 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5820 if (VECTOR_UNIT_VSX_P (V4SFmode)
5821 && out_mode == SFmode && out_n == 4
5822 && in_mode == SFmode && in_n == 4)
5823 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5824 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5825 && out_mode == SFmode && out_n == 4
5826 && in_mode == SFmode && in_n == 4)
5827 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5828 break;
5829 CASE_CFN_CEIL:
5830 if (VECTOR_UNIT_VSX_P (V2DFmode)
5831 && out_mode == DFmode && out_n == 2
5832 && in_mode == DFmode && in_n == 2)
5833 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5834 if (VECTOR_UNIT_VSX_P (V4SFmode)
5835 && out_mode == SFmode && out_n == 4
5836 && in_mode == SFmode && in_n == 4)
5837 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5838 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5839 && out_mode == SFmode && out_n == 4
5840 && in_mode == SFmode && in_n == 4)
5841 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5842 break;
5843 CASE_CFN_FLOOR:
5844 if (VECTOR_UNIT_VSX_P (V2DFmode)
5845 && out_mode == DFmode && out_n == 2
5846 && in_mode == DFmode && in_n == 2)
5847 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5848 if (VECTOR_UNIT_VSX_P (V4SFmode)
5849 && out_mode == SFmode && out_n == 4
5850 && in_mode == SFmode && in_n == 4)
5851 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5852 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5853 && out_mode == SFmode && out_n == 4
5854 && in_mode == SFmode && in_n == 4)
5855 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5856 break;
5857 CASE_CFN_FMA:
5858 if (VECTOR_UNIT_VSX_P (V2DFmode)
5859 && out_mode == DFmode && out_n == 2
5860 && in_mode == DFmode && in_n == 2)
5861 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5862 if (VECTOR_UNIT_VSX_P (V4SFmode)
5863 && out_mode == SFmode && out_n == 4
5864 && in_mode == SFmode && in_n == 4)
5865 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5866 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5867 && out_mode == SFmode && out_n == 4
5868 && in_mode == SFmode && in_n == 4)
5869 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5870 break;
5871 CASE_CFN_TRUNC:
5872 if (VECTOR_UNIT_VSX_P (V2DFmode)
5873 && out_mode == DFmode && out_n == 2
5874 && in_mode == DFmode && in_n == 2)
5875 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5876 if (VECTOR_UNIT_VSX_P (V4SFmode)
5877 && out_mode == SFmode && out_n == 4
5878 && in_mode == SFmode && in_n == 4)
5879 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5880 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5881 && out_mode == SFmode && out_n == 4
5882 && in_mode == SFmode && in_n == 4)
5883 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5884 break;
5885 CASE_CFN_NEARBYINT:
5886 if (VECTOR_UNIT_VSX_P (V2DFmode)
5887 && flag_unsafe_math_optimizations
5888 && out_mode == DFmode && out_n == 2
5889 && in_mode == DFmode && in_n == 2)
5890 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5891 if (VECTOR_UNIT_VSX_P (V4SFmode)
5892 && flag_unsafe_math_optimizations
5893 && out_mode == SFmode && out_n == 4
5894 && in_mode == SFmode && in_n == 4)
5895 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5896 break;
5897 CASE_CFN_RINT:
5898 if (VECTOR_UNIT_VSX_P (V2DFmode)
5899 && !flag_trapping_math
5900 && out_mode == DFmode && out_n == 2
5901 && in_mode == DFmode && in_n == 2)
5902 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5903 if (VECTOR_UNIT_VSX_P (V4SFmode)
5904 && !flag_trapping_math
5905 && out_mode == SFmode && out_n == 4
5906 && in_mode == SFmode && in_n == 4)
5907 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5908 break;
5909 default:
5910 break;
5913 /* Generate calls to libmass if appropriate. */
5914 if (rs6000_veclib_handler)
5915 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5917 return NULL_TREE;
5920 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5922 static tree
5923 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5924 tree type_in)
5926 machine_mode in_mode, out_mode;
5927 int in_n, out_n;
5929 if (TARGET_DEBUG_BUILTIN)
5930 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5931 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5932 GET_MODE_NAME (TYPE_MODE (type_out)),
5933 GET_MODE_NAME (TYPE_MODE (type_in)));
5935 if (TREE_CODE (type_out) != VECTOR_TYPE
5936 || TREE_CODE (type_in) != VECTOR_TYPE)
5937 return NULL_TREE;
5939 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5940 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5941 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5942 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5944 enum rs6000_builtins fn
5945 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5946 switch (fn)
5948 case RS6000_BUILTIN_RSQRTF:
5949 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5950 && out_mode == SFmode && out_n == 4
5951 && in_mode == SFmode && in_n == 4)
5952 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5953 break;
5954 case RS6000_BUILTIN_RSQRT:
5955 if (VECTOR_UNIT_VSX_P (V2DFmode)
5956 && out_mode == DFmode && out_n == 2
5957 && in_mode == DFmode && in_n == 2)
5958 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5959 break;
5960 case RS6000_BUILTIN_RECIPF:
5961 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5962 && out_mode == SFmode && out_n == 4
5963 && in_mode == SFmode && in_n == 4)
5964 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5965 break;
5966 case RS6000_BUILTIN_RECIP:
5967 if (VECTOR_UNIT_VSX_P (V2DFmode)
5968 && out_mode == DFmode && out_n == 2
5969 && in_mode == DFmode && in_n == 2)
5970 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5971 break;
5972 default:
5973 break;
5975 return NULL_TREE;
5978 /* Default CPU string for rs6000*_file_start functions. */
5979 static const char *rs6000_default_cpu;
5981 /* Do anything needed at the start of the asm file. */
5983 static void
5984 rs6000_file_start (void)
5986 char buffer[80];
5987 const char *start = buffer;
5988 FILE *file = asm_out_file;
5990 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5992 default_file_start ();
5994 if (flag_verbose_asm)
5996 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5998 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6000 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6001 start = "";
6004 if (global_options_set.x_rs6000_cpu_index)
6006 fprintf (file, "%s -mcpu=%s", start,
6007 processor_target_table[rs6000_cpu_index].name);
6008 start = "";
6011 if (global_options_set.x_rs6000_tune_index)
6013 fprintf (file, "%s -mtune=%s", start,
6014 processor_target_table[rs6000_tune_index].name);
6015 start = "";
6018 if (PPC405_ERRATUM77)
6020 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6021 start = "";
6024 #ifdef USING_ELFOS_H
6025 switch (rs6000_sdata)
6027 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6028 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6029 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6030 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6033 if (rs6000_sdata && g_switch_value)
6035 fprintf (file, "%s -G %d", start,
6036 g_switch_value);
6037 start = "";
6039 #endif
6041 if (*start == '\0')
6042 putc ('\n', file);
6045 #ifdef USING_ELFOS_H
6046 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6047 && !global_options_set.x_rs6000_cpu_index)
6049 fputs ("\t.machine ", asm_out_file);
6050 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6051 fputs ("power9\n", asm_out_file);
6052 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6053 fputs ("power8\n", asm_out_file);
6054 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6055 fputs ("power7\n", asm_out_file);
6056 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6057 fputs ("power6\n", asm_out_file);
6058 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6059 fputs ("power5\n", asm_out_file);
6060 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6061 fputs ("power4\n", asm_out_file);
6062 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6063 fputs ("ppc64\n", asm_out_file);
6064 else
6065 fputs ("ppc\n", asm_out_file);
6067 #endif
6069 if (DEFAULT_ABI == ABI_ELFv2)
6070 fprintf (file, "\t.abiversion 2\n");
6074 /* Return nonzero if this function is known to have a null epilogue. */
6077 direct_return (void)
6079 if (reload_completed)
6081 rs6000_stack_t *info = rs6000_stack_info ();
6083 if (info->first_gp_reg_save == 32
6084 && info->first_fp_reg_save == 64
6085 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6086 && ! info->lr_save_p
6087 && ! info->cr_save_p
6088 && info->vrsave_size == 0
6089 && ! info->push_p)
6090 return 1;
6093 return 0;
6096 /* Return the number of instructions it takes to form a constant in an
6097 integer register. */
6100 num_insns_constant_wide (HOST_WIDE_INT value)
6102 /* signed constant loadable with addi */
6103 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6104 return 1;
6106 /* constant loadable with addis */
6107 else if ((value & 0xffff) == 0
6108 && (value >> 31 == -1 || value >> 31 == 0))
6109 return 1;
6111 else if (TARGET_POWERPC64)
6113 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6114 HOST_WIDE_INT high = value >> 31;
6116 if (high == 0 || high == -1)
6117 return 2;
6119 high >>= 1;
6121 if (low == 0)
6122 return num_insns_constant_wide (high) + 1;
6123 else if (high == 0)
6124 return num_insns_constant_wide (low) + 1;
6125 else
6126 return (num_insns_constant_wide (high)
6127 + num_insns_constant_wide (low) + 1);
6130 else
6131 return 2;
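
/* Editor's note (worked example for num_insns_constant_wide, not part of
   GCC): 0x7fff fits the addi test, so it costs 1 insn (a single li);
   0x12345678 has a nonzero low halfword and a nonzero high halfword, so
   it costs 2 insns, e.g.
	lis  r9,0x1234		# addis r9,0,0x1234
	ori  r9,r9,0x5678  */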
6135 num_insns_constant (rtx op, machine_mode mode)
6137 HOST_WIDE_INT low, high;
6139 switch (GET_CODE (op))
6141 case CONST_INT:
6142 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6143 && rs6000_is_valid_and_mask (op, mode))
6144 return 2;
6145 else
6146 return num_insns_constant_wide (INTVAL (op));
6148 case CONST_WIDE_INT:
6150 int i;
6151 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6152 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6153 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6154 return ins;
6157 case CONST_DOUBLE:
6158 if (mode == SFmode || mode == SDmode)
6160 long l;
6162 if (DECIMAL_FLOAT_MODE_P (mode))
6163 REAL_VALUE_TO_TARGET_DECIMAL32
6164 (*CONST_DOUBLE_REAL_VALUE (op), l);
6165 else
6166 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6167 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6170 long l[2];
6171 if (DECIMAL_FLOAT_MODE_P (mode))
6172 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6173 else
6174 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6175 high = l[WORDS_BIG_ENDIAN == 0];
6176 low = l[WORDS_BIG_ENDIAN != 0];
6178 if (TARGET_32BIT)
6179 return (num_insns_constant_wide (low)
6180 + num_insns_constant_wide (high));
6181 else
6183 if ((high == 0 && low >= 0)
6184 || (high == -1 && low < 0))
6185 return num_insns_constant_wide (low);
6187 else if (rs6000_is_valid_and_mask (op, mode))
6188 return 2;
6190 else if (low == 0)
6191 return num_insns_constant_wide (high) + 1;
6193 else
6194 return (num_insns_constant_wide (high)
6195 + num_insns_constant_wide (low) + 1);
6198 default:
6199 gcc_unreachable ();
6203 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6204 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6205 corresponding element of the vector, but for V4SFmode, the
6206 corresponding "float" is interpreted as an SImode integer. */
6208 HOST_WIDE_INT
6209 const_vector_elt_as_int (rtx op, unsigned int elt)
6211 rtx tmp;
6213 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6214 gcc_assert (GET_MODE (op) != V2DImode
6215 && GET_MODE (op) != V2DFmode);
6217 tmp = CONST_VECTOR_ELT (op, elt);
6218 if (GET_MODE (op) == V4SFmode)
6219 tmp = gen_lowpart (SImode, tmp);
6220 return INTVAL (tmp);
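
/* Editor's sketch (not part of GCC; memcpy comes via system.h): the
   V4SFmode case above reads the float's bit pattern, not its value, so
   1.0f viewed as an SImode integer is its IEEE-754 single encoding.  */
static void
demo_float_bits (void)
{
  float f = 1.0f;
  unsigned int bits;
  memcpy (&bits, &f, sizeof (bits));	/* same reinterpretation as gen_lowpart */
  gcc_assert (bits == 0x3f800000);
}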
6223 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6224 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6225 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6226 all items are set to the same value and contain COPIES replicas of the
6227 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6228 operand and the others are set to the value of the operand's msb. */
6230 static bool
6231 vspltis_constant (rtx op, unsigned step, unsigned copies)
6233 machine_mode mode = GET_MODE (op);
6234 machine_mode inner = GET_MODE_INNER (mode);
6236 unsigned i;
6237 unsigned nunits;
6238 unsigned bitsize;
6239 unsigned mask;
6241 HOST_WIDE_INT val;
6242 HOST_WIDE_INT splat_val;
6243 HOST_WIDE_INT msb_val;
6245 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6246 return false;
6248 nunits = GET_MODE_NUNITS (mode);
6249 bitsize = GET_MODE_BITSIZE (inner);
6250 mask = GET_MODE_MASK (inner);
6252 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6253 splat_val = val;
6254 msb_val = val >= 0 ? 0 : -1;
6256 /* Construct the value to be splatted, if possible. If not, return 0. */
6257 for (i = 2; i <= copies; i *= 2)
6259 HOST_WIDE_INT small_val;
6260 bitsize /= 2;
6261 small_val = splat_val >> bitsize;
6262 mask >>= bitsize;
6263 if (splat_val != ((HOST_WIDE_INT)
6264 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6265 | (small_val & mask)))
6266 return false;
6267 splat_val = small_val;
6270 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6271 if (EASY_VECTOR_15 (splat_val))
6274 /* Also check if we can splat, and then add the result to itself. Do so if
6275 the value is positive, or if the splat instruction is using OP's mode;
6276 for splat_val < 0, the splat and the add should use the same mode. */
6277 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6278 && (splat_val >= 0 || (step == 1 && copies == 1)))
6281 /* Also check if we are loading up the most significant bit, which can be
6282 done by loading up -1 and shifting the value left by -1. */
6283 else if (EASY_VECTOR_MSB (splat_val, inner))
6286 else
6287 return false;
6289 /* Check if VAL is present in every STEP-th element, and the
6290 other elements are filled with its most significant bit. */
6291 for (i = 1; i < nunits; ++i)
6293 HOST_WIDE_INT desired_val;
6294 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6295 if ((i & (step - 1)) == 0)
6296 desired_val = val;
6297 else
6298 desired_val = msb_val;
6300 if (desired_val != const_vector_elt_as_int (op, elt))
6301 return false;
6304 return true;
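
/* Editor's note (worked example for vspltis_constant, not part of GCC):
   a V8HImode vector whose every element is 0x0101 is accepted with
   step = 1, copies = 2: the loop halves 0x0101 into small_val = 0x01,
   checks that (0x01 << 8) | 0x01 reproduces 0x0101, and EASY_VECTOR_15 (1)
   holds, so the constant is emitted as "vspltisb %0,1".  */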
6307 /* Like vspltis_constant, but allow the value to be shifted left with a
6308 VSLDOI instruction, filling in the bottom elements with 0 or -1.
6310 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6311 for the number of zeroes to shift in, or negative for the number of 0xff
6312 bytes to shift in.
6314 OP is a CONST_VECTOR. */
6317 vspltis_shifted (rtx op)
6319 machine_mode mode = GET_MODE (op);
6320 machine_mode inner = GET_MODE_INNER (mode);
6322 unsigned i, j;
6323 unsigned nunits;
6324 unsigned mask;
6326 HOST_WIDE_INT val;
6328 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6329 return false;
6331 /* We need to create pseudo registers to do the shift, so don't recognize
6332 shift vector constants after reload. */
6333 if (!can_create_pseudo_p ())
6334 return false;
6336 nunits = GET_MODE_NUNITS (mode);
6337 mask = GET_MODE_MASK (inner);
6339 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6341 /* Check if the value can really be the operand of a vspltis[bhw]. */
6342 if (EASY_VECTOR_15 (val))
6345 /* Also check if we are loading up the most significant bit which can be done
6346 by loading up -1 and shifting the value left by -1. */
6347 else if (EASY_VECTOR_MSB (val, inner))
6350 else
6351 return 0;
6353 /* Check if VAL is present in every STEP-th element until we find elements
6354 that are 0 or all 1 bits. */
6355 for (i = 1; i < nunits; ++i)
6357 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6358 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6360 /* If the value isn't the splat value, check for the remaining elements
6361 being 0/-1. */
6362 if (val != elt_val)
6364 if (elt_val == 0)
6366 for (j = i+1; j < nunits; ++j)
6368 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6369 if (const_vector_elt_as_int (op, elt2) != 0)
6370 return 0;
6373 return (nunits - i) * GET_MODE_SIZE (inner);
6376 else if ((elt_val & mask) == mask)
6378 for (j = i+1; j < nunits; ++j)
6380 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6381 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6382 return 0;
6385 return -((nunits - i) * GET_MODE_SIZE (inner));
6388 else
6389 return 0;
6393 /* If all elements are equal, we don't need to do VSLDOI. */
6394 return 0;
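
/* Editor's note (worked example for vspltis_shifted, not part of GCC):
   on a big-endian target, V4SImode { 5, 5, 5, 0 } splats 5 and then sees
   a zero in element 3, returning (4 - 3) * 4 = 4, i.e. shift in four zero
   bytes with VSLDOI; were the last element -1 instead, the result would
   be -4 (four 0xff bytes).  */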
6398 /* Return true if OP is of the given MODE and can be synthesized
6399 with a vspltisb, vspltish or vspltisw. */
6401 bool
6402 easy_altivec_constant (rtx op, machine_mode mode)
6404 unsigned step, copies;
6406 if (mode == VOIDmode)
6407 mode = GET_MODE (op);
6408 else if (mode != GET_MODE (op))
6409 return false;
6411 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6412 constants. */
6413 if (mode == V2DFmode)
6414 return zero_constant (op, mode);
6416 else if (mode == V2DImode)
6418 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6419 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6420 return false;
6422 if (zero_constant (op, mode))
6423 return true;
6425 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6426 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6427 return true;
6429 return false;
6432 /* V1TImode is a special container for TImode. Ignore for now. */
6433 else if (mode == V1TImode)
6434 return false;
6436 /* Start with a vspltisw. */
6437 step = GET_MODE_NUNITS (mode) / 4;
6438 copies = 1;
6440 if (vspltis_constant (op, step, copies))
6441 return true;
6443 /* Then try with a vspltish. */
6444 if (step == 1)
6445 copies <<= 1;
6446 else
6447 step >>= 1;
6449 if (vspltis_constant (op, step, copies))
6450 return true;
6452 /* And finally a vspltisb. */
6453 if (step == 1)
6454 copies <<= 1;
6455 else
6456 step >>= 1;
6458 if (vspltis_constant (op, step, copies))
6459 return true;
6461 if (vspltis_shifted (op) != 0)
6462 return true;
6464 return false;
6467 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6468 result is OP. Abort if it is not possible. */
6471 gen_easy_altivec_constant (rtx op)
6473 machine_mode mode = GET_MODE (op);
6474 int nunits = GET_MODE_NUNITS (mode);
6475 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6476 unsigned step = nunits / 4;
6477 unsigned copies = 1;
6479 /* Start with a vspltisw. */
6480 if (vspltis_constant (op, step, copies))
6481 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6483 /* Then try with a vspltish. */
6484 if (step == 1)
6485 copies <<= 1;
6486 else
6487 step >>= 1;
6489 if (vspltis_constant (op, step, copies))
6490 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6492 /* And finally a vspltisb. */
6493 if (step == 1)
6494 copies <<= 1;
6495 else
6496 step >>= 1;
6498 if (vspltis_constant (op, step, copies))
6499 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6501 gcc_unreachable ();
6504 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6505 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6507 Return the number of instructions needed (1 or 2) through the address
6508 pointed to by NUM_INSNS_PTR.
6510 Return the constant that is being split via CONSTANT_PTR. */
6512 bool
6513 xxspltib_constant_p (rtx op,
6514 machine_mode mode,
6515 int *num_insns_ptr,
6516 int *constant_ptr)
6518 size_t nunits = GET_MODE_NUNITS (mode);
6519 size_t i;
6520 HOST_WIDE_INT value;
6521 rtx element;
6523 /* Set the returned values to out of bound values. */
6524 *num_insns_ptr = -1;
6525 *constant_ptr = 256;
6527 if (!TARGET_P9_VECTOR)
6528 return false;
6530 if (mode == VOIDmode)
6531 mode = GET_MODE (op);
6533 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6534 return false;
6536 /* Handle (vec_duplicate <constant>). */
6537 if (GET_CODE (op) == VEC_DUPLICATE)
6539 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6540 && mode != V2DImode)
6541 return false;
6543 element = XEXP (op, 0);
6544 if (!CONST_INT_P (element))
6545 return false;
6547 value = INTVAL (element);
6548 if (!IN_RANGE (value, -128, 127))
6549 return false;
6552 /* Handle (const_vector [...]). */
6553 else if (GET_CODE (op) == CONST_VECTOR)
6555 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6556 && mode != V2DImode)
6557 return false;
6559 element = CONST_VECTOR_ELT (op, 0);
6560 if (!CONST_INT_P (element))
6561 return false;
6563 value = INTVAL (element);
6564 if (!IN_RANGE (value, -128, 127))
6565 return false;
6567 for (i = 1; i < nunits; i++)
6569 element = CONST_VECTOR_ELT (op, i);
6570 if (!CONST_INT_P (element))
6571 return false;
6573 if (value != INTVAL (element))
6574 return false;
6578 /* Handle integer constants being loaded into the upper part of the VSX
6579 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6580 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6581 else if (CONST_INT_P (op))
6583 if (!SCALAR_INT_MODE_P (mode))
6584 return false;
6586 value = INTVAL (op);
6587 if (!IN_RANGE (value, -128, 127))
6588 return false;
6590 if (!IN_RANGE (value, -1, 0))
6592 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6593 return false;
6595 if (EASY_VECTOR_15 (value))
6596 return false;
6600 else
6601 return false;
6603 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6604 sign extend. Special case 0/-1 to allow getting any VSX register instead
6605 of an Altivec register. */
6606 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6607 && EASY_VECTOR_15 (value))
6608 return false;
6610 /* Return # of instructions and the constant byte for XXSPLTIB. */
6611 if (mode == V16QImode)
6612 *num_insns_ptr = 1;
6614 else if (IN_RANGE (value, -1, 0))
6615 *num_insns_ptr = 1;
6617 else
6618 *num_insns_ptr = 2;
6620 *constant_ptr = (int) value;
6621 return true;
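
/* Editor's note (worked examples for xxspltib_constant_p, not part of
   GCC): splatting 37 across V16QImode needs 1 insn (xxspltib); splatting
   37 across V4SImode needs 2 (xxspltib plus a vextsb2w sign extension),
   since 37 is outside vspltisw's -16..15 range; splatting 5 across
   V4SImode is rejected here so that the single cheaper "vspltisw %0,5"
   is used instead.  */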
6624 const char *
6625 output_vec_const_move (rtx *operands)
6627 int shift;
6628 machine_mode mode;
6629 rtx dest, vec;
6631 dest = operands[0];
6632 vec = operands[1];
6633 mode = GET_MODE (dest);
6635 if (TARGET_VSX)
6637 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6638 int xxspltib_value = 256;
6639 int num_insns = -1;
6641 if (zero_constant (vec, mode))
6643 if (TARGET_P9_VECTOR)
6644 return "xxspltib %x0,0";
6646 else if (dest_vmx_p)
6647 return "vspltisw %0,0";
6649 else
6650 return "xxlxor %x0,%x0,%x0";
6653 if (all_ones_constant (vec, mode))
6655 if (TARGET_P9_VECTOR)
6656 return "xxspltib %x0,255";
6658 else if (dest_vmx_p)
6659 return "vspltisw %0,-1";
6661 else if (TARGET_P8_VECTOR)
6662 return "xxlorc %x0,%x0,%x0";
6664 else
6665 gcc_unreachable ();
6668 if (TARGET_P9_VECTOR
6669 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6671 if (num_insns == 1)
6673 operands[2] = GEN_INT (xxspltib_value & 0xff);
6674 return "xxspltib %x0,%2";
6677 return "#";
6681 if (TARGET_ALTIVEC)
6683 rtx splat_vec;
6685 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6686 if (zero_constant (vec, mode))
6687 return "vspltisw %0,0";
6689 if (all_ones_constant (vec, mode))
6690 return "vspltisw %0,-1";
6692 /* Do we need to construct a value using VSLDOI? */
6693 shift = vspltis_shifted (vec);
6694 if (shift != 0)
6695 return "#";
6697 splat_vec = gen_easy_altivec_constant (vec);
6698 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6699 operands[1] = XEXP (splat_vec, 0);
6700 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6701 return "#";
6703 switch (GET_MODE (splat_vec))
6705 case E_V4SImode:
6706 return "vspltisw %0,%1";
6708 case E_V8HImode:
6709 return "vspltish %0,%1";
6711 case E_V16QImode:
6712 return "vspltisb %0,%1";
6714 default:
6715 gcc_unreachable ();
6719 gcc_unreachable ();
6722 /* Initialize vector TARGET to VALS. */
6724 void
6725 rs6000_expand_vector_init (rtx target, rtx vals)
6727 machine_mode mode = GET_MODE (target);
6728 machine_mode inner_mode = GET_MODE_INNER (mode);
6729 int n_elts = GET_MODE_NUNITS (mode);
6730 int n_var = 0, one_var = -1;
6731 bool all_same = true, all_const_zero = true;
6732 rtx x, mem;
6733 int i;
6735 for (i = 0; i < n_elts; ++i)
6737 x = XVECEXP (vals, 0, i);
6738 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6739 ++n_var, one_var = i;
6740 else if (x != CONST0_RTX (inner_mode))
6741 all_const_zero = false;
6743 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6744 all_same = false;
6747 if (n_var == 0)
6749 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6750 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6751 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6753 /* Zero register. */
6754 emit_move_insn (target, CONST0_RTX (mode));
6755 return;
6757 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6759 /* Splat immediate. */
6760 emit_insn (gen_rtx_SET (target, const_vec));
6761 return;
6763 else
6765 /* Load from constant pool. */
6766 emit_move_insn (target, const_vec);
6767 return;
6771 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6772 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6774 rtx op[2];
6775 size_t i;
6776 size_t num_elements = all_same ? 1 : 2;
6777 for (i = 0; i < num_elements; i++)
6779 op[i] = XVECEXP (vals, 0, i);
6780 /* Just in case there is a SUBREG with a smaller mode, do a
6781 conversion. */
6782 if (GET_MODE (op[i]) != inner_mode)
6784 rtx tmp = gen_reg_rtx (inner_mode);
6785 convert_move (tmp, op[i], 0);
6786 op[i] = tmp;
6788 /* Allow load with splat double word. */
6789 else if (MEM_P (op[i]))
6791 if (!all_same)
6792 op[i] = force_reg (inner_mode, op[i]);
6794 else if (!REG_P (op[i]))
6795 op[i] = force_reg (inner_mode, op[i]);
6798 if (all_same)
6800 if (mode == V2DFmode)
6801 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6802 else
6803 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6805 else
6807 if (mode == V2DFmode)
6808 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6809 else
6810 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6812 return;
6815 /* Special case initializing a vector of ints if we are on 64-bit systems
6816 with direct move or we have the ISA 3.0 instructions. */
6817 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6818 && TARGET_DIRECT_MOVE_64BIT)
6820 if (all_same)
6822 rtx element0 = XVECEXP (vals, 0, 0);
6823 if (MEM_P (element0))
6824 element0 = rs6000_address_for_fpconvert (element0);
6825 else
6826 element0 = force_reg (SImode, element0);
6828 if (TARGET_P9_VECTOR)
6829 emit_insn (gen_vsx_splat_v4si (target, element0));
6830 else
6832 rtx tmp = gen_reg_rtx (DImode);
6833 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6834 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6836 return;
6838 else
6840 rtx elements[4];
6841 size_t i;
6843 for (i = 0; i < 4; i++)
6845 elements[i] = XVECEXP (vals, 0, i);
6846 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
6847 elements[i] = copy_to_mode_reg (SImode, elements[i]);
6850 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6851 elements[2], elements[3]));
6852 return;
6856 /* With single precision floating point on VSX, single precision is
6857 internally represented as a double, so either make 2 V2DF vectors and
6858 convert those vectors to single precision, or do one conversion and
6859 splat the result to the other elements. */
6860 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6862 if (all_same)
6864 rtx element0 = XVECEXP (vals, 0, 0);
6866 if (TARGET_P9_VECTOR)
6868 if (MEM_P (element0))
6869 element0 = rs6000_address_for_fpconvert (element0);
6871 emit_insn (gen_vsx_splat_v4sf (target, element0));
6874 else
6876 rtx freg = gen_reg_rtx (V4SFmode);
6877 rtx sreg = force_reg (SFmode, element0);
6878 rtx cvt = (TARGET_XSCVDPSPN
6879 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6880 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6882 emit_insn (cvt);
6883 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6884 const0_rtx));
6887 else
6889 rtx dbl_even = gen_reg_rtx (V2DFmode);
6890 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6891 rtx flt_even = gen_reg_rtx (V4SFmode);
6892 rtx flt_odd = gen_reg_rtx (V4SFmode);
6893 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6894 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6895 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6896 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6898 /* Use VMRGEW if we can instead of doing a permute. */
6899 if (TARGET_P8_VECTOR)
6901 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6902 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6903 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6904 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6905 if (BYTES_BIG_ENDIAN)
6906 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6907 else
6908 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6910 else
6912 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6913 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6914 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6915 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6916 rs6000_expand_extract_even (target, flt_even, flt_odd);
6919 return;
6922 /* Special case initializing vector short/char that are splats if we are on
6923 64-bit systems with direct move. */
6924 if (all_same && TARGET_DIRECT_MOVE_64BIT
6925 && (mode == V16QImode || mode == V8HImode))
6927 rtx op0 = XVECEXP (vals, 0, 0);
6928 rtx di_tmp = gen_reg_rtx (DImode);
6930 if (!REG_P (op0))
6931 op0 = force_reg (GET_MODE_INNER (mode), op0);
6933 if (mode == V16QImode)
6935 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6936 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6937 return;
6940 if (mode == V8HImode)
6942 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6943 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6944 return;
6948 /* Store value to stack temp. Load vector element. Splat. However, splat
6949 of 64-bit items is not supported on Altivec. */
6950 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6952 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6953 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6954 XVECEXP (vals, 0, 0));
6955 x = gen_rtx_UNSPEC (VOIDmode,
6956 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6957 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6958 gen_rtvec (2,
6959 gen_rtx_SET (target, mem),
6960 x)));
6961 x = gen_rtx_VEC_SELECT (inner_mode, target,
6962 gen_rtx_PARALLEL (VOIDmode,
6963 gen_rtvec (1, const0_rtx)));
6964 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6965 return;
6968 /* One field is non-constant. Load constant then overwrite
6969 varying field. */
6970 if (n_var == 1)
6972 rtx copy = copy_rtx (vals);
6974 /* Load constant part of vector, substitute neighboring value for
6975 varying element. */
6976 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6977 rs6000_expand_vector_init (target, copy);
6979 /* Insert variable. */
6980 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6981 return;
6984 /* Construct the vector in memory one field at a time
6985 and load the whole vector. */
6986 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6987 for (i = 0; i < n_elts; i++)
6988 emit_move_insn (adjust_address_nv (mem, inner_mode,
6989 i * GET_MODE_SIZE (inner_mode)),
6990 XVECEXP (vals, 0, i));
6991 emit_move_insn (target, mem);
6994 /* Set field ELT of TARGET to VAL. */
6996 void
6997 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6999 machine_mode mode = GET_MODE (target);
7000 machine_mode inner_mode = GET_MODE_INNER (mode);
7001 rtx reg = gen_reg_rtx (mode);
7002 rtx mask, mem, x;
7003 int width = GET_MODE_SIZE (inner_mode);
7004 int i;
7006 val = force_reg (GET_MODE (val), val);
7008 if (VECTOR_MEM_VSX_P (mode))
7010 rtx insn = NULL_RTX;
7011 rtx elt_rtx = GEN_INT (elt);
7013 if (mode == V2DFmode)
7014 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7016 else if (mode == V2DImode)
7017 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7019 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7021 if (mode == V4SImode)
7022 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7023 else if (mode == V8HImode)
7024 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7025 else if (mode == V16QImode)
7026 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7027 else if (mode == V4SFmode)
7028 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7031 if (insn)
7033 emit_insn (insn);
7034 return;
7038 /* Simplify setting single element vectors like V1TImode. */
7039 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7041 emit_move_insn (target, gen_lowpart (mode, val));
7042 return;
7045 /* Load single variable value. */
7046 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7047 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7048 x = gen_rtx_UNSPEC (VOIDmode,
7049 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7050 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7051 gen_rtvec (2,
7052 gen_rtx_SET (reg, mem),
7053 x)));
7055 /* Linear sequence. */
7056 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7057 for (i = 0; i < 16; ++i)
7058 XVECEXP (mask, 0, i) = GEN_INT (i);
7060 /* Set permute mask to insert element into target. */
7061 for (i = 0; i < width; ++i)
7062 XVECEXP (mask, 0, elt*width + i)
7063 = GEN_INT (i + 0x10);
7064 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7066 if (BYTES_BIG_ENDIAN)
7067 x = gen_rtx_UNSPEC (mode,
7068 gen_rtvec (3, target, reg,
7069 force_reg (V16QImode, x)),
7070 UNSPEC_VPERM);
7071 else
7073 if (TARGET_P9_VECTOR)
7074 x = gen_rtx_UNSPEC (mode,
7075 gen_rtvec (3, reg, target,
7076 force_reg (V16QImode, x)),
7077 UNSPEC_VPERMR);
7078 else
7080 /* Invert selector. We prefer to generate VNAND on P8 so
7081 that future fusion opportunities can kick in, but must
7082 generate VNOR elsewhere. */
7083 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7084 rtx iorx = (TARGET_P8_VECTOR
7085 ? gen_rtx_IOR (V16QImode, notx, notx)
7086 : gen_rtx_AND (V16QImode, notx, notx));
7087 rtx tmp = gen_reg_rtx (V16QImode);
7088 emit_insn (gen_rtx_SET (tmp, iorx));
7090 /* Permute with operands reversed and adjusted selector. */
7091 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7092 UNSPEC_VPERM);
7096 emit_insn (gen_rtx_SET (target, x));
7099 /* Extract field ELT from VEC into TARGET. */
7101 void
7102 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7104 machine_mode mode = GET_MODE (vec);
7105 machine_mode inner_mode = GET_MODE_INNER (mode);
7106 rtx mem;
7108 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7110 switch (mode)
7112 default:
7113 break;
7114 case E_V1TImode:
7115 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7116 emit_move_insn (target, gen_lowpart (TImode, vec));
7117 break;
7118 case E_V2DFmode:
7119 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7120 return;
7121 case E_V2DImode:
7122 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7123 return;
7124 case E_V4SFmode:
7125 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7126 return;
7127 case E_V16QImode:
7128 if (TARGET_DIRECT_MOVE_64BIT)
7130 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7131 return;
7133 else
7134 break;
7135 case E_V8HImode:
7136 if (TARGET_DIRECT_MOVE_64BIT)
7138 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7139 return;
7141 else
7142 break;
7143 case E_V4SImode:
7144 if (TARGET_DIRECT_MOVE_64BIT)
7146 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7147 return;
7149 break;
7152 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7153 && TARGET_DIRECT_MOVE_64BIT)
7155 if (GET_MODE (elt) != DImode)
7157 rtx tmp = gen_reg_rtx (DImode);
7158 convert_move (tmp, elt, 0);
7159 elt = tmp;
7161 else if (!REG_P (elt))
7162 elt = force_reg (DImode, elt);
7164 switch (mode)
7166 case E_V2DFmode:
7167 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7168 return;
7170 case E_V2DImode:
7171 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7172 return;
7174 case E_V4SFmode:
7175 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7176 return;
7178 case E_V4SImode:
7179 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7180 return;
7182 case E_V8HImode:
7183 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7184 return;
7186 case E_V16QImode:
7187 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7188 return;
7190 default:
7191 gcc_unreachable ();
7195 gcc_assert (CONST_INT_P (elt));
7197 /* Allocate mode-sized buffer. */
7198 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7200 emit_move_insn (mem, vec);
7202 /* Add offset to field within buffer matching vector element. */
7203 mem = adjust_address_nv (mem, inner_mode,
7204 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7206 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
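/* Illustrative note: extracting element 5 of a V8HImode vector this way
   spills the vector to a stack slot and reloads the halfword at byte
   offset 5 * 2 == 10.  */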
7209 /* Helper function to return the register number of an RTX. */
7210 static inline int
7211 regno_or_subregno (rtx op)
7213 if (REG_P (op))
7214 return REGNO (op);
7215 else if (SUBREG_P (op))
7216 return subreg_regno (op);
7217 else
7218 gcc_unreachable ();
7221 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7222 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7223 temporary (BASE_TMP) to fix up the address. Return the new memory address
7224 that is valid for reads or writes to a given register (SCALAR_REG). */
7226 static rtx
7227 rs6000_adjust_vec_address (rtx scalar_reg,
7228 rtx mem,
7229 rtx element,
7230 rtx base_tmp,
7231 machine_mode scalar_mode)
7233 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7234 rtx addr = XEXP (mem, 0);
7235 rtx element_offset;
7236 rtx new_addr;
7237 bool valid_addr_p;
7239 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7240 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7242 /* Calculate what we need to add to the address to get the element
7243 address. */
7244 if (CONST_INT_P (element))
7245 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7246 else
7248 int byte_shift = exact_log2 (scalar_size);
7249 gcc_assert (byte_shift >= 0);
7251 if (byte_shift == 0)
7252 element_offset = element;
7254 else
7256 if (TARGET_POWERPC64)
7257 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7258 else
7259 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7261 element_offset = base_tmp;
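/* Illustrative note: for V8HImode (SCALAR_SIZE == 2, BYTE_SHIFT == 1) a
   variable index N leaves the byte offset N << 1 in BASE_TMP.  */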
7265 /* Create the new address pointing to the element within the vector. If we
7266 are adding 0, we don't have to change the address. */
7267 if (element_offset == const0_rtx)
7268 new_addr = addr;
7270 /* A simple indirect address can be converted into a reg + offset
7271 address. */
7272 else if (REG_P (addr) || SUBREG_P (addr))
7273 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7275 /* Optimize D-FORM addresses with constant offset with a constant element, to
7276 include the element offset in the address directly. */
7277 else if (GET_CODE (addr) == PLUS)
7279 rtx op0 = XEXP (addr, 0);
7280 rtx op1 = XEXP (addr, 1);
7281 rtx insn;
7283 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7284 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7286 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7287 rtx offset_rtx = GEN_INT (offset);
7289 if (IN_RANGE (offset, -32768, 32767)
7290 && (scalar_size < 8 || (offset & 0x3) == 0))
7291 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7292 else
7294 emit_move_insn (base_tmp, offset_rtx);
7295 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7298 else
7300 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7301 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7303 /* Note, ADDI requires the register being added to be a base
7304 register. If the register was R0, load it up into the temporary
7305 and do the add. */
7306 if (op1_reg_p
7307 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7309 insn = gen_add3_insn (base_tmp, op1, element_offset);
7310 gcc_assert (insn != NULL_RTX);
7311 emit_insn (insn);
7314 else if (ele_reg_p
7315 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7317 insn = gen_add3_insn (base_tmp, element_offset, op1);
7318 gcc_assert (insn != NULL_RTX);
7319 emit_insn (insn);
7322 else
7324 emit_move_insn (base_tmp, op1);
7325 emit_insn (gen_add2_insn (base_tmp, element_offset));
7328 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7332 else
7334 emit_move_insn (base_tmp, addr);
7335 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
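/* Illustrative note: with ADDR == (plus r9 (const_int 16)) and constant
   element 3 of a V4SImode vector, the PLUS case above folds the 12-byte
   element offset into the displacement, giving (plus r9 (const_int 28)).  */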
7338 /* If we have a PLUS, we need to see whether the particular register class
7339 allows for D-FORM or X-FORM addressing. */
7340 if (GET_CODE (new_addr) == PLUS)
7342 rtx op1 = XEXP (new_addr, 1);
7343 addr_mask_type addr_mask;
7344 int scalar_regno = regno_or_subregno (scalar_reg);
7346 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7347 if (INT_REGNO_P (scalar_regno))
7348 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7350 else if (FP_REGNO_P (scalar_regno))
7351 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7353 else if (ALTIVEC_REGNO_P (scalar_regno))
7354 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7356 else
7357 gcc_unreachable ();
7359 if (REG_P (op1) || SUBREG_P (op1))
7360 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7361 else
7362 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7365 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7366 valid_addr_p = true;
7368 else
7369 valid_addr_p = false;
7371 if (!valid_addr_p)
7373 emit_move_insn (base_tmp, new_addr);
7374 new_addr = base_tmp;
7377 return change_address (mem, scalar_mode, new_addr);
7380 /* Split a variable vec_extract operation into the component instructions. */
7382 void
7383 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7384 rtx tmp_altivec)
7386 machine_mode mode = GET_MODE (src);
7387 machine_mode scalar_mode = GET_MODE (dest);
7388 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7389 int byte_shift = exact_log2 (scalar_size);
7391 gcc_assert (byte_shift >= 0);
7393 /* If we are given a memory address, optimize to load just the element. We
7394 don't have to adjust the vector element number on little endian
7395 systems. */
7396 if (MEM_P (src))
7398 gcc_assert (REG_P (tmp_gpr));
7399 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7400 tmp_gpr, scalar_mode));
7401 return;
7404 else if (REG_P (src) || SUBREG_P (src))
7406 int bit_shift = byte_shift + 3;
7407 rtx element2;
7408 int dest_regno = regno_or_subregno (dest);
7409 int src_regno = regno_or_subregno (src);
7410 int element_regno = regno_or_subregno (element);
7412 gcc_assert (REG_P (tmp_gpr));
7414 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7415 a general purpose register. */
7416 if (TARGET_P9_VECTOR
7417 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7418 && INT_REGNO_P (dest_regno)
7419 && ALTIVEC_REGNO_P (src_regno)
7420 && INT_REGNO_P (element_regno))
7422 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7423 rtx element_si = gen_rtx_REG (SImode, element_regno);
7425 if (mode == V16QImode)
7426 emit_insn (BYTES_BIG_ENDIAN
7427 ? gen_vextublx (dest_si, element_si, src)
7428 : gen_vextubrx (dest_si, element_si, src));
7430 else if (mode == V8HImode)
7432 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7433 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7434 emit_insn (BYTES_BIG_ENDIAN
7435 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7436 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7440 else
7442 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7443 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7444 emit_insn (BYTES_BIG_ENDIAN
7445 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7446 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7449 return;
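/* The VEXTU{B,H,W}{L,R}X instructions take a byte index, so the shifts
   above scale the element number first; e.g. element 5 of a V8HImode
   vector becomes byte index 10.  */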
7453 gcc_assert (REG_P (tmp_altivec));
7455 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7456 an XOR, otherwise we need to subtract. The shift amount is chosen so
7457 that VSLO will shift the element into the upper position (adding 3 to convert a
7458 byte shift into a bit shift). */
7459 if (scalar_size == 8)
7461 if (!BYTES_BIG_ENDIAN)
7463 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7464 element2 = tmp_gpr;
7466 else
7467 element2 = element;
7469 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7470 bit. */
7471 emit_insn (gen_rtx_SET (tmp_gpr,
7472 gen_rtx_AND (DImode,
7473 gen_rtx_ASHIFT (DImode,
7474 element2,
7475 GEN_INT (6)),
7476 GEN_INT (64))));
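/* The AND with 64 keeps a single bit, so TMP_GPR ends up holding the
   doubleword bit offset (0 or 64) used below as the VSLO shift amount.  */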
7478 else
7480 if (!BYTES_BIG_ENDIAN)
7482 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7484 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7485 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7486 element2 = tmp_gpr;
7488 else
7489 element2 = element;
7491 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7494 /* Get the value into the lower byte of the Altivec register where VSLO
7495 expects it. */
7496 if (TARGET_P9_VECTOR)
7497 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7498 else if (can_create_pseudo_p ())
7499 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7500 else
7502 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7503 emit_move_insn (tmp_di, tmp_gpr);
7504 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7507 /* Do the VSLO to get the value into the final location. */
7508 switch (mode)
7510 case E_V2DFmode:
7511 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7512 return;
7514 case E_V2DImode:
7515 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7516 return;
7518 case E_V4SFmode:
7520 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7521 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7522 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7523 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7524 tmp_altivec));
7526 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7527 return;
7530 case E_V4SImode:
7531 case E_V8HImode:
7532 case E_V16QImode:
7534 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7535 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7536 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7537 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7538 tmp_altivec));
7539 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7540 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7541 GEN_INT (64 - (8 * scalar_size))));
7542 return;
7545 default:
7546 gcc_unreachable ();
7549 return;
7551 else
7552 gcc_unreachable ();
7555 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7556 two SImode values. */
7558 static void
7559 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7561 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7563 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7565 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7566 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7568 emit_move_insn (dest, GEN_INT (const1 | const2));
7569 return;
7572 /* Put si1 into upper 32-bits of dest. */
7573 if (CONST_INT_P (si1))
7574 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7575 else
7577 /* Generate RLDIC. */
7578 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7579 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7580 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7581 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7582 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7583 emit_insn (gen_rtx_SET (dest, and_rtx));
7586 /* Put si2 into the temporary. */
7587 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7588 if (CONST_INT_P (si2))
7589 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7590 else
7591 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7593 /* Combine the two parts. */
7594 emit_insn (gen_iordi3 (dest, dest, tmp));
7595 return;
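/* Illustrative note: SI1 == 0x11111111 and SI2 == 0x22222222 fold at
   compile time into the single constant 0x1111111122222222; non-constant
   inputs take the shift-and-IOR path above.  */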
7598 /* Split a V4SI initialization. */
7600 void
7601 rs6000_split_v4si_init (rtx operands[])
7603 rtx dest = operands[0];
7605 /* Destination is a GPR, build up the two DImode parts in place. */
7606 if (REG_P (dest) || SUBREG_P (dest))
7608 int d_regno = regno_or_subregno (dest);
7609 rtx scalar1 = operands[1];
7610 rtx scalar2 = operands[2];
7611 rtx scalar3 = operands[3];
7612 rtx scalar4 = operands[4];
7613 rtx tmp1 = operands[5];
7614 rtx tmp2 = operands[6];
7616 /* Even though we only need one temporary (plus the destination, which
7617 has an early clobber constraint), try to use two temporaries, one for
7618 each double word created. That way the 2nd insn scheduling pass can
7619 rearrange things so the two parts are done in parallel. */
7620 if (BYTES_BIG_ENDIAN)
7622 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7623 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7624 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7625 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7627 else
7629 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7630 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7631 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7632 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
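/* On little endian, both the order of the two doublewords and the order
   of the two 32-bit halves within each doubleword are reversed relative
   to big endian.  */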
7634 return;
7637 else
7638 gcc_unreachable ();
7641 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7642 selects whether the alignment is ABI-mandated, optional, or
7643 both ABI-mandated and optional alignment. */
7645 unsigned int
7646 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7648 if (how != align_opt)
7650 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7651 align = 128;
7654 if (how != align_abi)
7656 if (TREE_CODE (type) == ARRAY_TYPE
7657 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7659 if (align < BITS_PER_WORD)
7660 align = BITS_PER_WORD;
7664 return align;
7667 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7668 instructions simply ignore the low bits; VSX memory instructions
7669 are aligned to 4 or 8 bytes. */
7671 static bool
7672 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7674 return (STRICT_ALIGNMENT
7675 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7676 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7677 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7678 && (int) align < VECTOR_ALIGN (mode)))));
7681 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7683 bool
7684 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7686 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7688 if (computed != 128)
7690 static bool warned;
7691 if (!warned && warn_psabi)
7693 warned = true;
7694 inform (input_location,
7695 "the layout of aggregates containing vectors with"
7696 " %d-byte alignment has changed in GCC 5",
7697 computed / BITS_PER_UNIT);
7700 /* In current GCC there is no special case. */
7701 return false;
7704 return false;
7707 /* AIX increases natural record alignment to doubleword if the first
7708 field is an FP double while the FP fields remain word aligned. */
7710 unsigned int
7711 rs6000_special_round_type_align (tree type, unsigned int computed,
7712 unsigned int specified)
7714 unsigned int align = MAX (computed, specified);
7715 tree field = TYPE_FIELDS (type);
7717 /* Skip all non-field decls. */
7718 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7719 field = DECL_CHAIN (field);
7721 if (field != NULL && field != type)
7723 type = TREE_TYPE (field);
7724 while (TREE_CODE (type) == ARRAY_TYPE)
7725 type = TREE_TYPE (type);
7727 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7728 align = MAX (align, 64);
7731 return align;
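/* Illustrative note: on AIX a struct whose first field is a double, such
   as struct { double d; int i; }, has its record alignment raised from a
   word to a doubleword, while the double itself stays word aligned.  */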
7734 /* Darwin increases record alignment to the natural alignment of
7735 the first field. */
7737 unsigned int
7738 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7739 unsigned int specified)
7741 unsigned int align = MAX (computed, specified);
7743 if (TYPE_PACKED (type))
7744 return align;
7746 /* Find the first field, looking down into aggregates. */
7747 do {
7748 tree field = TYPE_FIELDS (type);
7749 /* Skip all non-field decls. */
7750 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7751 field = DECL_CHAIN (field);
7752 if (! field)
7753 break;
7754 /* A packed field does not contribute any extra alignment. */
7755 if (DECL_PACKED (field))
7756 return align;
7757 type = TREE_TYPE (field);
7758 while (TREE_CODE (type) == ARRAY_TYPE)
7759 type = TREE_TYPE (type);
7760 } while (AGGREGATE_TYPE_P (type));
7762 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7763 align = MAX (align, TYPE_ALIGN (type));
7765 return align;
7768 /* Return 1 for an operand in small memory on V.4/eabi. */
7770 int
7771 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7772 machine_mode mode ATTRIBUTE_UNUSED)
7774 #if TARGET_ELF
7775 rtx sym_ref;
7777 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7778 return 0;
7780 if (DEFAULT_ABI != ABI_V4)
7781 return 0;
7783 if (GET_CODE (op) == SYMBOL_REF)
7784 sym_ref = op;
7786 else if (GET_CODE (op) != CONST
7787 || GET_CODE (XEXP (op, 0)) != PLUS
7788 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7789 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7790 return 0;
7792 else
7794 rtx sum = XEXP (op, 0);
7795 HOST_WIDE_INT summand;
7797 /* We have to be careful here, because it is the referenced address
7798 that must be within 32k of _SDA_BASE_, not just the symbol. */
7799 summand = INTVAL (XEXP (sum, 1));
7800 if (summand < 0 || summand > g_switch_value)
7801 return 0;
7803 sym_ref = XEXP (sum, 0);
7806 return SYMBOL_REF_SMALL_P (sym_ref);
7807 #else
7808 return 0;
7809 #endif
7812 /* Return true if either operand is a general purpose register. */
7814 bool
7815 gpr_or_gpr_p (rtx op0, rtx op1)
7817 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7818 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7821 /* Return true if this is a move direct operation between GPR registers and
7822 floating point/VSX registers. */
7824 bool
7825 direct_move_p (rtx op0, rtx op1)
7827 int regno0, regno1;
7829 if (!REG_P (op0) || !REG_P (op1))
7830 return false;
7832 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7833 return false;
7835 regno0 = REGNO (op0);
7836 regno1 = REGNO (op1);
7837 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7838 return false;
7840 if (INT_REGNO_P (regno0))
7841 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7843 else if (INT_REGNO_P (regno1))
7845 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7846 return true;
7848 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7849 return true;
7852 return false;
7855 /* Return true if the OFFSET is valid for the quad address instructions that
7856 use d-form (register + offset) addressing. */
7858 static inline bool
7859 quad_address_offset_p (HOST_WIDE_INT offset)
7861 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
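/* E.g. offsets 0, 16 and 32752 are acceptable; 8 fails the 16-byte
   alignment test and 32768 fails the range test.  */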
7864 /* Return true if the ADDR is an acceptable address for a quad memory
7865 operation of mode MODE (either LQ/STQ for general purpose registers, or
7866 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7867 base register must satisfy the strict form of INT_REG_OK_FOR_BASE_P,
7868 as after reload. */
7870 bool
7871 quad_address_p (rtx addr, machine_mode mode, bool strict)
7873 rtx op0, op1;
7875 if (GET_MODE_SIZE (mode) != 16)
7876 return false;
7878 if (legitimate_indirect_address_p (addr, strict))
7879 return true;
7881 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7882 return false;
7884 if (GET_CODE (addr) != PLUS)
7885 return false;
7887 op0 = XEXP (addr, 0);
7888 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7889 return false;
7891 op1 = XEXP (addr, 1);
7892 if (!CONST_INT_P (op1))
7893 return false;
7895 return quad_address_offset_p (INTVAL (op1));
7898 /* Return true if this is a load or store quad operation. This function does
7899 not handle the atomic quad memory instructions. */
7901 bool
7902 quad_load_store_p (rtx op0, rtx op1)
7904 bool ret;
7906 if (!TARGET_QUAD_MEMORY)
7907 ret = false;
7909 else if (REG_P (op0) && MEM_P (op1))
7910 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7911 && quad_memory_operand (op1, GET_MODE (op1))
7912 && !reg_overlap_mentioned_p (op0, op1));
7914 else if (MEM_P (op0) && REG_P (op1))
7915 ret = (quad_memory_operand (op0, GET_MODE (op0))
7916 && quad_int_reg_operand (op1, GET_MODE (op1)));
7918 else
7919 ret = false;
7921 if (TARGET_DEBUG_ADDR)
7923 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7924 ret ? "true" : "false");
7925 debug_rtx (gen_rtx_SET (op0, op1));
7928 return ret;
7931 /* Given an address, return a constant offset term if one exists. */
7933 static rtx
7934 address_offset (rtx op)
7936 if (GET_CODE (op) == PRE_INC
7937 || GET_CODE (op) == PRE_DEC)
7938 op = XEXP (op, 0);
7939 else if (GET_CODE (op) == PRE_MODIFY
7940 || GET_CODE (op) == LO_SUM)
7941 op = XEXP (op, 1);
7943 if (GET_CODE (op) == CONST)
7944 op = XEXP (op, 0);
7946 if (GET_CODE (op) == PLUS)
7947 op = XEXP (op, 1);
7949 if (CONST_INT_P (op))
7950 return op;
7952 return NULL_RTX;
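/* E.g. (plus (reg) (const_int 16)) yields (const_int 16); PRE_MODIFY and
   LO_SUM addresses are first peeled down to their second operand, and
   addresses without a constant term yield NULL_RTX.  */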
7955 /* Return true if the MEM operand is a memory operand suitable for use
7956 with a (full width, possibly multiple) gpr load/store. On
7957 powerpc64 this means the offset must be divisible by 4.
7958 Implements 'Y' constraint.
7960 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7961 a constraint function we know the operand has satisfied a suitable
7962 memory predicate. Also accept some odd rtl generated by reload
7963 (see rs6000_legitimize_reload_address for various forms). It is
7964 important that reload rtl be accepted by appropriate constraints
7965 but not by the operand predicate.
7967 Offsetting a lo_sum should not be allowed, except where we know by
7968 alignment that a 32k boundary is not crossed, but see the ???
7969 comment in rs6000_legitimize_reload_address. Note that by
7970 "offsetting" here we mean a further offset to access parts of the
7971 MEM. It's fine to have a lo_sum where the inner address is offset
7972 from a sym, since the same sym+offset will appear in the high part
7973 of the address calculation. */
7975 bool
7976 mem_operand_gpr (rtx op, machine_mode mode)
7978 unsigned HOST_WIDE_INT offset;
7979 int extra;
7980 rtx addr = XEXP (op, 0);
7982 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7983 if (!rs6000_offsettable_memref_p (op, mode, false))
7984 return false;
7986 op = address_offset (addr);
7987 if (op == NULL_RTX)
7988 return true;
7990 offset = INTVAL (op);
7991 if (TARGET_POWERPC64 && (offset & 3) != 0)
7992 return false;
7994 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7995 if (extra < 0)
7996 extra = 0;
7998 if (GET_CODE (addr) == LO_SUM)
7999 /* For lo_sum addresses, we must allow any offset except one that
8000 causes a wrap, so test only the low 16 bits. */
8001 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8003 return offset + 0x8000 < 0x10000u - extra;
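/* EXTRA accounts for multi-register accesses: e.g. a TImode access on
   powerpc64 also touches a doubleword at OFFSET + 8, so OFFSET may be at
   most 32756 rather than 32764.  */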
8006 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8007 enforce an offset divisible by 4 even for 32-bit. */
8009 bool
8010 mem_operand_ds_form (rtx op, machine_mode mode)
8012 unsigned HOST_WIDE_INT offset;
8013 int extra;
8014 rtx addr = XEXP (op, 0);
8016 if (!offsettable_address_p (false, mode, addr))
8017 return false;
8019 op = address_offset (addr);
8020 if (op == NULL_RTX)
8021 return true;
8023 offset = INTVAL (op);
8024 if ((offset & 3) != 0)
8025 return false;
8027 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8028 if (extra < 0)
8029 extra = 0;
8031 if (GET_CODE (addr) == LO_SUM)
8032 /* For lo_sum addresses, we must allow any offset except one that
8033 causes a wrap, so test only the low 16 bits. */
8034 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8036 return offset + 0x8000 < 0x10000u - extra;
8039 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8041 static bool
8042 reg_offset_addressing_ok_p (machine_mode mode)
8044 switch (mode)
8046 case E_V16QImode:
8047 case E_V8HImode:
8048 case E_V4SFmode:
8049 case E_V4SImode:
8050 case E_V2DFmode:
8051 case E_V2DImode:
8052 case E_V1TImode:
8053 case E_TImode:
8054 case E_TFmode:
8055 case E_KFmode:
8056 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8057 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8058 a vector mode, if we want to use the VSX registers to move it around,
8059 we need to restrict ourselves to reg+reg addressing. Similarly for
8060 IEEE 128-bit floating point that is passed in a single vector
8061 register. */
8062 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8063 return mode_supports_dq_form (mode);
8064 break;
8066 case E_SDmode:
8067 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8068 addressing for the LFIWZX and STFIWX instructions. */
8069 if (TARGET_NO_SDMODE_STACK)
8070 return false;
8071 break;
8073 default:
8074 break;
8077 return true;
8080 static bool
8081 virtual_stack_registers_memory_p (rtx op)
8083 int regnum;
8085 if (GET_CODE (op) == REG)
8086 regnum = REGNO (op);
8088 else if (GET_CODE (op) == PLUS
8089 && GET_CODE (XEXP (op, 0)) == REG
8090 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8091 regnum = REGNO (XEXP (op, 0));
8093 else
8094 return false;
8096 return (regnum >= FIRST_VIRTUAL_REGISTER
8097 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8100 /* Return true if a MODE-sized memory access to OP plus OFFSET
8101 is known not to straddle a 32k boundary. This function is used
8102 to determine whether -mcmodel=medium code can use TOC pointer
8103 relative addressing for OP. This means the alignment of the TOC
8104 pointer must also be taken into account, and unfortunately that is
8105 only 8 bytes. */
8107 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8108 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8109 #endif
8111 static bool
8112 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8113 machine_mode mode)
8115 tree decl;
8116 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8118 if (GET_CODE (op) != SYMBOL_REF)
8119 return false;
8121 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8122 SYMBOL_REF. */
8123 if (mode_supports_dq_form (mode))
8124 return false;
8126 dsize = GET_MODE_SIZE (mode);
8127 decl = SYMBOL_REF_DECL (op);
8128 if (!decl)
8130 if (dsize == 0)
8131 return false;
8133 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8134 replacing memory addresses with an anchor plus offset. We
8135 could find the decl by rummaging around in the block->objects
8136 VEC for the given offset but that seems like too much work. */
8137 dalign = BITS_PER_UNIT;
8138 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8139 && SYMBOL_REF_ANCHOR_P (op)
8140 && SYMBOL_REF_BLOCK (op) != NULL)
8142 struct object_block *block = SYMBOL_REF_BLOCK (op);
8144 dalign = block->alignment;
8145 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8147 else if (CONSTANT_POOL_ADDRESS_P (op))
8149 /* It would be nice to have get_pool_align().. */
8150 machine_mode cmode = get_pool_mode (op);
8152 dalign = GET_MODE_ALIGNMENT (cmode);
8155 else if (DECL_P (decl))
8157 dalign = DECL_ALIGN (decl);
8159 if (dsize == 0)
8161 /* Allow BLKmode when the entire object is known to not
8162 cross a 32k boundary. */
8163 if (!DECL_SIZE_UNIT (decl))
8164 return false;
8166 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8167 return false;
8169 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8170 if (dsize > 32768)
8171 return false;
8173 dalign /= BITS_PER_UNIT;
8174 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8175 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8176 return dalign >= dsize;
8179 else
8180 gcc_unreachable ();
8182 /* Find how many bits of the alignment we know for this access. */
8183 dalign /= BITS_PER_UNIT;
8184 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8185 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8186 mask = dalign - 1;
8187 lsb = offset & -offset;
8188 mask &= lsb - 1;
8189 dalign = mask + 1;
8191 return dalign >= dsize;
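/* Illustrative note: with an 8-byte aligned decl and OFFSET == 4 the
   access is only known to be 4-byte aligned, so DSIZE must be at most 4;
   an access aligned to its own size can never cross a 32k boundary.  */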
8194 static bool
8195 constant_pool_expr_p (rtx op)
8197 rtx base, offset;
8199 split_const (op, &base, &offset);
8200 return (GET_CODE (base) == SYMBOL_REF
8201 && CONSTANT_POOL_ADDRESS_P (base)
8202 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8205 /* These are only used to pass through from print_operand/print_operand_address
8206 to rs6000_output_addr_const_extra over the intervening function
8207 output_addr_const, which is not target code. */
8208 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8210 /* Return true if OP is a toc pointer relative address (the output
8211 of create_TOC_reference). If STRICT, do not match non-split
8212 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8213 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8214 TOCREL_OFFSET_RET respectively. */
8216 bool
8217 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8218 const_rtx *tocrel_offset_ret)
8220 if (!TARGET_TOC)
8221 return false;
8223 if (TARGET_CMODEL != CMODEL_SMALL)
8225 /* When strict ensure we have everything tidy. */
8226 if (strict
8227 && !(GET_CODE (op) == LO_SUM
8228 && REG_P (XEXP (op, 0))
8229 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8230 return false;
8232 /* When not strict, allow non-split TOC addresses and also allow
8233 (lo_sum (high ..)) TOC addresses created during reload. */
8234 if (GET_CODE (op) == LO_SUM)
8235 op = XEXP (op, 1);
8238 const_rtx tocrel_base = op;
8239 const_rtx tocrel_offset = const0_rtx;
8241 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8243 tocrel_base = XEXP (op, 0);
8244 tocrel_offset = XEXP (op, 1);
8247 if (tocrel_base_ret)
8248 *tocrel_base_ret = tocrel_base;
8249 if (tocrel_offset_ret)
8250 *tocrel_offset_ret = tocrel_offset;
8252 return (GET_CODE (tocrel_base) == UNSPEC
8253 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8256 /* Return true if X is a constant pool address, and also for cmodel=medium
8257 if X is a toc-relative address known to be offsettable within MODE. */
8259 bool
8260 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8261 bool strict)
8263 const_rtx tocrel_base, tocrel_offset;
8264 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8265 && (TARGET_CMODEL != CMODEL_MEDIUM
8266 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8267 || mode == QImode
8268 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8269 INTVAL (tocrel_offset), mode)));
8272 static bool
8273 legitimate_small_data_p (machine_mode mode, rtx x)
8275 return (DEFAULT_ABI == ABI_V4
8276 && !flag_pic && !TARGET_TOC
8277 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8278 && small_data_operand (x, mode));
8281 bool
8282 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8283 bool strict, bool worst_case)
8285 unsigned HOST_WIDE_INT offset;
8286 unsigned int extra;
8288 if (GET_CODE (x) != PLUS)
8289 return false;
8290 if (!REG_P (XEXP (x, 0)))
8291 return false;
8292 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8293 return false;
8294 if (mode_supports_dq_form (mode))
8295 return quad_address_p (x, mode, strict);
8296 if (!reg_offset_addressing_ok_p (mode))
8297 return virtual_stack_registers_memory_p (x);
8298 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8299 return true;
8300 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8301 return false;
8303 offset = INTVAL (XEXP (x, 1));
8304 extra = 0;
8305 switch (mode)
8307 case E_DFmode:
8308 case E_DDmode:
8309 case E_DImode:
8310 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8311 addressing. */
8312 if (VECTOR_MEM_VSX_P (mode))
8313 return false;
8315 if (!worst_case)
8316 break;
8317 if (!TARGET_POWERPC64)
8318 extra = 4;
8319 else if (offset & 3)
8320 return false;
8321 break;
8323 case E_TFmode:
8324 case E_IFmode:
8325 case E_KFmode:
8326 case E_TDmode:
8327 case E_TImode:
8328 case E_PTImode:
8329 extra = 8;
8330 if (!worst_case)
8331 break;
8332 if (!TARGET_POWERPC64)
8333 extra = 12;
8334 else if (offset & 3)
8335 return false;
8336 break;
8338 default:
8339 break;
8342 offset += 0x8000;
8343 return offset < 0x10000 - extra;
8346 bool
8347 legitimate_indexed_address_p (rtx x, int strict)
8349 rtx op0, op1;
8351 if (GET_CODE (x) != PLUS)
8352 return false;
8354 op0 = XEXP (x, 0);
8355 op1 = XEXP (x, 1);
8357 return (REG_P (op0) && REG_P (op1)
8358 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8359 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8360 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8361 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8364 bool
8365 avoiding_indexed_address_p (machine_mode mode)
8367 /* Avoid indexed addressing for modes that have non-indexed
8368 load/store instruction forms. */
8369 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8372 bool
8373 legitimate_indirect_address_p (rtx x, int strict)
8375 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8378 bool
8379 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8381 if (!TARGET_MACHO || !flag_pic
8382 || mode != SImode || GET_CODE (x) != MEM)
8383 return false;
8384 x = XEXP (x, 0);
8386 if (GET_CODE (x) != LO_SUM)
8387 return false;
8388 if (GET_CODE (XEXP (x, 0)) != REG)
8389 return false;
8390 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8391 return false;
8392 x = XEXP (x, 1);
8394 return CONSTANT_P (x);
8397 static bool
8398 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8400 if (GET_CODE (x) != LO_SUM)
8401 return false;
8402 if (GET_CODE (XEXP (x, 0)) != REG)
8403 return false;
8404 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8405 return false;
8406 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8407 if (mode_supports_dq_form (mode))
8408 return false;
8409 x = XEXP (x, 1);
8411 if (TARGET_ELF || TARGET_MACHO)
8413 bool large_toc_ok;
8415 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8416 return false;
8417 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8418 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8419 recognizes some LO_SUM addresses as valid although this
8420 function says the opposite. In most cases, LRA through different
8421 transformations can generate correct code for address reloads.
8422 It is only some LO_SUM cases that it cannot manage. So we need to
8423 add code analogous to that in rs6000_legitimize_reload_address for
8424 LO_SUM here, saying that some addresses are still valid. */
8425 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8426 && small_toc_ref (x, VOIDmode));
8427 if (TARGET_TOC && ! large_toc_ok)
8428 return false;
8429 if (GET_MODE_NUNITS (mode) != 1)
8430 return false;
8431 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8432 && !(/* ??? Assume floating point reg based on mode? */
8433 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8434 return false;
8436 return CONSTANT_P (x) || large_toc_ok;
8439 return false;
8443 /* Try machine-dependent ways of modifying an illegitimate address
8444 to be legitimate. If we find one, return the new, valid address.
8445 This is used from only one place: `memory_address' in explow.c.
8447 OLDX is the address as it was before break_out_memory_refs was
8448 called. In some cases it is useful to look at this to decide what
8449 needs to be done.
8451 It is always safe for this function to do nothing. It exists to
8452 recognize opportunities to optimize the output.
8454 On RS/6000, first check for the sum of a register with a constant
8455 integer that is out of range. If so, generate code to add the
8456 constant with the low-order 16 bits masked to the register and force
8457 this result into another register (this can be done with `cau').
8458 Then generate an address of REG+(CONST&0xffff), allowing for the
8459 possibility of bit 16 being a one.
8461 Then check for the sum of a register and something not constant; try to
8462 load the other operand into a register and return the sum. */
8464 static rtx
8465 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8466 machine_mode mode)
8468 unsigned int extra;
8470 if (!reg_offset_addressing_ok_p (mode)
8471 || mode_supports_dq_form (mode))
8473 if (virtual_stack_registers_memory_p (x))
8474 return x;
8476 /* In theory we should not be seeing addresses of the form reg+0,
8477 but just in case it is generated, optimize it away. */
8478 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8479 return force_reg (Pmode, XEXP (x, 0));
8481 /* For TImode with load/store quad, restrict addresses to just a single
8482 pointer, so it works with both GPRs and VSX registers. */
8483 /* Make sure both operands are registers. */
8484 else if (GET_CODE (x) == PLUS
8485 && (mode != TImode || !TARGET_VSX))
8486 return gen_rtx_PLUS (Pmode,
8487 force_reg (Pmode, XEXP (x, 0)),
8488 force_reg (Pmode, XEXP (x, 1)));
8489 else
8490 return force_reg (Pmode, x);
8492 if (GET_CODE (x) == SYMBOL_REF)
8494 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8495 if (model != 0)
8496 return rs6000_legitimize_tls_address (x, model);
8499 extra = 0;
8500 switch (mode)
8502 case E_TFmode:
8503 case E_TDmode:
8504 case E_TImode:
8505 case E_PTImode:
8506 case E_IFmode:
8507 case E_KFmode:
8508 /* As in legitimate_offset_address_p we do not assume
8509 worst-case. The mode here is just a hint as to the registers
8510 used. A TImode is usually in gprs, but may actually be in
8511 fprs. Leave worst-case scenario for reload to handle via
8512 insn constraints. PTImode is only GPRs. */
8513 extra = 8;
8514 break;
8515 default:
8516 break;
8519 if (GET_CODE (x) == PLUS
8520 && GET_CODE (XEXP (x, 0)) == REG
8521 && GET_CODE (XEXP (x, 1)) == CONST_INT
8522 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8523 >= 0x10000 - extra))
8525 HOST_WIDE_INT high_int, low_int;
8526 rtx sum;
8527 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8528 if (low_int >= 0x8000 - extra)
8529 low_int = 0;
8530 high_int = INTVAL (XEXP (x, 1)) - low_int;
8531 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8532 GEN_INT (high_int)), 0);
8533 return plus_constant (Pmode, sum, low_int);
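/* Worked example: r3 + 0x12345 splits into high_int == 0x10000 and
   low_int == 0x2345, i.e. roughly "addis tmp,r3,1" followed by a memory
   access at 0x2345(tmp).  */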
8535 else if (GET_CODE (x) == PLUS
8536 && GET_CODE (XEXP (x, 0)) == REG
8537 && GET_CODE (XEXP (x, 1)) != CONST_INT
8538 && GET_MODE_NUNITS (mode) == 1
8539 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8540 || (/* ??? Assume floating point reg based on mode? */
8541 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8542 && !avoiding_indexed_address_p (mode))
8544 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8545 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8547 else if ((TARGET_ELF
8548 #if TARGET_MACHO
8549 || !MACHO_DYNAMIC_NO_PIC_P
8550 #endif
8552 && TARGET_32BIT
8553 && TARGET_NO_TOC
8554 && ! flag_pic
8555 && GET_CODE (x) != CONST_INT
8556 && GET_CODE (x) != CONST_WIDE_INT
8557 && GET_CODE (x) != CONST_DOUBLE
8558 && CONSTANT_P (x)
8559 && GET_MODE_NUNITS (mode) == 1
8560 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8561 || (/* ??? Assume floating point reg based on mode? */
8562 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8564 rtx reg = gen_reg_rtx (Pmode);
8565 if (TARGET_ELF)
8566 emit_insn (gen_elf_high (reg, x));
8567 else
8568 emit_insn (gen_macho_high (reg, x));
8569 return gen_rtx_LO_SUM (Pmode, reg, x);
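/* This is the 32-bit no-TOC small-model sequence: a HIGH set of the
   constant's address followed by a LO_SUM reference, roughly
   "lis reg,sym@ha" with a later sym@l(reg) access on ELF.  */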
8571 else if (TARGET_TOC
8572 && GET_CODE (x) == SYMBOL_REF
8573 && constant_pool_expr_p (x)
8574 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8575 return create_TOC_reference (x, NULL_RTX);
8576 else
8577 return x;
8580 /* Debug version of rs6000_legitimize_address. */
8581 static rtx
8582 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8584 rtx ret;
8585 rtx_insn *insns;
8587 start_sequence ();
8588 ret = rs6000_legitimize_address (x, oldx, mode);
8589 insns = get_insns ();
8590 end_sequence ();
8592 if (ret != x)
8594 fprintf (stderr,
8595 "\nrs6000_legitimize_address: mode %s, old code %s, "
8596 "new code %s, modified\n",
8597 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8598 GET_RTX_NAME (GET_CODE (ret)));
8600 fprintf (stderr, "Original address:\n");
8601 debug_rtx (x);
8603 fprintf (stderr, "oldx:\n");
8604 debug_rtx (oldx);
8606 fprintf (stderr, "New address:\n");
8607 debug_rtx (ret);
8609 if (insns)
8611 fprintf (stderr, "Insns added:\n");
8612 debug_rtx_list (insns, 20);
8615 else
8617 fprintf (stderr,
8618 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8619 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8621 debug_rtx (x);
8624 if (insns)
8625 emit_insn (insns);
8627 return ret;
8630 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8631 We need to emit DTP-relative relocations. */
8633 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8634 static void
8635 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8637 switch (size)
8639 case 4:
8640 fputs ("\t.long\t", file);
8641 break;
8642 case 8:
8643 fputs (DOUBLE_INT_ASM_OP, file);
8644 break;
8645 default:
8646 gcc_unreachable ();
8648 output_addr_const (file, x);
8649 if (TARGET_ELF)
8650 fputs ("@dtprel+0x8000", file);
8651 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8653 switch (SYMBOL_REF_TLS_MODEL (x))
8655 case 0:
8656 break;
8657 case TLS_MODEL_LOCAL_EXEC:
8658 fputs ("@le", file);
8659 break;
8660 case TLS_MODEL_INITIAL_EXEC:
8661 fputs ("@ie", file);
8662 break;
8663 case TLS_MODEL_GLOBAL_DYNAMIC:
8664 case TLS_MODEL_LOCAL_DYNAMIC:
8665 fputs ("@m", file);
8666 break;
8667 default:
8668 gcc_unreachable ();
8673 /* Return true if X is a symbol that refers to real (rather than emulated)
8674 TLS. */
8676 static bool
8677 rs6000_real_tls_symbol_ref_p (rtx x)
8679 return (GET_CODE (x) == SYMBOL_REF
8680 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8683 /* In the name of slightly smaller debug output, and to cater to
8684 general assembler lossage, recognize various UNSPEC sequences
8685 and turn them back into a direct symbol reference. */
8687 static rtx
8688 rs6000_delegitimize_address (rtx orig_x)
8690 rtx x, y, offset;
8692 orig_x = delegitimize_mem_from_attrs (orig_x);
8693 x = orig_x;
8694 if (MEM_P (x))
8695 x = XEXP (x, 0);
8697 y = x;
8698 if (TARGET_CMODEL != CMODEL_SMALL
8699 && GET_CODE (y) == LO_SUM)
8700 y = XEXP (y, 1);
8702 offset = NULL_RTX;
8703 if (GET_CODE (y) == PLUS
8704 && GET_MODE (y) == Pmode
8705 && CONST_INT_P (XEXP (y, 1)))
8707 offset = XEXP (y, 1);
8708 y = XEXP (y, 0);
8711 if (GET_CODE (y) == UNSPEC
8712 && XINT (y, 1) == UNSPEC_TOCREL)
8714 y = XVECEXP (y, 0, 0);
8716 #ifdef HAVE_AS_TLS
8717 /* Do not associate thread-local symbols with the original
8718 constant pool symbol. */
8719 if (TARGET_XCOFF
8720 && GET_CODE (y) == SYMBOL_REF
8721 && CONSTANT_POOL_ADDRESS_P (y)
8722 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8723 return orig_x;
8724 #endif
8726 if (offset != NULL_RTX)
8727 y = gen_rtx_PLUS (Pmode, y, offset);
8728 if (!MEM_P (orig_x))
8729 return y;
8730 else
8731 return replace_equiv_address_nv (orig_x, y);
8734 if (TARGET_MACHO
8735 && GET_CODE (orig_x) == LO_SUM
8736 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8738 y = XEXP (XEXP (orig_x, 1), 0);
8739 if (GET_CODE (y) == UNSPEC
8740 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8741 return XVECEXP (y, 0, 0);
8744 return orig_x;
8747 /* Return true if X shouldn't be emitted into the debug info.
8748 The linker doesn't like .toc section references from
8749 .debug_* sections, so reject .toc section symbols. */
8751 static bool
8752 rs6000_const_not_ok_for_debug_p (rtx x)
8754 if (GET_CODE (x) == UNSPEC)
8755 return true;
8756 if (GET_CODE (x) == SYMBOL_REF
8757 && CONSTANT_POOL_ADDRESS_P (x))
8759 rtx c = get_pool_constant (x);
8760 machine_mode cmode = get_pool_mode (x);
8761 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8762 return true;
8765 return false;
8769 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8771 static bool
8772 rs6000_legitimate_combined_insn (rtx_insn *insn)
8774 int icode = INSN_CODE (insn);
8776 /* Reject creating doloop insns. Combine should not be allowed
8777 to create these for a number of reasons:
8778 1) In a nested loop, if combine creates one of these in an
8779 outer loop and the register allocator happens to allocate ctr
8780 to the outer loop insn, then the inner loop can't use ctr.
8781 Inner loops ought to be more highly optimized.
8782 2) Combine often wants to create one of these from what was
8783 originally a three insn sequence, first combining the three
8784 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8785 allocated ctr, the splitter takes us back to the three insn
8786 sequence. It's better to stop combine at the two insn
8787 sequence.
8788 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8789 insns, the register allocator sometimes uses floating point
8790 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8791 jump insn and output reloads are not implemented for jumps,
8792 the ctrsi/ctrdi splitters need to handle all possible cases.
8793 That's a pain, and it gets to be seriously difficult when a
8794 splitter that runs after reload needs memory to transfer from
8795 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8796 for the difficult case. It's better to not create problems
8797 in the first place. */
8798 if (icode != CODE_FOR_nothing
8799 && (icode == CODE_FOR_bdz_si
8800 || icode == CODE_FOR_bdz_di
8801 || icode == CODE_FOR_bdnz_si
8802 || icode == CODE_FOR_bdnz_di
8803 || icode == CODE_FOR_bdztf_si
8804 || icode == CODE_FOR_bdztf_di
8805 || icode == CODE_FOR_bdnztf_si
8806 || icode == CODE_FOR_bdnztf_di))
8807 return false;
8809 return true;
8812 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8814 static GTY(()) rtx rs6000_tls_symbol;
8815 static rtx
8816 rs6000_tls_get_addr (void)
8818 if (!rs6000_tls_symbol)
8819 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8821 return rs6000_tls_symbol;
8824 /* Construct the SYMBOL_REF for TLS GOT references. */
8826 static GTY(()) rtx rs6000_got_symbol;
8827 static rtx
8828 rs6000_got_sym (void)
8830 if (!rs6000_got_symbol)
8832 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8833 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8834 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8837 return rs6000_got_symbol;
8840 /* AIX Thread-Local Address support. */
8842 static rtx
8843 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8845 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8846 const char *name;
8847 char *tlsname;
8849 name = XSTR (addr, 0);
8850 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8851 or the symbol will be in the TLS private data section. */
8852 if (name[strlen (name) - 1] != ']'
8853 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8854 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8856 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8857 strcpy (tlsname, name);
8858 strcat (tlsname,
8859 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8860 tlsaddr = copy_rtx (addr);
8861 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8863 else
8864 tlsaddr = addr;
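/* Illustrative note: a public symbol "foo" becomes "foo[TL]" here, and a
   BSS-initialized one becomes "foo[UL]", following the XCOFF CSECT naming
   convention.  */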
8866 /* Place addr into TOC constant pool. */
8867 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8869 /* Output the TOC entry and create the MEM referencing the value. */
8870 if (constant_pool_expr_p (XEXP (sym, 0))
8871 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8873 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8874 mem = gen_const_mem (Pmode, tocref);
8875 set_mem_alias_set (mem, get_TOC_alias_set ());
8877 else
8878 return sym;
8880 /* Use global-dynamic for local-dynamic. */
8881 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8882 || model == TLS_MODEL_LOCAL_DYNAMIC)
8884 /* Create new TOC reference for @m symbol. */
8885 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8886 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8887 strcpy (tlsname, "*LCM");
8888 strcat (tlsname, name + 3);
8889 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8890 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8891 tocref = create_TOC_reference (modaddr, NULL_RTX);
8892 rtx modmem = gen_const_mem (Pmode, tocref);
8893 set_mem_alias_set (modmem, get_TOC_alias_set ());
8895 rtx modreg = gen_reg_rtx (Pmode);
8896 emit_insn (gen_rtx_SET (modreg, modmem));
8898 tmpreg = gen_reg_rtx (Pmode);
8899 emit_insn (gen_rtx_SET (tmpreg, mem));
8901 dest = gen_reg_rtx (Pmode);
8902 if (TARGET_32BIT)
8903 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8904 else
8905 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8906 return dest;
8908 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8909 else if (TARGET_32BIT)
8911 tlsreg = gen_reg_rtx (SImode);
8912 emit_insn (gen_tls_get_tpointer (tlsreg));
8914 else
8915 tlsreg = gen_rtx_REG (DImode, 13);
8917 /* Load the TOC value into temporary register. */
8918 tmpreg = gen_reg_rtx (Pmode);
8919 emit_insn (gen_rtx_SET (tmpreg, mem));
8920 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8921 gen_rtx_MINUS (Pmode, addr, tlsreg));
8923 /* Add TOC symbol value to TLS pointer. */
8924 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8926 return dest;
8929 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8930 this (thread-local) address. */
8932 static rtx
8933 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8935 rtx dest, insn;
8937 if (TARGET_XCOFF)
8938 return rs6000_legitimize_tls_address_aix (addr, model);
8940 dest = gen_reg_rtx (Pmode);
8941 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8943 rtx tlsreg;
8945 if (TARGET_64BIT)
8947 tlsreg = gen_rtx_REG (Pmode, 13);
8948 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8950 else
8952 tlsreg = gen_rtx_REG (Pmode, 2);
8953 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8955 emit_insn (insn);
8957 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8959 rtx tlsreg, tmp;
8961 tmp = gen_reg_rtx (Pmode);
8962 if (TARGET_64BIT)
8964 tlsreg = gen_rtx_REG (Pmode, 13);
8965 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8967 else
8969 tlsreg = gen_rtx_REG (Pmode, 2);
8970 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8972 emit_insn (insn);
8973 if (TARGET_64BIT)
8974 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8975 else
8976 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8977 emit_insn (insn);
8979 else
8981 rtx r3, got, tga, tmp1, tmp2, call_insn;
8983 /* We currently use relocations like @got@tlsgd for tls, which
8984 means the linker will handle allocation of tls entries, placing
8985 them in the .got section. So use a pointer to the .got section,
8986 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8987 or to secondary GOT sections used by 32-bit -fPIC. */
8988 if (TARGET_64BIT)
8989 got = gen_rtx_REG (Pmode, 2);
8990 else
8992 if (flag_pic == 1)
8993 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8994 else
8996 rtx gsym = rs6000_got_sym ();
8997 got = gen_reg_rtx (Pmode);
8998 if (flag_pic == 0)
8999 rs6000_emit_move (got, gsym, Pmode);
9000 else
9002 rtx mem, lab;
9004 tmp1 = gen_reg_rtx (Pmode);
9005 tmp2 = gen_reg_rtx (Pmode);
9006 mem = gen_const_mem (Pmode, tmp1);
9007 lab = gen_label_rtx ();
9008 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9009 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9010 if (TARGET_LINK_STACK)
9011 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9012 emit_move_insn (tmp2, mem);
9013 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9014 set_unique_reg_note (last, REG_EQUAL, gsym);
9019 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9021 tga = rs6000_tls_get_addr ();
9022 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9023 const0_rtx, Pmode);
9025 r3 = gen_rtx_REG (Pmode, 3);
9026 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9028 if (TARGET_64BIT)
9029 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9030 else
9031 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9033 else if (DEFAULT_ABI == ABI_V4)
9034 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9035 else
9036 gcc_unreachable ();
9037 call_insn = last_call_insn ();
9038 PATTERN (call_insn) = insn;
9039 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9040 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9041 pic_offset_table_rtx);
9043 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9045 tga = rs6000_tls_get_addr ();
9046 tmp1 = gen_reg_rtx (Pmode);
9047 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9048 const0_rtx, Pmode);
9050 r3 = gen_rtx_REG (Pmode, 3);
9051 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9053 if (TARGET_64BIT)
9054 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9055 else
9056 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9058 else if (DEFAULT_ABI == ABI_V4)
9059 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9060 else
9061 gcc_unreachable ();
9062 call_insn = last_call_insn ();
9063 PATTERN (call_insn) = insn;
9064 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9065 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9066 pic_offset_table_rtx);
9068 if (rs6000_tls_size == 16)
9070 if (TARGET_64BIT)
9071 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9072 else
9073 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9075 else if (rs6000_tls_size == 32)
9077 tmp2 = gen_reg_rtx (Pmode);
9078 if (TARGET_64BIT)
9079 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9080 else
9081 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9082 emit_insn (insn);
9083 if (TARGET_64BIT)
9084 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9085 else
9086 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9088 else
9090 tmp2 = gen_reg_rtx (Pmode);
9091 if (TARGET_64BIT)
9092 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9093 else
9094 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9095 emit_insn (insn);
9096 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9098 emit_insn (insn);
9100 else
9102 /* IE, or 64-bit offset LE. */
9103 tmp2 = gen_reg_rtx (Pmode);
9104 if (TARGET_64BIT)
9105 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9106 else
9107 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9108 emit_insn (insn);
9109 if (TARGET_64BIT)
9110 insn = gen_tls_tls_64 (dest, tmp2, addr);
9111 else
9112 insn = gen_tls_tls_32 (dest, tmp2, addr);
9113 emit_insn (insn);
9117 return dest;
9120 /* Only create the global variable for the stack protect guard if we are using
9121 the global flavor of that guard. */
9122 static tree
9123 rs6000_init_stack_protect_guard (void)
9125 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9126 return default_stack_protect_guard ();
9128 return NULL_TREE;
9131 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9133 static bool
9134 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9136 if (GET_CODE (x) == HIGH
9137 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9138 return true;
9140 /* A TLS symbol in the TOC cannot contain a sum. */
9141 if (GET_CODE (x) == CONST
9142 && GET_CODE (XEXP (x, 0)) == PLUS
9143 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9144 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9145 return true;
9147 /* Do not place an ELF TLS symbol in the constant pool. */
9148 return TARGET_ELF && tls_referenced_p (x);
9151 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9152 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9153 can be addressed relative to the toc pointer. */
9155 static bool
9156 use_toc_relative_ref (rtx sym, machine_mode mode)
9158 return ((constant_pool_expr_p (sym)
9159 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9160 get_pool_mode (sym)))
9161 || (TARGET_CMODEL == CMODEL_MEDIUM
9162 && SYMBOL_REF_LOCAL_P (sym)
9163 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
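/* E.g. with -mcmodel=medium, a local symbol accepted here is eventually
   emitted as a TOC-pointer-relative pair along the lines of

       addis 9,2,sym@toc@ha
       addi 9,9,sym@toc@l

   rather than as a load of the address from the TOC itself.  */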
9166 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9167 replace the input X, or the original X if no replacement is called for.
9168 The output parameter *WIN is 1 if the calling macro should goto WIN,
9169 0 if it should not.
9171 For RS/6000, we wish to handle large displacements off a base
9172 register by splitting the addend across an addi/addis and the mem insn.
9173 This cuts the number of extra insns needed from 3 to 1.
9175 On Darwin, we use this to generate code for floating point constants.
9176 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9177 The Darwin code is inside #if TARGET_MACHO because only then are the
9178 machopic_* functions defined. */
9179 static rtx
9180 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9181 int opnum, int type,
9182 int ind_levels ATTRIBUTE_UNUSED, int *win)
9184 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9185 bool quad_offset_p = mode_supports_dq_form (mode);
9187 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9188 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9189 if (reg_offset_p
9190 && opnum == 1
9191 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9192 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9193 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9194 && TARGET_P9_VECTOR)
9195 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9196 && TARGET_P9_VECTOR)))
9197 reg_offset_p = false;
9199 /* We must recognize output that we have already generated ourselves. */
9200 if (GET_CODE (x) == PLUS
9201 && GET_CODE (XEXP (x, 0)) == PLUS
9202 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9203 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9204 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9206 if (TARGET_DEBUG_ADDR)
9208 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9209 debug_rtx (x);
9211 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9212 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9213 opnum, (enum reload_type) type);
9214 *win = 1;
9215 return x;
9218 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9219 if (GET_CODE (x) == LO_SUM
9220 && GET_CODE (XEXP (x, 0)) == HIGH)
9222 if (TARGET_DEBUG_ADDR)
9224 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9225 debug_rtx (x);
9227 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9228 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9229 opnum, (enum reload_type) type);
9230 *win = 1;
9231 return x;
9234 #if TARGET_MACHO
9235 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9236 && GET_CODE (x) == LO_SUM
9237 && GET_CODE (XEXP (x, 0)) == PLUS
9238 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9239 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9240 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9241 && machopic_operand_p (XEXP (x, 1)))
9243 /* Result of previous invocation of this function on Darwin
9244 floating point constant. */
9245 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9246 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9247 opnum, (enum reload_type) type);
9248 *win = 1;
9249 return x;
9251 #endif
9253 if (TARGET_CMODEL != CMODEL_SMALL
9254 && reg_offset_p
9255 && !quad_offset_p
9256 && small_toc_ref (x, VOIDmode))
9258 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9259 x = gen_rtx_LO_SUM (Pmode, hi, x);
9260 if (TARGET_DEBUG_ADDR)
9262 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9263 debug_rtx (x);
9265 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9266 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9267 opnum, (enum reload_type) type);
9268 *win = 1;
9269 return x;
9272 if (GET_CODE (x) == PLUS
9273 && REG_P (XEXP (x, 0))
9274 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9275 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9276 && CONST_INT_P (XEXP (x, 1))
9277 && reg_offset_p
9278 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9280 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9281 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9282 HOST_WIDE_INT high
9283 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9285 /* Check for 32-bit overflow or quad addresses with one of the
9286 four least significant bits set. */
9287 if (high + low != val
9288 || (quad_offset_p && (low & 0xf)))
9290 *win = 0;
9291 return x;
9294 /* Reload the high part into a base reg; leave the low part
9295 in the mem directly. */
9297 x = gen_rtx_PLUS (GET_MODE (x),
9298 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9299 GEN_INT (high)),
9300 GEN_INT (low));
9302 if (TARGET_DEBUG_ADDR)
9304 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9305 debug_rtx (x);
9307 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9308 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9309 opnum, (enum reload_type) type);
9310 *win = 1;
9311 return x;
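/* Worked example of the split above: val = 0x12348000 gives
   low = ((0x8000 ^ 0x8000) - 0x8000) = -0x8000 and high = 0x12350000,
   so high + low == val, HIGH is loadable with a single addis, and LOW
   fits the signed 16-bit displacement field of the mem insn.  */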
9314 if (GET_CODE (x) == SYMBOL_REF
9315 && reg_offset_p
9316 && !quad_offset_p
9317 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9318 #if TARGET_MACHO
9319 && DEFAULT_ABI == ABI_DARWIN
9320 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9321 && machopic_symbol_defined_p (x)
9322 #else
9323 && DEFAULT_ABI == ABI_V4
9324 && !flag_pic
9325 #endif
9326 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9327 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9328 without fprs.
9329 ??? Assume floating point reg based on mode? This assumption is
9330 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9331 where reload ends up doing a DFmode load of a constant from
9332 mem using two gprs. Unfortunately, at this point reload
9333 hasn't yet selected regs so poking around in reload data
9334 won't help and even if we could figure out the regs reliably,
9335 we'd still want to allow this transformation when the mem is
9336 naturally aligned. Since we say the address is good here, we
9337 can't disable offsets from LO_SUMs in mem_operand_gpr.
9338 FIXME: Allow offset from lo_sum for other modes too, when
9339 mem is sufficiently aligned.
9341 Also disallow this if the type can go in VMX/Altivec registers, since
9342 those registers do not have d-form (reg+offset) address modes. */
9343 && !reg_addr[mode].scalar_in_vmx_p
9344 && mode != TFmode
9345 && mode != TDmode
9346 && mode != IFmode
9347 && mode != KFmode
9348 && (mode != TImode || !TARGET_VSX)
9349 && mode != PTImode
9350 && (mode != DImode || TARGET_POWERPC64)
9351 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9352 || TARGET_HARD_FLOAT))
9354 #if TARGET_MACHO
9355 if (flag_pic)
9357 rtx offset = machopic_gen_offset (x);
9358 x = gen_rtx_LO_SUM (GET_MODE (x),
9359 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9360 gen_rtx_HIGH (Pmode, offset)), offset);
9362 else
9363 #endif
9364 x = gen_rtx_LO_SUM (GET_MODE (x),
9365 gen_rtx_HIGH (Pmode, x), x);
9367 if (TARGET_DEBUG_ADDR)
9369 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9370 debug_rtx (x);
9372 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9373 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9374 opnum, (enum reload_type) type);
9375 *win = 1;
9376 return x;
9379 /* Reload an offset address wrapped by an AND that represents the
9380 masking of the lower bits. Strip the outer AND and let reload
9381 convert the offset address into an indirect address. For VSX,
9382 force reload to create the address with an AND in a separate
9383 register, because we can't guarantee an altivec register will
9384 be used. */
9385 if (VECTOR_MEM_ALTIVEC_P (mode)
9386 && GET_CODE (x) == AND
9387 && GET_CODE (XEXP (x, 0)) == PLUS
9388 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9389 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9390 && GET_CODE (XEXP (x, 1)) == CONST_INT
9391 && INTVAL (XEXP (x, 1)) == -16)
9393 x = XEXP (x, 0);
9394 *win = 1;
9395 return x;
9398 if (TARGET_TOC
9399 && reg_offset_p
9400 && !quad_offset_p
9401 && GET_CODE (x) == SYMBOL_REF
9402 && use_toc_relative_ref (x, mode))
9404 x = create_TOC_reference (x, NULL_RTX);
9405 if (TARGET_CMODEL != CMODEL_SMALL)
9407 if (TARGET_DEBUG_ADDR)
9409 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9410 debug_rtx (x);
9412 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9413 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9414 opnum, (enum reload_type) type);
9416 *win = 1;
9417 return x;
9419 *win = 0;
9420 return x;
9423 /* Debug version of rs6000_legitimize_reload_address. */
9424 static rtx
9425 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9426 int opnum, int type,
9427 int ind_levels, int *win)
9429 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9430 ind_levels, win);
9431 fprintf (stderr,
9432 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9433 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9434 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9435 debug_rtx (x);
9437 if (x == ret)
9438 fprintf (stderr, "Same address returned\n");
9439 else if (!ret)
9440 fprintf (stderr, "NULL returned\n");
9441 else
9443 fprintf (stderr, "New address:\n");
9444 debug_rtx (ret);
9447 return ret;
9450 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9451 that is a valid memory address for an instruction.
9452 The MODE argument is the machine mode for the MEM expression
9453 that wants to use this address.
9455 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9456 refers to a constant pool entry of an address (or the sum of it
9457 plus a constant), a short (16-bit signed) constant plus a register,
9458 the sum of two registers, or a register indirect, possibly with an
9459 auto-increment. For DFmode, DDmode and DImode with a constant plus
9460 register, we must ensure that both words are addressable, or that on
9461 PowerPC64 the offset is word aligned.
9463 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9464 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9465 because adjacent memory cells are accessed by adding word-sized offsets
9466 during assembly output. */
9467 static bool
9468 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9470 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9471 bool quad_offset_p = mode_supports_dq_form (mode);
9473 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9474 if (VECTOR_MEM_ALTIVEC_P (mode)
9475 && GET_CODE (x) == AND
9476 && GET_CODE (XEXP (x, 1)) == CONST_INT
9477 && INTVAL (XEXP (x, 1)) == -16)
9478 x = XEXP (x, 0);
9480 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9481 return 0;
9482 if (legitimate_indirect_address_p (x, reg_ok_strict))
9483 return 1;
9484 if (TARGET_UPDATE
9485 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9486 && mode_supports_pre_incdec_p (mode)
9487 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9488 return 1;
9489 /* Handle restricted vector d-form offsets in ISA 3.0. */
9490 if (quad_offset_p)
9492 if (quad_address_p (x, mode, reg_ok_strict))
9493 return 1;
9495 else if (virtual_stack_registers_memory_p (x))
9496 return 1;
9498 else if (reg_offset_p)
9500 if (legitimate_small_data_p (mode, x))
9501 return 1;
9502 if (legitimate_constant_pool_address_p (x, mode,
9503 reg_ok_strict || lra_in_progress))
9504 return 1;
9505 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9506 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9507 return 1;
9510 /* For TImode, if we have TImode in VSX registers, only allow register
9511 indirect addresses. This will allow the values to go in either GPRs
9512 or VSX registers without reloading. The vector types would tend to
9513 go into VSX registers, so we allow REG+REG, while TImode seems
9514 somewhat split, in that some uses are GPR based, and some VSX based. */
9515 /* FIXME: We could loosen this by changing the following to
9516 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9517 but currently we cannot allow REG+REG addressing for TImode. See
9518 PR72827 for complete details on how this ends up hoodwinking DSE. */
9519 if (mode == TImode && TARGET_VSX)
9520 return 0;
9521 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9522 if (! reg_ok_strict
9523 && reg_offset_p
9524 && GET_CODE (x) == PLUS
9525 && GET_CODE (XEXP (x, 0)) == REG
9526 && (XEXP (x, 0) == virtual_stack_vars_rtx
9527 || XEXP (x, 0) == arg_pointer_rtx)
9528 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9529 return 1;
9530 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9531 return 1;
9532 if (!FLOAT128_2REG_P (mode)
9533 && (TARGET_HARD_FLOAT
9534 || TARGET_POWERPC64
9535 || (mode != DFmode && mode != DDmode))
9536 && (TARGET_POWERPC64 || mode != DImode)
9537 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9538 && mode != PTImode
9539 && !avoiding_indexed_address_p (mode)
9540 && legitimate_indexed_address_p (x, reg_ok_strict))
9541 return 1;
9542 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9543 && mode_supports_pre_modify_p (mode)
9544 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9545 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9546 reg_ok_strict, false)
9547 || (!avoiding_indexed_address_p (mode)
9548 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9549 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9550 return 1;
9551 if (reg_offset_p && !quad_offset_p
9552 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9553 return 1;
9554 return 0;
9557 /* Debug version of rs6000_legitimate_address_p. */
9558 static bool
9559 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9560 bool reg_ok_strict)
9562 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9563 fprintf (stderr,
9564 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9565 "strict = %d, reload = %s, code = %s\n",
9566 ret ? "true" : "false",
9567 GET_MODE_NAME (mode),
9568 reg_ok_strict,
9569 (reload_completed ? "after" : "before"),
9570 GET_RTX_NAME (GET_CODE (x)));
9571 debug_rtx (x);
9573 return ret;
9576 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9578 static bool
9579 rs6000_mode_dependent_address_p (const_rtx addr,
9580 addr_space_t as ATTRIBUTE_UNUSED)
9582 return rs6000_mode_dependent_address_ptr (addr);
9585 /* Return true if ADDR (a legitimate address expression)
9586 has an effect that depends on the machine mode it is used for.
9588 On the RS/6000 this is true of all integral offsets (since AltiVec
9589 and VSX modes don't allow them) and of any pre-increment or decrement.
9591 ??? Except that due to conceptual problems in offsettable_address_p
9592 we can't really report the problems of integral offsets. So leave
9593 this assuming that the adjustable offset must be valid for the
9594 sub-words of a TFmode operand, which is what we had before. */
9596 static bool
9597 rs6000_mode_dependent_address (const_rtx addr)
9599 switch (GET_CODE (addr))
9601 case PLUS:
9602 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9603 is considered a legitimate address before reload, so there
9604 are no offset restrictions in that case. Note that this
9605 condition is safe in strict mode because any address involving
9606 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9607 been rejected as illegitimate. */
9608 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9609 && XEXP (addr, 0) != arg_pointer_rtx
9610 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9612 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9613 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9615 break;
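/* The check above treats the offset as mode-independent iff VAL is in
   [-0x8000, 0x8000 - (TARGET_POWERPC64 ? 8 : 12)), i.e. iff the
   displacement of the last sub-word access of a worst-case 16-byte
   operand (offset +8 as a second doubleword on 64-bit, +12 as the
   fourth word on 32-bit) still fits the signed 16-bit field.  */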
9617 case LO_SUM:
9618 /* Anything in the constant pool is sufficiently aligned that
9619 all bytes have the same high part address. */
9620 return !legitimate_constant_pool_address_p (addr, QImode, false);
9622 /* Auto-increment cases are now treated generically in recog.c. */
9623 case PRE_MODIFY:
9624 return TARGET_UPDATE;
9626 /* AND is only allowed in Altivec loads. */
9627 case AND:
9628 return true;
9630 default:
9631 break;
9634 return false;
9637 /* Debug version of rs6000_mode_dependent_address. */
9638 static bool
9639 rs6000_debug_mode_dependent_address (const_rtx addr)
9641 bool ret = rs6000_mode_dependent_address (addr);
9643 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9644 ret ? "true" : "false");
9645 debug_rtx (addr);
9647 return ret;
9650 /* Implement FIND_BASE_TERM. */
9653 rs6000_find_base_term (rtx op)
9655 rtx base;
9657 base = op;
9658 if (GET_CODE (base) == CONST)
9659 base = XEXP (base, 0);
9660 if (GET_CODE (base) == PLUS)
9661 base = XEXP (base, 0);
9662 if (GET_CODE (base) == UNSPEC)
9663 switch (XINT (base, 1))
9665 case UNSPEC_TOCREL:
9666 case UNSPEC_MACHOPIC_OFFSET:
9667 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9668 for aliasing purposes. */
9669 return XVECEXP (base, 0, 0);
9672 return op;
9675 /* More elaborate version of recog's offsettable_memref_p predicate
9676 that works around the ??? note of rs6000_mode_dependent_address.
9677 In particular it accepts
9679 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9681 in 32-bit mode, which the recog predicate rejects.  */
9683 static bool
9684 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9686 bool worst_case;
9688 if (!MEM_P (op))
9689 return false;
9691 /* First mimic offsettable_memref_p. */
9692 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9693 return true;
9695 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9696 the latter predicate knows nothing about the mode of the memory
9697 reference and, therefore, assumes that it is the largest supported
9698 mode (TFmode). As a consequence, legitimate offsettable memory
9699 references are rejected. rs6000_legitimate_offset_address_p contains
9700 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9701 at least with a little bit of help here given that we know the
9702 actual registers used. */
9703 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9704 || GET_MODE_SIZE (reg_mode) == 4);
9705 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9706 strict, worst_case);
9709 /* Determine the reassociation width to be used in reassociate_bb.
9710 This takes into account how many parallel operations we
9711 can actually do of a given type, and also the latency.
9713 int add/sub 6/cycle
9714 mul 2/cycle
9715 vect add/sub/mul 2/cycle
9716 fp add/sub/mul 2/cycle
9717 dfp 1/cycle
9720 static int
9721 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9722 machine_mode mode)
9724 switch (rs6000_tune)
9726 case PROCESSOR_POWER8:
9727 case PROCESSOR_POWER9:
9728 if (DECIMAL_FLOAT_MODE_P (mode))
9729 return 1;
9730 if (VECTOR_MODE_P (mode))
9731 return 4;
9732 if (INTEGRAL_MODE_P (mode))
9733 return 1;
9734 if (FLOAT_MODE_P (mode))
9735 return 4;
9736 break;
9737 default:
9738 break;
9740 return 1;
9743 /* Change register usage conditional on target flags. */
9744 static void
9745 rs6000_conditional_register_usage (void)
9747 int i;
9749 if (TARGET_DEBUG_TARGET)
9750 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9752 /* Set MQ register fixed (already call_used) so that it will not be
9753 allocated. */
9754 fixed_regs[64] = 1;
9756 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9757 if (TARGET_64BIT)
9758 fixed_regs[13] = call_used_regs[13]
9759 = call_really_used_regs[13] = 1;
9761 /* Conditionally disable FPRs. */
9762 if (TARGET_SOFT_FLOAT)
9763 for (i = 32; i < 64; i++)
9764 fixed_regs[i] = call_used_regs[i]
9765 = call_really_used_regs[i] = 1;
9767 /* The TOC register is not killed across calls in a way that is
9768 visible to the compiler. */
9769 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9770 call_really_used_regs[2] = 0;
9772 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9773 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9775 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9776 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9777 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9778 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9780 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9781 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9782 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9783 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9785 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9786 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9787 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9789 if (!TARGET_ALTIVEC && !TARGET_VSX)
9791 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9792 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9793 call_really_used_regs[VRSAVE_REGNO] = 1;
9796 if (TARGET_ALTIVEC || TARGET_VSX)
9797 global_regs[VSCR_REGNO] = 1;
9799 if (TARGET_ALTIVEC_ABI)
9801 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9802 call_used_regs[i] = call_really_used_regs[i] = 1;
9804 /* AIX reserves VR20:31 in non-extended ABI mode. */
9805 if (TARGET_XCOFF)
9806 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9807 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9812 /* Output insns to set DEST equal to the constant SOURCE as a series of
9813 lis, ori and shl instructions and return TRUE. */
9815 bool
9816 rs6000_emit_set_const (rtx dest, rtx source)
9818 machine_mode mode = GET_MODE (dest);
9819 rtx temp, set;
9820 rtx_insn *insn;
9821 HOST_WIDE_INT c;
9823 gcc_checking_assert (CONST_INT_P (source));
9824 c = INTVAL (source);
9825 switch (mode)
9827 case E_QImode:
9828 case E_HImode:
9829 emit_insn (gen_rtx_SET (dest, source));
9830 return true;
9832 case E_SImode:
9833 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9835 emit_insn (gen_rtx_SET (copy_rtx (temp),
9836 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9837 emit_insn (gen_rtx_SET (dest,
9838 gen_rtx_IOR (SImode, copy_rtx (temp),
9839 GEN_INT (c & 0xffff))));
9840 break;
9842 case E_DImode:
9843 if (!TARGET_POWERPC64)
9845 rtx hi, lo;
9847 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9848 DImode);
9849 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9850 DImode);
9851 emit_move_insn (hi, GEN_INT (c >> 32));
9852 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9853 emit_move_insn (lo, GEN_INT (c));
9855 else
9856 rs6000_emit_set_long_const (dest, c);
9857 break;
9859 default:
9860 gcc_unreachable ();
9863 insn = get_last_insn ();
9864 set = single_set (insn);
9865 if (! CONSTANT_P (SET_SRC (set)))
9866 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9868 return true;
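/* For the SImode case above, e.g. c = 0x12345678 yields the classic
   two-insn constant load:

       lis  rT,0x1234        (rT = 0x12340000)
       ori  rD,rT,0x5678     (rD = 0x12345678)  */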
9871 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9872 Output insns to set DEST equal to the constant C as a series of
9873 lis, ori and shl instructions. */
9875 static void
9876 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9878 rtx temp;
9879 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9881 ud1 = c & 0xffff;
9882 c = c >> 16;
9883 ud2 = c & 0xffff;
9884 c = c >> 16;
9885 ud3 = c & 0xffff;
9886 c = c >> 16;
9887 ud4 = c & 0xffff;
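/* ud1..ud4 are the four 16-bit chunks of C from least to most
   significant; e.g. C = 0x1122334455667788 gives ud4 = 0x1122,
   ud3 = 0x3344, ud2 = 0x5566, ud1 = 0x7788.  */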
9889 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9890 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9891 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9893 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9894 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9896 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9898 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9899 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9900 if (ud1 != 0)
9901 emit_move_insn (dest,
9902 gen_rtx_IOR (DImode, copy_rtx (temp),
9903 GEN_INT (ud1)));
9905 else if (ud3 == 0 && ud4 == 0)
9907 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9909 gcc_assert (ud2 & 0x8000);
9910 emit_move_insn (copy_rtx (temp),
9911 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9912 if (ud1 != 0)
9913 emit_move_insn (copy_rtx (temp),
9914 gen_rtx_IOR (DImode, copy_rtx (temp),
9915 GEN_INT (ud1)));
9916 emit_move_insn (dest,
9917 gen_rtx_ZERO_EXTEND (DImode,
9918 gen_lowpart (SImode,
9919 copy_rtx (temp))));
9921 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9922 || (ud4 == 0 && ! (ud3 & 0x8000)))
9924 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9926 emit_move_insn (copy_rtx (temp),
9927 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9928 if (ud2 != 0)
9929 emit_move_insn (copy_rtx (temp),
9930 gen_rtx_IOR (DImode, copy_rtx (temp),
9931 GEN_INT (ud2)));
9932 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9933 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9934 GEN_INT (16)));
9935 if (ud1 != 0)
9936 emit_move_insn (dest,
9937 gen_rtx_IOR (DImode, copy_rtx (temp),
9938 GEN_INT (ud1)));
9940 else
9942 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9944 emit_move_insn (copy_rtx (temp),
9945 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9946 if (ud3 != 0)
9947 emit_move_insn (copy_rtx (temp),
9948 gen_rtx_IOR (DImode, copy_rtx (temp),
9949 GEN_INT (ud3)));
9951 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9952 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9953 GEN_INT (32)));
9954 if (ud2 != 0)
9955 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9956 gen_rtx_IOR (DImode, copy_rtx (temp),
9957 GEN_INT (ud2 << 16)));
9958 if (ud1 != 0)
9959 emit_move_insn (dest,
9960 gen_rtx_IOR (DImode, copy_rtx (temp),
9961 GEN_INT (ud1)));
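/* In the general (final) case above the constant is thus built with at
   most five insns; e.g. for C = 0x1122334455667788:

       lis   rT,0x1122
       ori   rT,rT,0x3344
       sldi  rT,rT,32
       oris  rT,rT,0x5566
       ori   rD,rT,0x7788  */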
9965 /* Helper for the following. Get rid of [r+r] memory refs
9966 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9968 static void
9969 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9971 if (GET_CODE (operands[0]) == MEM
9972 && GET_CODE (XEXP (operands[0], 0)) != REG
9973 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9974 GET_MODE (operands[0]), false))
9975 operands[0]
9976 = replace_equiv_address (operands[0],
9977 copy_addr_to_reg (XEXP (operands[0], 0)));
9979 if (GET_CODE (operands[1]) == MEM
9980 && GET_CODE (XEXP (operands[1], 0)) != REG
9981 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9982 GET_MODE (operands[1]), false))
9983 operands[1]
9984 = replace_equiv_address (operands[1],
9985 copy_addr_to_reg (XEXP (operands[1], 0)));
9988 /* Generate a vector of constants to permute MODE for a little-endian
9989 storage operation by swapping the two halves of a vector. */
9990 static rtvec
9991 rs6000_const_vec (machine_mode mode)
9993 int i, subparts;
9994 rtvec v;
9996 switch (mode)
9998 case E_V1TImode:
9999 subparts = 1;
10000 break;
10001 case E_V2DFmode:
10002 case E_V2DImode:
10003 subparts = 2;
10004 break;
10005 case E_V4SFmode:
10006 case E_V4SImode:
10007 subparts = 4;
10008 break;
10009 case E_V8HImode:
10010 subparts = 8;
10011 break;
10012 case E_V16QImode:
10013 subparts = 16;
10014 break;
10015 default:
10016 gcc_unreachable();
10019 v = rtvec_alloc (subparts);
10021 for (i = 0; i < subparts / 2; ++i)
10022 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10023 for (i = subparts / 2; i < subparts; ++i)
10024 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10026 return v;
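/* For V4SImode, for instance, this yields the element order
   { 2, 3, 0, 1 }, i.e. a swap of the two 64-bit halves.  */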
10029 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10030 store operation. */
10031 void
10032 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10034 /* Scalar permutations are easier to express in integer modes than in
10035 floating-point modes, so cast them here. We use V1TImode instead
10036 of TImode to ensure that the values don't go through GPRs. */
10037 if (FLOAT128_VECTOR_P (mode))
10039 dest = gen_lowpart (V1TImode, dest);
10040 source = gen_lowpart (V1TImode, source);
10041 mode = V1TImode;
10044 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10045 scalar. */
10046 if (mode == TImode || mode == V1TImode)
10047 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10048 GEN_INT (64))));
10049 else
10051 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10052 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10056 /* Emit a little-endian load from vector memory location SOURCE to VSX
10057 register DEST in mode MODE. The load is done with two permuting
10058 insns that represent an lxvd2x and an xxpermdi.
10059 void
10060 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10062 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10063 V1TImode). */
10064 if (mode == TImode || mode == V1TImode)
10066 mode = V2DImode;
10067 dest = gen_lowpart (V2DImode, dest);
10068 source = adjust_address (source, V2DImode, 0);
10071 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10072 rs6000_emit_le_vsx_permute (tmp, source, mode);
10073 rs6000_emit_le_vsx_permute (dest, tmp, mode);
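/* On little-endian, lxvd2x leaves the two 64-bit halves of the vector
   swapped, so the second permute (an xxpermdi) swaps them back, giving
   DEST the proper little-endian element order.  */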
10076 /* Emit a little-endian store to vector memory location DEST from VSX
10077 register SOURCE in mode MODE. The store is done with two permuting
10078 insns that represent an xxpermdi and an stxvd2x.
10079 void
10080 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10082 /* This should never be called during or after LRA, because it does
10083 not re-permute the source register. It is intended only for use
10084 during expand. */
10085 gcc_assert (!lra_in_progress && !reload_completed);
10087 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10088 V1TImode). */
10089 if (mode == TImode || mode == V1TImode)
10091 mode = V2DImode;
10092 dest = adjust_address (dest, V2DImode, 0);
10093 source = gen_lowpart (V2DImode, source);
10096 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10097 rs6000_emit_le_vsx_permute (tmp, source, mode);
10098 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10101 /* Emit a sequence representing a little-endian VSX load or store,
10102 moving data from SOURCE to DEST in mode MODE. This is done
10103 separately from rs6000_emit_move to ensure it is called only
10104 during expand. LE VSX loads and stores introduced later are
10105 handled with a split. The expand-time RTL generation allows
10106 us to optimize away redundant pairs of register-permutes. */
10107 void
10108 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10110 gcc_assert (!BYTES_BIG_ENDIAN
10111 && VECTOR_MEM_VSX_P (mode)
10112 && !TARGET_P9_VECTOR
10113 && !gpr_or_gpr_p (dest, source)
10114 && (MEM_P (source) ^ MEM_P (dest)));
10116 if (MEM_P (source))
10118 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10119 rs6000_emit_le_vsx_load (dest, source, mode);
10121 else
10123 if (!REG_P (source))
10124 source = force_reg (mode, source);
10125 rs6000_emit_le_vsx_store (dest, source, mode);
10129 /* Return whether an SFmode or SImode move can be done without converting one
10130 mode to another. This arises when we have:
10132 (SUBREG:SF (REG:SI ...))
10133 (SUBREG:SI (REG:SF ...))
10135 and one of the values is in a floating point/vector register, where SFmode
10136 scalars are stored in DFmode format. */
10138 bool
10139 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10141 if (TARGET_ALLOW_SF_SUBREG)
10142 return true;
10144 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10145 return true;
10147 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10148 return true;
10150 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10151 if (SUBREG_P (dest))
10153 rtx dest_subreg = SUBREG_REG (dest);
10154 rtx src_subreg = SUBREG_REG (src);
10155 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10158 return false;
10162 /* Helper function to change moves with:
10164 (SUBREG:SF (REG:SI)) and
10165 (SUBREG:SI (REG:SF))
10167 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10168 values are stored as DFmode values in the VSX registers. We need to convert
10169 the bits before we can use a direct move or operate on the bits in the
10170 vector register as an integer type.
10172 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10174 static bool
10175 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10177 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10178 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10179 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10181 rtx inner_source = SUBREG_REG (source);
10182 machine_mode inner_mode = GET_MODE (inner_source);
10184 if (mode == SImode && inner_mode == SFmode)
10186 emit_insn (gen_movsi_from_sf (dest, inner_source));
10187 return true;
10190 if (mode == SFmode && inner_mode == SImode)
10192 emit_insn (gen_movsf_from_si (dest, inner_source));
10193 return true;
10197 return false;
10200 /* Emit a move from SOURCE to DEST in mode MODE. */
10201 void
10202 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10204 rtx operands[2];
10205 operands[0] = dest;
10206 operands[1] = source;
10208 if (TARGET_DEBUG_ADDR)
10210 fprintf (stderr,
10211 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10212 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10213 GET_MODE_NAME (mode),
10214 lra_in_progress,
10215 reload_completed,
10216 can_create_pseudo_p ());
10217 debug_rtx (dest);
10218 fprintf (stderr, "source:\n");
10219 debug_rtx (source);
10222 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10223 if (CONST_WIDE_INT_P (operands[1])
10224 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10226 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10227 gcc_unreachable ();
10230 #ifdef HAVE_AS_GNU_ATTRIBUTE
10231 /* If we use a long double type, set the flags in .gnu_attribute that say
10232 what the long double type is. This is to allow the linker's warning
10233 message for the wrong long double to be useful, even if the function does
10234 not do a call (for example, doing a 128-bit add on power9 if the long
10235 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
10236 are used and they are not the default long double type. */
10237 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
10239 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
10240 rs6000_passes_float = rs6000_passes_long_double = true;
10242 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
10243 rs6000_passes_float = rs6000_passes_long_double = true;
10245 #endif
10247 /* See if we need to special case SImode/SFmode SUBREG moves. */
10248 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10249 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10250 return;
10252 /* Check if GCC is setting up a block move that will end up using FP
10253 registers as temporaries. We must make sure this is acceptable. */
10254 if (GET_CODE (operands[0]) == MEM
10255 && GET_CODE (operands[1]) == MEM
10256 && mode == DImode
10257 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10258 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10259 && ! (rs6000_slow_unaligned_access (SImode,
10260 (MEM_ALIGN (operands[0]) > 32
10261 ? 32 : MEM_ALIGN (operands[0])))
10262 || rs6000_slow_unaligned_access (SImode,
10263 (MEM_ALIGN (operands[1]) > 32
10264 ? 32 : MEM_ALIGN (operands[1]))))
10265 && ! MEM_VOLATILE_P (operands [0])
10266 && ! MEM_VOLATILE_P (operands [1]))
10268 emit_move_insn (adjust_address (operands[0], SImode, 0),
10269 adjust_address (operands[1], SImode, 0));
10270 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10271 adjust_address (copy_rtx (operands[1]), SImode, 4));
10272 return;
10275 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10276 && !gpc_reg_operand (operands[1], mode))
10277 operands[1] = force_reg (mode, operands[1]);
10279 /* Recognize the case where operand[1] is a reference to thread-local
10280 data and load its address to a register. */
10281 if (tls_referenced_p (operands[1]))
10283 enum tls_model model;
10284 rtx tmp = operands[1];
10285 rtx addend = NULL;
10287 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10289 addend = XEXP (XEXP (tmp, 0), 1);
10290 tmp = XEXP (XEXP (tmp, 0), 0);
10293 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10294 model = SYMBOL_REF_TLS_MODEL (tmp);
10295 gcc_assert (model != 0);
10297 tmp = rs6000_legitimize_tls_address (tmp, model);
10298 if (addend)
10300 tmp = gen_rtx_PLUS (mode, tmp, addend);
10301 tmp = force_operand (tmp, operands[0]);
10303 operands[1] = tmp;
10306 /* 128-bit constant floating-point values on Darwin should really be loaded
10307 as two parts. However, this premature splitting is a problem when DFmode
10308 values can go into Altivec registers. */
10309 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10310 && GET_CODE (operands[1]) == CONST_DOUBLE)
10312 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10313 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10314 DFmode);
10315 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10316 GET_MODE_SIZE (DFmode)),
10317 simplify_gen_subreg (DFmode, operands[1], mode,
10318 GET_MODE_SIZE (DFmode)),
10319 DFmode);
10320 return;
10323 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10324 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10325 we have no analogous movsd_store for this. */
10326 if (lra_in_progress && mode == DDmode
10327 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10328 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10329 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10330 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10332 enum reg_class cl;
10333 int regno = REGNO (SUBREG_REG (operands[1]));
10335 if (regno >= FIRST_PSEUDO_REGISTER)
10337 cl = reg_preferred_class (regno);
10338 regno = reg_renumber[regno];
10339 if (regno < 0)
10340 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10342 if (regno >= 0 && ! FP_REGNO_P (regno))
10344 mode = SDmode;
10345 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10346 operands[1] = SUBREG_REG (operands[1]);
10349 if (lra_in_progress
10350 && mode == SDmode
10351 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10352 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10353 && (REG_P (operands[1])
10354 || (GET_CODE (operands[1]) == SUBREG
10355 && REG_P (SUBREG_REG (operands[1])))))
10357 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10358 ? SUBREG_REG (operands[1]) : operands[1]);
10359 enum reg_class cl;
10361 if (regno >= FIRST_PSEUDO_REGISTER)
10363 cl = reg_preferred_class (regno);
10364 gcc_assert (cl != NO_REGS);
10365 regno = reg_renumber[regno];
10366 if (regno < 0)
10367 regno = ira_class_hard_regs[cl][0];
10369 if (FP_REGNO_P (regno))
10371 if (GET_MODE (operands[0]) != DDmode)
10372 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10373 emit_insn (gen_movsd_store (operands[0], operands[1]));
10375 else if (INT_REGNO_P (regno))
10376 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10377 else
10378 gcc_unreachable();
10379 return;
10381 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10382 p:DD)) if p0 is not of floating point class and p1 is spilled, as
10383 we have no analogous movsd_load for this. */
10384 if (lra_in_progress && mode == DDmode
10385 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10386 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10387 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10388 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10390 enum reg_class cl;
10391 int regno = REGNO (SUBREG_REG (operands[0]));
10393 if (regno >= FIRST_PSEUDO_REGISTER)
10395 cl = reg_preferred_class (regno);
10396 regno = reg_renumber[regno];
10397 if (regno < 0)
10398 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10400 if (regno >= 0 && ! FP_REGNO_P (regno))
10402 mode = SDmode;
10403 operands[0] = SUBREG_REG (operands[0]);
10404 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10407 if (lra_in_progress
10408 && mode == SDmode
10409 && (REG_P (operands[0])
10410 || (GET_CODE (operands[0]) == SUBREG
10411 && REG_P (SUBREG_REG (operands[0]))))
10412 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10413 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10415 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10416 ? SUBREG_REG (operands[0]) : operands[0]);
10417 enum reg_class cl;
10419 if (regno >= FIRST_PSEUDO_REGISTER)
10421 cl = reg_preferred_class (regno);
10422 gcc_assert (cl != NO_REGS);
10423 regno = reg_renumber[regno];
10424 if (regno < 0)
10425 regno = ira_class_hard_regs[cl][0];
10427 if (FP_REGNO_P (regno))
10429 if (GET_MODE (operands[1]) != DDmode)
10430 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10431 emit_insn (gen_movsd_load (operands[0], operands[1]));
10433 else if (INT_REGNO_P (regno))
10434 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10435 else
10436 gcc_unreachable();
10437 return;
10440 /* FIXME: In the long term, this switch statement should go away
10441 and be replaced by a sequence of tests based on things like
10442 mode == Pmode. */
10443 switch (mode)
10445 case E_HImode:
10446 case E_QImode:
10447 if (CONSTANT_P (operands[1])
10448 && GET_CODE (operands[1]) != CONST_INT)
10449 operands[1] = force_const_mem (mode, operands[1]);
10450 break;
10452 case E_TFmode:
10453 case E_TDmode:
10454 case E_IFmode:
10455 case E_KFmode:
10456 if (FLOAT128_2REG_P (mode))
10457 rs6000_eliminate_indexed_memrefs (operands);
10458 /* fall through */
10460 case E_DFmode:
10461 case E_DDmode:
10462 case E_SFmode:
10463 case E_SDmode:
10464 if (CONSTANT_P (operands[1])
10465 && ! easy_fp_constant (operands[1], mode))
10466 operands[1] = force_const_mem (mode, operands[1]);
10467 break;
10469 case E_V16QImode:
10470 case E_V8HImode:
10471 case E_V4SFmode:
10472 case E_V4SImode:
10473 case E_V2DFmode:
10474 case E_V2DImode:
10475 case E_V1TImode:
10476 if (CONSTANT_P (operands[1])
10477 && !easy_vector_constant (operands[1], mode))
10478 operands[1] = force_const_mem (mode, operands[1]);
10479 break;
10481 case E_SImode:
10482 case E_DImode:
10483 /* Use the default pattern for the address of ELF small data. */
10484 if (TARGET_ELF
10485 && mode == Pmode
10486 && DEFAULT_ABI == ABI_V4
10487 && (GET_CODE (operands[1]) == SYMBOL_REF
10488 || GET_CODE (operands[1]) == CONST)
10489 && small_data_operand (operands[1], mode))
10491 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10492 return;
10495 if (DEFAULT_ABI == ABI_V4
10496 && mode == Pmode && mode == SImode
10497 && flag_pic == 1 && got_operand (operands[1], mode))
10499 emit_insn (gen_movsi_got (operands[0], operands[1]));
10500 return;
10503 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10504 && TARGET_NO_TOC
10505 && ! flag_pic
10506 && mode == Pmode
10507 && CONSTANT_P (operands[1])
10508 && GET_CODE (operands[1]) != HIGH
10509 && GET_CODE (operands[1]) != CONST_INT)
10511 rtx target = (!can_create_pseudo_p ()
10512 ? operands[0]
10513 : gen_reg_rtx (mode));
10515 /* If this is a function address on -mcall-aixdesc,
10516 convert it to the address of the descriptor. */
10517 if (DEFAULT_ABI == ABI_AIX
10518 && GET_CODE (operands[1]) == SYMBOL_REF
10519 && XSTR (operands[1], 0)[0] == '.')
10521 const char *name = XSTR (operands[1], 0);
10522 rtx new_ref;
10523 while (*name == '.')
10524 name++;
10525 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10526 CONSTANT_POOL_ADDRESS_P (new_ref)
10527 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10528 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10529 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10530 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10531 operands[1] = new_ref;
10534 if (DEFAULT_ABI == ABI_DARWIN)
10536 #if TARGET_MACHO
10537 if (MACHO_DYNAMIC_NO_PIC_P)
10539 /* Take care of any required data indirection. */
10540 operands[1] = rs6000_machopic_legitimize_pic_address (
10541 operands[1], mode, operands[0]);
10542 if (operands[0] != operands[1])
10543 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10544 return;
10546 #endif
10547 emit_insn (gen_macho_high (target, operands[1]));
10548 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10549 return;
10552 emit_insn (gen_elf_high (target, operands[1]));
10553 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10554 return;
10557 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10558 and we have put it in the TOC, we just need to make a TOC-relative
10559 reference to it. */
10560 if (TARGET_TOC
10561 && GET_CODE (operands[1]) == SYMBOL_REF
10562 && use_toc_relative_ref (operands[1], mode))
10563 operands[1] = create_TOC_reference (operands[1], operands[0]);
10564 else if (mode == Pmode
10565 && CONSTANT_P (operands[1])
10566 && GET_CODE (operands[1]) != HIGH
10567 && ((GET_CODE (operands[1]) != CONST_INT
10568 && ! easy_fp_constant (operands[1], mode))
10569 || (GET_CODE (operands[1]) == CONST_INT
10570 && (num_insns_constant (operands[1], mode)
10571 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10572 || (GET_CODE (operands[0]) == REG
10573 && FP_REGNO_P (REGNO (operands[0]))))
10574 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10575 && (TARGET_CMODEL == CMODEL_SMALL
10576 || can_create_pseudo_p ()
10577 || (REG_P (operands[0])
10578 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10581 #if TARGET_MACHO
10582 /* Darwin uses a special PIC legitimizer. */
10583 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10585 operands[1] =
10586 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10587 operands[0]);
10588 if (operands[0] != operands[1])
10589 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10590 return;
10592 #endif
10594 /* If we are to limit the number of things we put in the TOC and
10595 this is a symbol plus a constant we can add in one insn,
10596 just put the symbol in the TOC and add the constant. */
10597 if (GET_CODE (operands[1]) == CONST
10598 && TARGET_NO_SUM_IN_TOC
10599 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10600 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10601 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10602 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10603 && ! side_effects_p (operands[0]))
10605 rtx sym =
10606 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10607 rtx other = XEXP (XEXP (operands[1], 0), 1);
10609 sym = force_reg (mode, sym);
10610 emit_insn (gen_add3_insn (operands[0], sym, other));
10611 return;
10614 operands[1] = force_const_mem (mode, operands[1]);
10616 if (TARGET_TOC
10617 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10618 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10620 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10621 operands[0]);
10622 operands[1] = gen_const_mem (mode, tocref);
10623 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10626 break;
10628 case E_TImode:
10629 if (!VECTOR_MEM_VSX_P (TImode))
10630 rs6000_eliminate_indexed_memrefs (operands);
10631 break;
10633 case E_PTImode:
10634 rs6000_eliminate_indexed_memrefs (operands);
10635 break;
10637 default:
10638 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10641 /* Above, we may have called force_const_mem which may have returned
10642 an invalid address. If we can, fix this up; otherwise, reload will
10643 have to deal with it. */
10644 if (GET_CODE (operands[1]) == MEM)
10645 operands[1] = validize_mem (operands[1]);
10647 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10650 /* Nonzero if we can use a floating-point register to pass this arg. */
10651 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10652 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10653 && (CUM)->fregno <= FP_ARG_MAX_REG \
10654 && TARGET_HARD_FLOAT)
10656 /* Nonzero if we can use an AltiVec register to pass this arg. */
10657 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10658 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10659 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10660 && TARGET_ALTIVEC_ABI \
10661 && (NAMED))
10663 /* Walk down the type tree of TYPE counting consecutive base elements.
10664 If *MODEP is VOIDmode, then set it to the first valid floating point
10665 or vector type. If a non-floating point or vector type is found, or
10666 if a floating point or vector type that doesn't match a non-VOIDmode
10667 *MODEP is found, then return -1, otherwise return the count in the
10668 sub-tree. */
10670 static int
10671 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10673 machine_mode mode;
10674 HOST_WIDE_INT size;
10676 switch (TREE_CODE (type))
10678 case REAL_TYPE:
10679 mode = TYPE_MODE (type);
10680 if (!SCALAR_FLOAT_MODE_P (mode))
10681 return -1;
10683 if (*modep == VOIDmode)
10684 *modep = mode;
10686 if (*modep == mode)
10687 return 1;
10689 break;
10691 case COMPLEX_TYPE:
10692 mode = TYPE_MODE (TREE_TYPE (type));
10693 if (!SCALAR_FLOAT_MODE_P (mode))
10694 return -1;
10696 if (*modep == VOIDmode)
10697 *modep = mode;
10699 if (*modep == mode)
10700 return 2;
10702 break;
10704 case VECTOR_TYPE:
10705 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10706 return -1;
10708 /* Use V4SImode as representative of all 128-bit vector types. */
10709 size = int_size_in_bytes (type);
10710 switch (size)
10712 case 16:
10713 mode = V4SImode;
10714 break;
10715 default:
10716 return -1;
10719 if (*modep == VOIDmode)
10720 *modep = mode;
10722 /* Vector modes are considered to be opaque: two vectors are
10723 equivalent for the purposes of being homogeneous aggregates
10724 if they are the same size. */
10725 if (*modep == mode)
10726 return 1;
10728 break;
10730 case ARRAY_TYPE:
10732 int count;
10733 tree index = TYPE_DOMAIN (type);
10735 /* Can't handle incomplete types nor sizes that are not
10736 fixed. */
10737 if (!COMPLETE_TYPE_P (type)
10738 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10739 return -1;
10741 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10742 if (count == -1
10743 || !index
10744 || !TYPE_MAX_VALUE (index)
10745 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10746 || !TYPE_MIN_VALUE (index)
10747 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10748 || count < 0)
10749 return -1;
10751 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10752 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10754 /* There must be no padding. */
10755 if (wi::to_wide (TYPE_SIZE (type))
10756 != count * GET_MODE_BITSIZE (*modep))
10757 return -1;
10759 return count;
10762 case RECORD_TYPE:
10764 int count = 0;
10765 int sub_count;
10766 tree field;
10768 /* Can't handle incomplete types nor sizes that are not
10769 fixed. */
10770 if (!COMPLETE_TYPE_P (type)
10771 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10772 return -1;
10774 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10776 if (TREE_CODE (field) != FIELD_DECL)
10777 continue;
10779 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10780 if (sub_count < 0)
10781 return -1;
10782 count += sub_count;
10785 /* There must be no padding. */
10786 if (wi::to_wide (TYPE_SIZE (type))
10787 != count * GET_MODE_BITSIZE (*modep))
10788 return -1;
10790 return count;
10793 case UNION_TYPE:
10794 case QUAL_UNION_TYPE:
10796 /* These aren't very interesting except in a degenerate case. */
10797 int count = 0;
10798 int sub_count;
10799 tree field;
10801 /* Can't handle incomplete types nor sizes that are not
10802 fixed. */
10803 if (!COMPLETE_TYPE_P (type)
10804 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10805 return -1;
10807 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10809 if (TREE_CODE (field) != FIELD_DECL)
10810 continue;
10812 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10813 if (sub_count < 0)
10814 return -1;
10815 count = count > sub_count ? count : sub_count;
10818 /* There must be no padding. */
10819 if (wi::to_wide (TYPE_SIZE (type))
10820 != count * GET_MODE_BITSIZE (*modep))
10821 return -1;
10823 return count;
10826 default:
10827 break;
10830 return -1;
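/* Example: struct { double r; double i; } yields *MODEP = DFmode and a
   count of 2, while struct { double d; int i; } fails with -1 because
   int is neither a floating point nor a vector type.  */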
10833 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10834 float or vector aggregate that shall be passed in FP/vector registers
10835 according to the ELFv2 ABI, return the homogeneous element mode in
10836 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10838 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10840 static bool
10841 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10842 machine_mode *elt_mode,
10843 int *n_elts)
10845 /* Note that we do not accept complex types at the top level as
10846 homogeneous aggregates; these types are handled via the
10847 targetm.calls.split_complex_arg mechanism. Complex types
10848 can be elements of homogeneous aggregates, however. */
10849 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10850 && AGGREGATE_TYPE_P (type))
10852 machine_mode field_mode = VOIDmode;
10853 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10855 if (field_count > 0)
10857 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10858 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10860 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10861 up to AGGR_ARG_NUM_REG registers. */
10862 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10864 if (elt_mode)
10865 *elt_mode = field_mode;
10866 if (n_elts)
10867 *n_elts = field_count;
10868 return true;
10873 if (elt_mode)
10874 *elt_mode = mode;
10875 if (n_elts)
10876 *n_elts = 1;
10877 return false;
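/* Under the ELFv2 ABI, e.g., struct { float v[4]; } is discovered as a
   homogeneous aggregate with *ELT_MODE = SFmode and *N_ELTS = 4, and is
   therefore passed in four consecutive FP registers.  */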
10880 /* Return a nonzero value to say to return the function value in
10881 memory, just as large structures are always returned. TYPE will be
10882 the data type of the value, and FNTYPE will be the type of the
10883 function doing the returning, or @code{NULL} for libcalls.
10885 The AIX ABI for the RS/6000 specifies that all structures are
10886 returned in memory. The Darwin ABI does the same.
10888 For the Darwin 64 Bit ABI, a function result can be returned in
10889 registers or in memory, depending on the size of the return data
10890 type. If it is returned in registers, the value occupies the same
10891 registers as it would if it were the first and only function
10892 argument. Otherwise, the function places its result in memory at
10893 the location pointed to by GPR3.
10895 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10896 but a draft put them in memory, and GCC used to implement the draft
10897 instead of the final standard. Therefore, aix_struct_return
10898 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10899 compatibility can change DRAFT_V4_STRUCT_RET to override the
10900 default, and -m switches get the final word. See
10901 rs6000_option_override_internal for more details.
10903 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10904 long double support is enabled. These values are returned in memory.
10906 int_size_in_bytes returns -1 for variable size objects, which go in
10907 memory always. The cast to unsigned makes -1 > 8. */
10909 static bool
10910 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10912 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10913 if (TARGET_MACHO
10914 && rs6000_darwin64_abi
10915 && TREE_CODE (type) == RECORD_TYPE
10916 && int_size_in_bytes (type) > 0)
10918 CUMULATIVE_ARGS valcum;
10919 rtx valret;
10921 valcum.words = 0;
10922 valcum.fregno = FP_ARG_MIN_REG;
10923 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10924 /* Do a trial code generation as if this were going to be passed
10925 as an argument; if any part goes in memory, we return NULL. */
10926 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10927 if (valret)
10928 return false;
10929 /* Otherwise fall through to more conventional ABI rules. */
10932 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
10933 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10934 NULL, NULL))
10935 return false;
10937 /* The ELFv2 ABI returns aggregates up to 16B in registers */
10938 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10939 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10940 return false;
10942 if (AGGREGATE_TYPE_P (type)
10943 && (aix_struct_return
10944 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10945 return true;
10947 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10948 modes only exist for GCC vector types if -maltivec. */
10949 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10950 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10951 return false;
10953 /* Return synthetic vectors in memory. */
10954 if (TREE_CODE (type) == VECTOR_TYPE
10955 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10957 static bool warned_for_return_big_vectors = false;
10958 if (!warned_for_return_big_vectors)
10960 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10961 "non-standard ABI extension with no compatibility "
10962 "guarantee");
10963 warned_for_return_big_vectors = true;
10965 return true;
10968 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10969 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10970 return true;
10972 return false;
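/* Worked examples for the rules above (illustrative only): given

       struct s8 { int a, b; } (8 bytes)
       struct s24 { double x, y, z; } (24 bytes)

   the ELFv2 ABI returns s24 in three FPRs (a homogeneous aggregate) and
   s8 in GPRs (at most 16 bytes), while with aix_struct_return set both
   are returned in memory; classic SVR4 with aix_struct_return clear
   returns s8 in r3/r4. */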
10975 /* Specify whether values returned in registers should be at the most
10976 significant end of a register. We want aggregates returned by
10977 value to match the way aggregates are passed to functions. */
10979 static bool
10980 rs6000_return_in_msb (const_tree valtype)
10982 return (DEFAULT_ABI == ABI_ELFv2
10983 && BYTES_BIG_ENDIAN
10984 && AGGREGATE_TYPE_P (valtype)
10985 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10986 == PAD_UPWARD));
10989 #ifdef HAVE_AS_GNU_ATTRIBUTE
10990 /* Return TRUE if a call to function FNDECL may potentially affect
10991 the function calling ABI of the object file. */
10993 static bool
10994 call_ABI_of_interest (tree fndecl)
10996 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10998 struct cgraph_node *c_node;
11000 /* Libcalls are always interesting. */
11001 if (fndecl == NULL_TREE)
11002 return true;
11004 /* Any call to an external function is interesting. */
11005 if (DECL_EXTERNAL (fndecl))
11006 return true;
11008 /* Interesting functions that we are emitting in this object file. */
11009 c_node = cgraph_node::get (fndecl);
11010 c_node = c_node->ultimate_alias_target ();
11011 return !c_node->only_called_directly_p ();
11013 return false;
11015 #endif
11017 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11018 for a call to a function whose data type is FNTYPE.
11019 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
11021 For incoming args we set the number of arguments in the prototype large
11022 so we never return a PARALLEL. */
11024 void
11025 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11026 rtx libname ATTRIBUTE_UNUSED, int incoming,
11027 int libcall, int n_named_args,
11028 tree fndecl ATTRIBUTE_UNUSED,
11029 machine_mode return_mode ATTRIBUTE_UNUSED)
11031 static CUMULATIVE_ARGS zero_cumulative;
11033 *cum = zero_cumulative;
11034 cum->words = 0;
11035 cum->fregno = FP_ARG_MIN_REG;
11036 cum->vregno = ALTIVEC_ARG_MIN_REG;
11037 cum->prototype = (fntype && prototype_p (fntype));
11038 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11039 ? CALL_LIBCALL : CALL_NORMAL);
11040 cum->sysv_gregno = GP_ARG_MIN_REG;
11041 cum->stdarg = stdarg_p (fntype);
11042 cum->libcall = libcall;
11044 cum->nargs_prototype = 0;
11045 if (incoming || cum->prototype)
11046 cum->nargs_prototype = n_named_args;
11048 /* Check for a longcall attribute. */
11049 if ((!fntype && rs6000_default_long_calls)
11050 || (fntype
11051 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11052 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11053 cum->call_cookie |= CALL_LONG;
11055 if (TARGET_DEBUG_ARG)
11057 fprintf (stderr, "\ninit_cumulative_args:");
11058 if (fntype)
11060 tree ret_type = TREE_TYPE (fntype);
11061 fprintf (stderr, " ret code = %s,",
11062 get_tree_code_name (TREE_CODE (ret_type)));
11065 if (cum->call_cookie & CALL_LONG)
11066 fprintf (stderr, " longcall,");
11068 fprintf (stderr, " proto = %d, nargs = %d\n",
11069 cum->prototype, cum->nargs_prototype);
11072 #ifdef HAVE_AS_GNU_ATTRIBUTE
11073 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11075 cum->escapes = call_ABI_of_interest (fndecl);
11076 if (cum->escapes)
11078 tree return_type;
11080 if (fntype)
11082 return_type = TREE_TYPE (fntype);
11083 return_mode = TYPE_MODE (return_type);
11085 else
11086 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11088 if (return_type != NULL)
11090 if (TREE_CODE (return_type) == RECORD_TYPE
11091 && TYPE_TRANSPARENT_AGGR (return_type))
11093 return_type = TREE_TYPE (first_field (return_type));
11094 return_mode = TYPE_MODE (return_type);
11096 if (AGGREGATE_TYPE_P (return_type)
11097 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11098 <= 8))
11099 rs6000_returns_struct = true;
11101 if (SCALAR_FLOAT_MODE_P (return_mode))
11103 rs6000_passes_float = true;
11104 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11105 && (FLOAT128_IBM_P (return_mode)
11106 || FLOAT128_IEEE_P (return_mode)
11107 || (return_type != NULL
11108 && (TYPE_MAIN_VARIANT (return_type)
11109 == long_double_type_node))))
11110 rs6000_passes_long_double = true;
11112 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
11113 rs6000_passes_vector = true;
11116 #endif
11118 if (fntype
11119 && !TARGET_ALTIVEC
11120 && TARGET_ALTIVEC_ABI
11121 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11123 error ("cannot return value in vector register because"
11124 " altivec instructions are disabled, use %qs"
11125 " to enable them", "-maltivec");
11129 /* The mode the ABI uses for a word. This is not the same as word_mode
11130 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11132 static scalar_int_mode
11133 rs6000_abi_word_mode (void)
11135 return TARGET_32BIT ? SImode : DImode;
11138 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11139 static char *
11140 rs6000_offload_options (void)
11142 if (TARGET_64BIT)
11143 return xstrdup ("-foffload-abi=lp64");
11144 else
11145 return xstrdup ("-foffload-abi=ilp32");
11148 /* On rs6000, function arguments are promoted, as are function return
11149 values. */
11151 static machine_mode
11152 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11153 machine_mode mode,
11154 int *punsignedp ATTRIBUTE_UNUSED,
11155 const_tree, int)
11157 PROMOTE_MODE (mode, *punsignedp, type);
11159 return mode;
11162 /* Return true if TYPE must be passed on the stack and not in registers. */
11164 static bool
11165 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11167 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11168 return must_pass_in_stack_var_size (mode, type);
11169 else
11170 return must_pass_in_stack_var_size_or_pad (mode, type);
11173 static inline bool
11174 is_complex_IBM_long_double (machine_mode mode)
11176 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
11179 /* Whether ABI_V4 passes MODE args to a function in floating point
11180 registers. */
11182 static bool
11183 abi_v4_pass_in_fpr (machine_mode mode, bool named)
11185 if (!TARGET_HARD_FLOAT)
11186 return false;
11187 if (mode == DFmode)
11188 return true;
11189 if (mode == SFmode && named)
11190 return true;
11191 /* ABI_V4 passes complex IBM long double in 8 gprs.
11192 Stupid, but we can't change the ABI now. */
11193 if (is_complex_IBM_long_double (mode))
11194 return false;
11195 if (FLOAT128_2REG_P (mode))
11196 return true;
11197 if (DECIMAL_FLOAT_MODE_P (mode))
11198 return true;
11199 return false;
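/* Illustrative cases for the tests above, assuming hard float: a double
   (DFmode) always goes in an FPR and a float (SFmode) only when named;
   IBM extended long double (FLOAT128_2REG_P) takes two FPRs and the
   decimal float modes also use FPRs; but a complex IBM long double
   (ICmode, or TCmode in IBM format) goes in eight GPRs instead. */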
11202 /* Implement TARGET_FUNCTION_ARG_PADDING.
11204 For the AIX ABI structs are always stored left shifted in their
11205 argument slot. */
11207 static pad_direction
11208 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11210 #ifndef AGGREGATE_PADDING_FIXED
11211 #define AGGREGATE_PADDING_FIXED 0
11212 #endif
11213 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11214 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11215 #endif
11217 if (!AGGREGATE_PADDING_FIXED)
11219 /* GCC used to pass structures of the same size as integer types as
11220 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11221 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11222 passed padded downward, except that -mstrict-align further
11223 muddied the water in that multi-component structures of 2 and 4
11224 bytes in size were passed padded upward.
11226 The following arranges for best compatibility with previous
11227 versions of gcc, but removes the -mstrict-align dependency. */
11228 if (BYTES_BIG_ENDIAN)
11230 HOST_WIDE_INT size = 0;
11232 if (mode == BLKmode)
11234 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11235 size = int_size_in_bytes (type);
11237 else
11238 size = GET_MODE_SIZE (mode);
11240 if (size == 1 || size == 2 || size == 4)
11241 return PAD_DOWNWARD;
11243 return PAD_UPWARD;
11246 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11248 if (type != 0 && AGGREGATE_TYPE_P (type))
11249 return PAD_UPWARD;
11252 /* Fall back to the default. */
11253 return default_function_arg_padding (mode, type);
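/* Example of the effect above (a sketch): on a big-endian target a
   2-byte struct is padded downward, so its bytes sit at the least
   significant end of the argument slot, exactly as if it had been
   passed as a short; a 3-byte struct misses the 1/2/4 special case
   and is padded upward like other aggregates. */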
11256 /* If defined, a C expression that gives the alignment boundary, in bits,
11257 of an argument with the specified mode and type. If it is not defined,
11258 PARM_BOUNDARY is used for all arguments.
11260 V.4 wants long longs and doubles to be double word aligned. Just
11261 testing the mode size is a boneheaded way to do this as it means
11262 that other types such as complex int are also double word aligned.
11263 However, we're stuck with this because changing the ABI might break
11264 existing library interfaces.
11266 Quadword align Altivec/VSX vectors.
11267 Quadword align large synthetic vector types. */
11269 static unsigned int
11270 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11272 machine_mode elt_mode;
11273 int n_elts;
11275 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11277 if (DEFAULT_ABI == ABI_V4
11278 && (GET_MODE_SIZE (mode) == 8
11279 || (TARGET_HARD_FLOAT
11280 && !is_complex_IBM_long_double (mode)
11281 && FLOAT128_2REG_P (mode))))
11282 return 64;
11283 else if (FLOAT128_VECTOR_P (mode))
11284 return 128;
11285 else if (type && TREE_CODE (type) == VECTOR_TYPE
11286 && int_size_in_bytes (type) >= 8
11287 && int_size_in_bytes (type) < 16)
11288 return 64;
11289 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11290 || (type && TREE_CODE (type) == VECTOR_TYPE
11291 && int_size_in_bytes (type) >= 16))
11292 return 128;
11294 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11295 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11296 -mcompat-align-parm is used. */
11297 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11298 || DEFAULT_ABI == ABI_ELFv2)
11299 && type && TYPE_ALIGN (type) > 64)
11301 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11302 or homogeneous float/vector aggregates here. We already handled
11303 vector aggregates above, but still need to check for float here. */
11304 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11305 && !SCALAR_FLOAT_MODE_P (elt_mode));
11307 /* We used to check for BLKmode instead of the above aggregate type
11308 check. Warn when this results in any difference to the ABI. */
11309 if (aggregate_p != (mode == BLKmode))
11311 static bool warned;
11312 if (!warned && warn_psabi)
11314 warned = true;
11315 inform (input_location,
11316 "the ABI of passing aggregates with %d-byte alignment"
11317 " has changed in GCC 5",
11318 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11322 if (aggregate_p)
11323 return 128;
11326 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11327 implement the "aggregate type" check as a BLKmode check here; this
11328 means certain aggregate types are in fact not aligned. */
11329 if (TARGET_MACHO && rs6000_darwin64_abi
11330 && mode == BLKmode
11331 && type && TYPE_ALIGN (type) > 64)
11332 return 128;
11334 return PARM_BOUNDARY;
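/* Typical results, for illustration: under ABI_V4 a long long or a
   double yields 64; an IEEE 128-bit float (FLOAT128_VECTOR_P) or an
   Altivec/VSX vector yields 128; a synthetic vector of 8 to 15 bytes
   yields 64 and one of 16 bytes or more yields 128; an aggregate
   aligned above 64 bits yields 128 under ELFv2 (and under AIX unless
   -mcompat-align-parm); anything else falls back to PARM_BOUNDARY. */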
11337 /* The offset in words to the start of the parameter save area. */
11339 static unsigned int
11340 rs6000_parm_offset (void)
11342 return (DEFAULT_ABI == ABI_V4 ? 2
11343 : DEFAULT_ABI == ABI_ELFv2 ? 4
11344 : 6);
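/* In bytes, these counts correspond to the fixed linkage area that sits
   below the parameter save area: 2 words (back chain + LR save word)
   for V.4, and 4 resp. 6 doublewords -- 32 resp. 48 bytes on 64-bit
   targets -- for the ELFv2 and AIX/ELFv1 stack headers. */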
11347 /* For a function parm of MODE and TYPE, return the starting word in
11348 the parameter area. NWORDS of the parameter area are already used. */
11350 static unsigned int
11351 rs6000_parm_start (machine_mode mode, const_tree type,
11352 unsigned int nwords)
11354 unsigned int align;
11356 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11357 return nwords + (-(rs6000_parm_offset () + nwords) & align);
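/* Worked example of the expression above: a 16-byte-aligned argument on
   a 64-bit AIX target has a boundary of 128 bits, so ALIGN = 128/64 - 1
   = 1. With rs6000_parm_offset () == 6 and NWORDS == 3, the result is
   3 + (-(6 + 3) & 1) = 4: one padding word is skipped so that the
   absolute offset 6 + 4 = 10 words is even, i.e. 16-byte aligned. */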
11360 /* Compute the size (in words) of a function argument. */
11362 static unsigned long
11363 rs6000_arg_size (machine_mode mode, const_tree type)
11365 unsigned long size;
11367 if (mode != BLKmode)
11368 size = GET_MODE_SIZE (mode);
11369 else
11370 size = int_size_in_bytes (type);
11372 if (TARGET_32BIT)
11373 return (size + 3) >> 2;
11374 else
11375 return (size + 7) >> 3;
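/* For example, a 10-byte BLKmode struct occupies (10 + 3) >> 2 = 3
   words on a 32-bit target and (10 + 7) >> 3 = 2 doublewords on a
   64-bit one; DImode takes 2 words on 32-bit but only 1 on 64-bit. */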
11378 /* Use this to flush pending int fields. */
11380 static void
11381 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11382 HOST_WIDE_INT bitpos, int final)
11384 unsigned int startbit, endbit;
11385 int intregs, intoffset;
11387 /* Handle the situations where a float is taking up the first half
11388 of the GPR, and the other half is empty (typically due to
11389 alignment restrictions). We can detect this by an 8-byte-aligned
11390 int field, or by seeing that this is the final flush for this
11391 argument. Count the word and continue on. */
11392 if (cum->floats_in_gpr == 1
11393 && (cum->intoffset % 64 == 0
11394 || (cum->intoffset == -1 && final)))
11396 cum->words++;
11397 cum->floats_in_gpr = 0;
11400 if (cum->intoffset == -1)
11401 return;
11403 intoffset = cum->intoffset;
11404 cum->intoffset = -1;
11405 cum->floats_in_gpr = 0;
11407 if (intoffset % BITS_PER_WORD != 0)
11409 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11410 if (!int_mode_for_size (bits, 0).exists ())
11412 /* We couldn't find an appropriate mode, which happens,
11413 e.g., in packed structs when there are 3 bytes to load.
11414 Move intoffset back to the beginning of the word in this
11415 case. */
11416 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11420 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11421 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11422 intregs = (endbit - startbit) / BITS_PER_WORD;
11423 cum->words += intregs;
11424 /* words should be unsigned. */
11425 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11427 int pad = (endbit/BITS_PER_WORD) - cum->words;
11428 cum->words += pad;
11432 /* The darwin64 ABI calls for us to recurse down through structs,
11433 looking for elements passed in registers. Unfortunately, we have
11434 to track int register count here also because of misalignments
11435 in powerpc alignment mode. */
11437 static void
11438 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11439 const_tree type,
11440 HOST_WIDE_INT startbitpos)
11442 tree f;
11444 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11445 if (TREE_CODE (f) == FIELD_DECL)
11447 HOST_WIDE_INT bitpos = startbitpos;
11448 tree ftype = TREE_TYPE (f);
11449 machine_mode mode;
11450 if (ftype == error_mark_node)
11451 continue;
11452 mode = TYPE_MODE (ftype);
11454 if (DECL_SIZE (f) != 0
11455 && tree_fits_uhwi_p (bit_position (f)))
11456 bitpos += int_bit_position (f);
11458 /* ??? FIXME: else assume zero offset. */
11460 if (TREE_CODE (ftype) == RECORD_TYPE)
11461 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11462 else if (USE_FP_FOR_ARG_P (cum, mode))
11464 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11465 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11466 cum->fregno += n_fpregs;
11467 /* Single-precision floats present a special problem for
11468 us, because they are smaller than an 8-byte GPR, and so
11469 the structure-packing rules combined with the standard
11470 varargs behavior mean that we want to pack float/float
11471 and float/int combinations into a single register's
11472 space. This is complicated by the arg advance flushing,
11473 which works on arbitrarily large groups of int-type
11474 fields. */
11475 if (mode == SFmode)
11477 if (cum->floats_in_gpr == 1)
11479 /* Two floats in a word; count the word and reset
11480 the float count. */
11481 cum->words++;
11482 cum->floats_in_gpr = 0;
11484 else if (bitpos % 64 == 0)
11486 /* A float at the beginning of an 8-byte word;
11487 count it and put off adjusting cum->words until
11488 we see if an arg advance flush is going to do it
11489 for us. */
11490 cum->floats_in_gpr++;
11492 else
11494 /* The float is at the end of a word, preceded
11495 by integer fields, so the arg advance flush
11496 just above has already set cum->words and
11497 everything is taken care of. */
11500 else
11501 cum->words += n_fpregs;
11503 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11505 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11506 cum->vregno++;
11507 cum->words += 2;
11509 else if (cum->intoffset == -1)
11510 cum->intoffset = bitpos;
11514 /* Check for an item that needs to be considered specially under the darwin 64
11515 bit ABI. These are record types where the mode is BLK or the structure is
11516 8 bytes in size. */
11517 static int
11518 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11520 return rs6000_darwin64_abi
11521 && ((mode == BLKmode
11522 && TREE_CODE (type) == RECORD_TYPE
11523 && int_size_in_bytes (type) > 0)
11524 || (type && TREE_CODE (type) == RECORD_TYPE
11525 && int_size_in_bytes (type) == 8)) ? 1 : 0;
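/* For example, under rs6000_darwin64_abi: struct { char c[3]; } is a
   BLKmode record and checks positive via the first clause; struct
   { long x; } is exactly 8 bytes and checks positive via the second;
   a 4-byte struct { int i; } carries scalar SImode and checks
   negative. */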
11528 /* Update the data in CUM to advance over an argument
11529 of mode MODE and data type TYPE.
11530 (TYPE is null for libcalls where that information may not be available.)
11532 Note that for args passed by reference, function_arg will be called
11533 with MODE and TYPE set to that of the pointer to the arg, not the arg
11534 itself. */
11536 static void
11537 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11538 const_tree type, bool named, int depth)
11540 machine_mode elt_mode;
11541 int n_elts;
11543 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11545 /* Only tick off an argument if we're not recursing. */
11546 if (depth == 0)
11547 cum->nargs_prototype--;
11549 #ifdef HAVE_AS_GNU_ATTRIBUTE
11550 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11551 && cum->escapes)
11553 if (SCALAR_FLOAT_MODE_P (mode))
11555 rs6000_passes_float = true;
11556 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11557 && (FLOAT128_IBM_P (mode)
11558 || FLOAT128_IEEE_P (mode)
11559 || (type != NULL
11560 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11561 rs6000_passes_long_double = true;
11563 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11564 rs6000_passes_vector = true;
11566 #endif
11568 if (TARGET_ALTIVEC_ABI
11569 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11570 || (type && TREE_CODE (type) == VECTOR_TYPE
11571 && int_size_in_bytes (type) == 16)))
11573 bool stack = false;
11575 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11577 cum->vregno += n_elts;
11579 if (!TARGET_ALTIVEC)
11580 error ("cannot pass argument in vector register because"
11581 " altivec instructions are disabled, use %qs"
11582 " to enable them", "-maltivec");
11584 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11585 even if it is going to be passed in a vector register.
11586 Darwin does the same for variable-argument functions. */
11587 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11588 && TARGET_64BIT)
11589 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11590 stack = true;
11592 else
11593 stack = true;
11595 if (stack)
11597 int align;
11599 /* Vector parameters must be 16-byte aligned. In 32-bit
11600 mode this means we need to take into account the offset
11601 to the parameter save area. In 64-bit mode, they just
11602 have to start on an even word, since the parameter save
11603 area is 16-byte aligned. */
11604 if (TARGET_32BIT)
11605 align = -(rs6000_parm_offset () + cum->words) & 3;
11606 else
11607 align = cum->words & 1;
11608 cum->words += align + rs6000_arg_size (mode, type);
11610 if (TARGET_DEBUG_ARG)
11612 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11613 cum->words, align);
11614 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11615 cum->nargs_prototype, cum->prototype,
11616 GET_MODE_NAME (mode));
11620 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11622 int size = int_size_in_bytes (type);
11623 /* Variable sized types have size == -1 and are
11624 treated as if consisting entirely of ints.
11625 Pad to 16 byte boundary if needed. */
11626 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11627 && (cum->words % 2) != 0)
11628 cum->words++;
11629 /* For varargs, we can just go up by the size of the struct. */
11630 if (!named)
11631 cum->words += (size + 7) / 8;
11632 else
11634 /* It is tempting to say int register count just goes up by
11635 sizeof(type)/8, but this is wrong in a case such as
11636 { int; double; int; } [powerpc alignment]. We have to
11637 grovel through the fields for these too. */
11638 cum->intoffset = 0;
11639 cum->floats_in_gpr = 0;
11640 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11641 rs6000_darwin64_record_arg_advance_flush (cum,
11642 size * BITS_PER_UNIT, 1);
11644 if (TARGET_DEBUG_ARG)
11646 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11647 cum->words, TYPE_ALIGN (type), size);
11648 fprintf (stderr,
11649 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11650 cum->nargs_prototype, cum->prototype,
11651 GET_MODE_NAME (mode));
11654 else if (DEFAULT_ABI == ABI_V4)
11656 if (abi_v4_pass_in_fpr (mode, named))
11658 /* _Decimal128 must use an even/odd register pair. This assumes
11659 that the register number is odd when fregno is odd. */
11660 if (mode == TDmode && (cum->fregno % 2) == 1)
11661 cum->fregno++;
11663 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11664 <= FP_ARG_V4_MAX_REG)
11665 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11666 else
11668 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11669 if (mode == DFmode || FLOAT128_IBM_P (mode)
11670 || mode == DDmode || mode == TDmode)
11671 cum->words += cum->words & 1;
11672 cum->words += rs6000_arg_size (mode, type);
11675 else
11677 int n_words = rs6000_arg_size (mode, type);
11678 int gregno = cum->sysv_gregno;
11680 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11681 As does any other 2 word item such as complex int due to a
11682 historical mistake. */
11683 if (n_words == 2)
11684 gregno += (1 - gregno) & 1;
11686 /* Multi-reg args are not split between registers and stack. */
11687 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11689 /* Long long is aligned on the stack. So are other 2 word
11690 items such as complex int due to a historical mistake. */
11691 if (n_words == 2)
11692 cum->words += cum->words & 1;
11693 cum->words += n_words;
11696 /* Note: we continue to accumulate gregno even after we've started
11697 spilling to the stack; this signals to expand_builtin_saveregs
11698 that spilling has begun. */
11699 cum->sysv_gregno = gregno + n_words;
11702 if (TARGET_DEBUG_ARG)
11704 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11705 cum->words, cum->fregno);
11706 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11707 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11708 fprintf (stderr, "mode = %4s, named = %d\n",
11709 GET_MODE_NAME (mode), named);
11712 else
11714 int n_words = rs6000_arg_size (mode, type);
11715 int start_words = cum->words;
11716 int align_words = rs6000_parm_start (mode, type, start_words);
11718 cum->words = align_words + n_words;
11720 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11722 /* _Decimal128 must be passed in an even/odd float register pair.
11723 This assumes that the register number is odd when fregno is
11724 odd. */
11725 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11726 cum->fregno++;
11727 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11730 if (TARGET_DEBUG_ARG)
11732 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11733 cum->words, cum->fregno);
11734 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11735 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11736 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11737 named, align_words - start_words, depth);
11742 static void
11743 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11744 const_tree type, bool named)
11746 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named, 0);
11750 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11751 structure between cum->intoffset and bitpos to integer registers. */
11753 static void
11754 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11755 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11757 machine_mode mode;
11758 unsigned int regno;
11759 unsigned int startbit, endbit;
11760 int this_regno, intregs, intoffset;
11761 rtx reg;
11763 if (cum->intoffset == -1)
11764 return;
11766 intoffset = cum->intoffset;
11767 cum->intoffset = -1;
11769 /* If this is the trailing part of a word, try to only load that
11770 much into the register. Otherwise load the whole register. Note
11771 that in the latter case we may pick up unwanted bits. It's not a
11772 problem at the moment, but we may wish to revisit this. */
11774 if (intoffset % BITS_PER_WORD != 0)
11776 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11777 if (!int_mode_for_size (bits, 0).exists (&mode))
11779 /* We couldn't find an appropriate mode, which happens,
11780 e.g., in packed structs when there are 3 bytes to load.
11781 Move intoffset back to the beginning of the word in this
11782 case. */
11783 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11784 mode = word_mode;
11787 else
11788 mode = word_mode;
11790 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11791 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11792 intregs = (endbit - startbit) / BITS_PER_WORD;
11793 this_regno = cum->words + intoffset / BITS_PER_WORD;
11795 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11796 cum->use_stack = 1;
11798 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11799 if (intregs <= 0)
11800 return;
11802 intoffset /= BITS_PER_UNIT;
11805 regno = GP_ARG_MIN_REG + this_regno;
11806 reg = gen_rtx_REG (mode, regno);
11807 rvec[(*k)++] =
11808 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11810 this_regno += 1;
11811 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11812 mode = word_mode;
11813 intregs -= 1;
11815 while (intregs > 0);
11818 /* Recursive workhorse for the following. */
11820 static void
11821 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11822 HOST_WIDE_INT startbitpos, rtx rvec[],
11823 int *k)
11825 tree f;
11827 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11828 if (TREE_CODE (f) == FIELD_DECL)
11830 HOST_WIDE_INT bitpos = startbitpos;
11831 tree ftype = TREE_TYPE (f);
11832 machine_mode mode;
11833 if (ftype == error_mark_node)
11834 continue;
11835 mode = TYPE_MODE (ftype);
11837 if (DECL_SIZE (f) != 0
11838 && tree_fits_uhwi_p (bit_position (f)))
11839 bitpos += int_bit_position (f);
11841 /* ??? FIXME: else assume zero offset. */
11843 if (TREE_CODE (ftype) == RECORD_TYPE)
11844 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11845 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11847 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11848 #if 0
11849 switch (mode)
11851 case E_SCmode: mode = SFmode; break;
11852 case E_DCmode: mode = DFmode; break;
11853 case E_TCmode: mode = TFmode; break;
11854 default: break;
11856 #endif
11857 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11858 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11860 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11861 && (mode == TFmode || mode == TDmode));
11862 /* Long double or _Decimal128 split over regs and memory. */
11863 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11864 cum->use_stack=1;
11866 rvec[(*k)++]
11867 = gen_rtx_EXPR_LIST (VOIDmode,
11868 gen_rtx_REG (mode, cum->fregno++),
11869 GEN_INT (bitpos / BITS_PER_UNIT));
11870 if (FLOAT128_2REG_P (mode))
11871 cum->fregno++;
11873 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11875 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11876 rvec[(*k)++]
11877 = gen_rtx_EXPR_LIST (VOIDmode,
11878 gen_rtx_REG (mode, cum->vregno++),
11879 GEN_INT (bitpos / BITS_PER_UNIT));
11881 else if (cum->intoffset == -1)
11882 cum->intoffset = bitpos;
11886 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11887 the register(s) to be used for each field and subfield of a struct
11888 being passed by value, along with the offset of where the
11889 register's value may be found in the block. FP fields go in FP
11890 register, vector fields go in vector registers, and everything
11891 else goes in int registers, packed as in memory.
11893 This code is also used for function return values. RETVAL indicates
11894 whether this is the case.
11896 Much of this is taken from the SPARC V9 port, which has a similar
11897 calling convention. */
11899 static rtx
11900 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11901 bool named, bool retval)
11903 rtx rvec[FIRST_PSEUDO_REGISTER];
11904 int k = 1, kbase = 1;
11905 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11906 /* This is a copy; modifications are not visible to our caller. */
11907 CUMULATIVE_ARGS copy_cum = *orig_cum;
11908 CUMULATIVE_ARGS *cum = &copy_cum;
11910 /* Pad to 16 byte boundary if needed. */
11911 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11912 && (cum->words % 2) != 0)
11913 cum->words++;
11915 cum->intoffset = 0;
11916 cum->use_stack = 0;
11917 cum->named = named;
11919 /* Put entries into rvec[] for individual FP and vector fields, and
11920 for the chunks of memory that go in int regs. Note we start at
11921 element 1; 0 is reserved for an indication of using memory, and
11922 may or may not be filled in below. */
11923 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11924 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11926 /* If any part of the struct went on the stack put all of it there.
11927 This hack is because the generic code for
11928 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11929 parts of the struct are not at the beginning. */
11930 if (cum->use_stack)
11932 if (retval)
11933 return NULL_RTX; /* doesn't go in registers at all */
11934 kbase = 0;
11935 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11937 if (k > 1 || cum->use_stack)
11938 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11939 else
11940 return NULL_RTX;
11943 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11945 static rtx
11946 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11947 int align_words)
11949 int n_units;
11950 int i, k;
11951 rtx rvec[GP_ARG_NUM_REG + 1];
11953 if (align_words >= GP_ARG_NUM_REG)
11954 return NULL_RTX;
11956 n_units = rs6000_arg_size (mode, type);
11958 /* Optimize the simple case where the arg fits in one gpr, except in
11959 the case of BLKmode due to assign_parms assuming that registers are
11960 BITS_PER_WORD wide. */
11961 if (n_units == 0
11962 || (n_units == 1 && mode != BLKmode))
11963 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11965 k = 0;
11966 if (align_words + n_units > GP_ARG_NUM_REG)
11967 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11968 using a magic NULL_RTX component.
11969 This is not strictly correct. Only some of the arg belongs in
11970 memory, not all of it. However, the normal scheme using
11971 function_arg_partial_nregs can result in unusual subregs, eg.
11972 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11973 store the whole arg to memory is often more efficient than code
11974 to store pieces, and we know that space is available in the right
11975 place for the whole arg. */
11976 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11978 i = 0;
11981 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11982 rtx off = GEN_INT (i++ * 4);
11983 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11985 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11987 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
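/* A sketch of the result for one case: a DFmode argument whose slot
   starts in the last GPR (align_words == GP_ARG_NUM_REG - 1) has
   n_units == 2, so the PARALLEL gets the magic NULL_RTX element plus
   (reg:SI r10) at offset 0 -- the first four bytes travel in r10 and
   the remainder goes to memory. */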
11990 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11991 but must also be copied into the parameter save area starting at
11992 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11993 to the GPRs and/or memory. Return the number of elements used. */
11995 static int
11996 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11997 int align_words, rtx *rvec)
11999 int k = 0;
12001 if (align_words < GP_ARG_NUM_REG)
12003 int n_words = rs6000_arg_size (mode, type);
12005 if (align_words + n_words > GP_ARG_NUM_REG
12006 || mode == BLKmode
12007 || (TARGET_32BIT && TARGET_POWERPC64))
12009 /* If this is partially on the stack, then we only
12010 include the portion actually in registers here. */
12011 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12012 int i = 0;
12014 if (align_words + n_words > GP_ARG_NUM_REG)
12016 /* Not all of the arg fits in gprs. Say that it goes in memory
12017 too, using a magic NULL_RTX component. Also see comment in
12018 rs6000_mixed_function_arg for why the normal
12019 function_arg_partial_nregs scheme doesn't work in this case. */
12020 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12025 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12026 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12027 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12029 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12031 else
12033 /* The whole arg fits in gprs. */
12034 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12035 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12038 else
12040 /* It's entirely in memory. */
12041 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12044 return k;
12047 /* RVEC is a vector of K components of an argument of mode MODE.
12048 Construct the final function_arg return value from it. */
12050 static rtx
12051 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12053 gcc_assert (k >= 1);
12055 /* Avoid returning a PARALLEL in the trivial cases. */
12056 if (k == 1)
12058 if (XEXP (rvec[0], 0) == NULL_RTX)
12059 return NULL_RTX;
12061 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12062 return XEXP (rvec[0], 0);
12065 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12068 /* Determine where to put an argument to a function.
12069 Value is zero to push the argument on the stack,
12070 or a hard register in which to store the argument.
12072 MODE is the argument's machine mode.
12073 TYPE is the data type of the argument (as a tree).
12074 This is null for libcalls where that information may
12075 not be available.
12076 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12077 the preceding args and about the function being called. It is
12078 not modified in this routine.
12079 NAMED is nonzero if this argument is a named parameter
12080 (otherwise it is an extra parameter matching an ellipsis).
12082 On RS/6000 the first eight words of non-FP are normally in registers
12083 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12084 Under V.4, the first 8 FP args are in registers.
12086 If this is floating-point and no prototype is specified, we use
12087 both an FP and integer register (or possibly FP reg and stack). Library
12088 functions (when CALL_LIBCALL is set) always have the proper types for args,
12089 so we can pass the FP value just in one register. emit_library_function
12090 doesn't support PARALLEL anyway.
12092 Note that for args passed by reference, function_arg will be called
12093 with MODE and TYPE set to that of the pointer to the arg, not the arg
12094 itself. */
12096 static rtx
12097 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12098 const_tree type, bool named)
12100 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12101 enum rs6000_abi abi = DEFAULT_ABI;
12102 machine_mode elt_mode;
12103 int n_elts;
12105 /* Return a marker to indicate whether CR1 needs to set or clear the
12106 bit that V.4 uses to say fp args were passed in registers.
12107 Assume that we don't need the marker for software floating point,
12108 or compiler generated library calls. */
12109 if (mode == VOIDmode)
12111 if (abi == ABI_V4
12112 && (cum->call_cookie & CALL_LIBCALL) == 0
12113 && (cum->stdarg
12114 || (cum->nargs_prototype < 0
12115 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12116 && TARGET_HARD_FLOAT)
12117 return GEN_INT (cum->call_cookie
12118 | ((cum->fregno == FP_ARG_MIN_REG)
12119 ? CALL_V4_SET_FP_ARGS
12120 : CALL_V4_CLEAR_FP_ARGS));
12122 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12125 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12127 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12129 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12130 if (rslt != NULL_RTX)
12131 return rslt;
12132 /* Else fall through to usual handling. */
12135 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12137 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12138 rtx r, off;
12139 int i, k = 0;
12141 /* Do we also need to pass this argument in the parameter save area?
12142 Library support functions for IEEE 128-bit are assumed to not need the
12143 value passed both in GPRs and in vector registers. */
12144 if (TARGET_64BIT && !cum->prototype
12145 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12147 int align_words = ROUND_UP (cum->words, 2);
12148 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12151 /* Describe where this argument goes in the vector registers. */
12152 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12154 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12155 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12156 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12159 return rs6000_finish_function_arg (mode, rvec, k);
12161 else if (TARGET_ALTIVEC_ABI
12162 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12163 || (type && TREE_CODE (type) == VECTOR_TYPE
12164 && int_size_in_bytes (type) == 16)))
12166 if (named || abi == ABI_V4)
12167 return NULL_RTX;
12168 else
12170 /* Vector parameters to varargs functions under AIX or Darwin
12171 get passed in memory and possibly also in GPRs. */
12172 int align, align_words, n_words;
12173 machine_mode part_mode;
12175 /* Vector parameters must be 16-byte aligned. In 32-bit
12176 mode this means we need to take into account the offset
12177 to the parameter save area. In 64-bit mode, they just
12178 have to start on an even word, since the parameter save
12179 area is 16-byte aligned. */
12180 if (TARGET_32BIT)
12181 align = -(rs6000_parm_offset () + cum->words) & 3;
12182 else
12183 align = cum->words & 1;
12184 align_words = cum->words + align;
12186 /* Out of registers? Memory, then. */
12187 if (align_words >= GP_ARG_NUM_REG)
12188 return NULL_RTX;
12190 if (TARGET_32BIT && TARGET_POWERPC64)
12191 return rs6000_mixed_function_arg (mode, type, align_words);
12193 /* The vector value goes in GPRs. Only the part of the
12194 value in GPRs is reported here. */
12195 part_mode = mode;
12196 n_words = rs6000_arg_size (mode, type);
12197 if (align_words + n_words > GP_ARG_NUM_REG)
12198 /* Fortunately, there are only two possibilities, the value
12199 is either wholly in GPRs or half in GPRs and half not. */
12200 part_mode = DImode;
12202 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12206 else if (abi == ABI_V4)
12208 if (abi_v4_pass_in_fpr (mode, named))
12210 /* _Decimal128 must use an even/odd register pair. This assumes
12211 that the register number is odd when fregno is odd. */
12212 if (mode == TDmode && (cum->fregno % 2) == 1)
12213 cum->fregno++;
12215 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12216 <= FP_ARG_V4_MAX_REG)
12217 return gen_rtx_REG (mode, cum->fregno);
12218 else
12219 return NULL_RTX;
12221 else
12223 int n_words = rs6000_arg_size (mode, type);
12224 int gregno = cum->sysv_gregno;
12226 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12227 As does any other 2 word item such as complex int due to a
12228 historical mistake. */
12229 if (n_words == 2)
12230 gregno += (1 - gregno) & 1;
12232 /* Multi-reg args are not split between registers and stack. */
12233 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12234 return NULL_RTX;
12236 if (TARGET_32BIT && TARGET_POWERPC64)
12237 return rs6000_mixed_function_arg (mode, type,
12238 gregno - GP_ARG_MIN_REG);
12239 return gen_rtx_REG (mode, gregno);
12242 else
12244 int align_words = rs6000_parm_start (mode, type, cum->words);
12246 /* _Decimal128 must be passed in an even/odd float register pair.
12247 This assumes that the register number is odd when fregno is odd. */
12248 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12249 cum->fregno++;
12251 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12253 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12254 rtx r, off;
12255 int i, k = 0;
12256 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12257 int fpr_words;
12259 /* Do we also need to pass this argument in the parameter
12260 save area? */
12261 if (type && (cum->nargs_prototype <= 0
12262 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12263 && TARGET_XL_COMPAT
12264 && align_words >= GP_ARG_NUM_REG)))
12265 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12267 /* Describe where this argument goes in the fprs. */
12268 for (i = 0; i < n_elts
12269 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12271 /* Check if the argument is split over registers and memory.
12272 This can only ever happen for long double or _Decimal128;
12273 complex types are handled via split_complex_arg. */
12274 machine_mode fmode = elt_mode;
12275 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12277 gcc_assert (FLOAT128_2REG_P (fmode));
12278 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12281 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12282 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12283 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12286 /* If there were not enough FPRs to hold the argument, the rest
12287 usually goes into memory. However, if the current position
12288 is still within the register parameter area, a portion may
12289 actually have to go into GPRs.
12291 Note that it may happen that the portion of the argument
12292 passed in the first "half" of the first GPR was already
12293 passed in the last FPR as well.
12295 For unnamed arguments, we already set up GPRs to cover the
12296 whole argument in rs6000_psave_function_arg, so there is
12297 nothing further to do at this point. */
12298 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12299 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12300 && cum->nargs_prototype > 0)
12302 static bool warned;
12304 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12305 int n_words = rs6000_arg_size (mode, type);
12307 align_words += fpr_words;
12308 n_words -= fpr_words;
12312 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12313 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12314 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12316 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12318 if (!warned && warn_psabi)
12320 warned = true;
12321 inform (input_location,
12322 "the ABI of passing homogeneous float aggregates"
12323 " has changed in GCC 5");
12327 return rs6000_finish_function_arg (mode, rvec, k);
12329 else if (align_words < GP_ARG_NUM_REG)
12331 if (TARGET_32BIT && TARGET_POWERPC64)
12332 return rs6000_mixed_function_arg (mode, type, align_words);
12334 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12336 else
12337 return NULL_RTX;
12341 /* For an arg passed partly in registers and partly in memory, this is
12342 the number of bytes passed in registers. For args passed entirely in
12343 registers or entirely in memory, zero. When an arg is described by a
12344 PARALLEL, perhaps using more than one register type, this function
12345 returns the number of bytes used by the first element of the PARALLEL. */
12347 static int
12348 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12349 tree type, bool named)
12351 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12352 bool passed_in_gprs = true;
12353 int ret = 0;
12354 int align_words;
12355 machine_mode elt_mode;
12356 int n_elts;
12358 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12360 if (DEFAULT_ABI == ABI_V4)
12361 return 0;
12363 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12365 /* If we are passing this arg in the fixed parameter save area (gprs or
12366 memory) as well as VRs, we do not use the partial bytes mechanism;
12367 instead, rs6000_function_arg will return a PARALLEL including a memory
12368 element as necessary. Library support functions for IEEE 128-bit are
12369 assumed to not need the value passed both in GPRs and in vector
12370 registers. */
12371 if (TARGET_64BIT && !cum->prototype
12372 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12373 return 0;
12375 /* Otherwise, we pass in VRs only. Check for partial copies. */
12376 passed_in_gprs = false;
12377 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12378 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12381 /* In this complicated case we just disable the partial_nregs code. */
12382 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12383 return 0;
12385 align_words = rs6000_parm_start (mode, type, cum->words);
12387 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12389 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12391 /* If we are passing this arg in the fixed parameter save area
12392 (gprs or memory) as well as FPRs, we do not use the partial
12393 bytes mechanism; instead, rs6000_function_arg will return a
12394 PARALLEL including a memory element as necessary. */
12395 if (type
12396 && (cum->nargs_prototype <= 0
12397 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12398 && TARGET_XL_COMPAT
12399 && align_words >= GP_ARG_NUM_REG)))
12400 return 0;
12402 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12403 passed_in_gprs = false;
12404 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12406 /* Compute number of bytes / words passed in FPRs. If there
12407 is still space available in the register parameter area
12408 *after* that amount, a part of the argument will be passed
12409 in GPRs. In that case, the total amount passed in any
12410 registers is equal to the amount that would have been passed
12411 in GPRs if everything were passed there, so we fall back to
12412 the GPR code below to compute the appropriate value. */
12413 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12414 * MIN (8, GET_MODE_SIZE (elt_mode)));
12415 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12417 if (align_words + fpr_words < GP_ARG_NUM_REG)
12418 passed_in_gprs = true;
12419 else
12420 ret = fpr;
12424 if (passed_in_gprs
12425 && align_words < GP_ARG_NUM_REG
12426 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12427 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12429 if (ret != 0 && TARGET_DEBUG_ARG)
12430 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12432 return ret;
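/* Worked example (a sketch): a named IBM long double (TFmode, 16 bytes)
   arriving when only FPR 13 remains has n_fpreg == 2 and does not fit,
   so fpr == 8 bytes and fpr_words == 1. If the slot after that word
   still lies within the GPR area, the trailing GPR test computes the
   partial amount; otherwise RET is the 8 bytes passed in FPR 13. */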
12435 /* A C expression that indicates when an argument must be passed by
12436 reference. If nonzero for an argument, a copy of that argument is
12437 made in memory and a pointer to the argument is passed instead of
12438 the argument itself. The pointer is passed in whatever way is
12439 appropriate for passing a pointer to that type.
12441 Under V.4, aggregates and long double are passed by reference.
12443 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12444 reference unless the AltiVec vector extension ABI is in force.
12446 As an extension to all ABIs, variable sized types are passed by
12447 reference. */
12449 static bool
12450 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12451 machine_mode mode, const_tree type,
12452 bool named ATTRIBUTE_UNUSED)
12454 if (!type)
12455 return 0;
12457 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12458 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12460 if (TARGET_DEBUG_ARG)
12461 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12462 return 1;
12465 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12467 if (TARGET_DEBUG_ARG)
12468 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12469 return 1;
12472 if (int_size_in_bytes (type) < 0)
12474 if (TARGET_DEBUG_ARG)
12475 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12476 return 1;
12479 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12480 modes only exist for GCC vector types if -maltivec. */
12481 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12483 if (TARGET_DEBUG_ARG)
12484 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12485 return 1;
12488 /* Pass synthetic vectors in memory. */
12489 if (TREE_CODE (type) == VECTOR_TYPE
12490 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12492 static bool warned_for_pass_big_vectors = false;
12493 if (TARGET_DEBUG_ARG)
12494 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12495 if (!warned_for_pass_big_vectors)
12497 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12498 "non-standard ABI extension with no compatibility "
12499 "guarantee");
12500 warned_for_pass_big_vectors = true;
12502 return 1;
12505 return 0;
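/* Examples of the rules above: under ABI_V4 every struct argument goes
   by reference, as does an IEEE 128-bit float when long double is IEEE
   (TARGET_IEEEQUAD); variable-sized types do on every ABI; and an
   oversized synthetic vector such as

       typedef int v8si __attribute__ ((vector_size (32)));

   is passed by reference with a one-time -Wpsabi warning. */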
12508 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12509 already processed. Return true if the parameter must be passed
12510 (fully or partially) on the stack. */
12512 static bool
12513 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12515 machine_mode mode;
12516 int unsignedp;
12517 rtx entry_parm;
12519 /* Catch errors. */
12520 if (type == NULL || type == error_mark_node)
12521 return true;
12523 /* Handle types with no storage requirement. */
12524 if (TYPE_MODE (type) == VOIDmode)
12525 return false;
12527 /* Handle complex types. */
12528 if (TREE_CODE (type) == COMPLEX_TYPE)
12529 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12530 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12532 /* Handle transparent aggregates. */
12533 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12534 && TYPE_TRANSPARENT_AGGR (type))
12535 type = TREE_TYPE (first_field (type));
12537 /* See if this arg was passed by invisible reference. */
12538 if (pass_by_reference (get_cumulative_args (args_so_far),
12539 TYPE_MODE (type), type, true))
12540 type = build_pointer_type (type);
12542 /* Find mode as it is passed by the ABI. */
12543 unsignedp = TYPE_UNSIGNED (type);
12544 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12546 /* If we must pass in stack, we need a stack. */
12547 if (rs6000_must_pass_in_stack (mode, type))
12548 return true;
12550 /* If there is no incoming register, we need a stack. */
12551 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12552 if (entry_parm == NULL)
12553 return true;
12555 /* Likewise if we need to pass both in registers and on the stack. */
12556 if (GET_CODE (entry_parm) == PARALLEL
12557 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12558 return true;
12560 /* Also true if we're partially in registers and partially not. */
12561 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12562 return true;
12564 /* Update info on where next arg arrives in registers. */
12565 rs6000_function_arg_advance (args_so_far, mode, type, true);
12566 return false;
12569 /* Return true if FUN has no prototype, has a variable argument
12570 list, or passes any parameter in memory. */
12572 static bool
12573 rs6000_function_parms_need_stack (tree fun, bool incoming)
12575 tree fntype, result;
12576 CUMULATIVE_ARGS args_so_far_v;
12577 cumulative_args_t args_so_far;
12579 if (!fun)
12580 /* Must be a libcall, all of which only use reg parms. */
12581 return false;
12583 fntype = fun;
12584 if (!TYPE_P (fun))
12585 fntype = TREE_TYPE (fun);
12587 /* Varargs functions need the parameter save area. */
12588 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12589 return true;
12591 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12592 args_so_far = pack_cumulative_args (&args_so_far_v);
12594 /* When incoming, we will have been passed the function decl.
12595 It is necessary to use the decl to handle K&R style functions,
12596 where TYPE_ARG_TYPES may not be available. */
12597 if (incoming)
12599 gcc_assert (DECL_P (fun));
12600 result = DECL_RESULT (fun);
12602 else
12603 result = TREE_TYPE (fntype);
12605 if (result && aggregate_value_p (result, fntype))
12607 if (!TYPE_P (result))
12608 result = TREE_TYPE (result);
12609 result = build_pointer_type (result);
12610 rs6000_parm_needs_stack (args_so_far, result);
12613 if (incoming)
12615 tree parm;
12617 for (parm = DECL_ARGUMENTS (fun);
12618 parm && parm != void_list_node;
12619 parm = TREE_CHAIN (parm))
12620 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12621 return true;
12623 else
12625 function_args_iterator args_iter;
12626 tree arg_type;
12628 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12629 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12630 return true;
12633 return false;
12636 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12637 usually a constant depending on the ABI. However, in the ELFv2 ABI
12638 the register parameter area is optional when calling a function that
12639 has a prototype in scope, has no variable argument list, and passes
12640 all parameters in registers. */
12643 int rs6000_reg_parm_stack_space (tree fun, bool incoming)
12645 int reg_parm_stack_space;
12647 switch (DEFAULT_ABI)
12649 default:
12650 reg_parm_stack_space = 0;
12651 break;
12653 case ABI_AIX:
12654 case ABI_DARWIN:
12655 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12656 break;
12658 case ABI_ELFv2:
12659 /* ??? Recomputing this every time is a bit expensive. Is there
12660 a place to cache this information? */
12661 if (rs6000_function_parms_need_stack (fun, incoming))
12662 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12663 else
12664 reg_parm_stack_space = 0;
12665 break;
12668 return reg_parm_stack_space;
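/* Illustrative values: AIX and Darwin reserve 64 bytes of register
   parameter area in 64-bit mode and 32 bytes in 32-bit mode; V.4
   reserves none; ELFv2 reserves the same space only when
   rs6000_function_parms_need_stack reports that the callee may
   actually use the parameter save area, and 0 otherwise. */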
12671 static void
12672 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12674 int i;
12675 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12677 if (nregs == 0)
12678 return;
12680 for (i = 0; i < nregs; i++)
12682 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12683 if (reload_completed)
12685 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12686 tem = NULL_RTX;
12687 else
12688 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12689 i * GET_MODE_SIZE (reg_mode));
12691 else
12692 tem = replace_equiv_address (tem, XEXP (tem, 0));
12694 gcc_assert (tem);
12696 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12700 /* Perform any actions needed for a function that is receiving a
12701 variable number of arguments.
12703 CUM is as above.
12705 MODE and TYPE are the mode and type of the current parameter.
12707 PRETEND_SIZE is a variable that should be set to the amount of stack
12708 that must be pushed by the prolog to pretend that our caller pushed it.
12711 Normally, this macro will push all remaining incoming registers on the
12712 stack and set PRETEND_SIZE to the length of the registers pushed. */
12714 static void
12715 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12716 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12717 int no_rtl)
12719 CUMULATIVE_ARGS next_cum;
12720 int reg_size = TARGET_32BIT ? 4 : 8;
12721 rtx save_area = NULL_RTX, mem;
12722 int first_reg_offset;
12723 alias_set_type set;
12725 /* Skip the last named argument. */
12726 next_cum = *get_cumulative_args (cum);
12727 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12729 if (DEFAULT_ABI == ABI_V4)
12731 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12733 if (! no_rtl)
12735 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12736 HOST_WIDE_INT offset = 0;
12738 /* Try to optimize the size of the varargs save area.
12739 The ABI requires that ap.reg_save_area is doubleword
12740 aligned, but we don't need to allocate space for all
12741 the bytes, only those into which we will actually
12742 save anything. */
12743 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12744 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12745 if (TARGET_HARD_FLOAT
12746 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12747 && cfun->va_list_fpr_size)
12749 if (gpr_reg_num)
12750 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12751 * UNITS_PER_FP_WORD;
12752 if (cfun->va_list_fpr_size
12753 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12754 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12755 else
12756 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12757 * UNITS_PER_FP_WORD;
12759 if (gpr_reg_num)
12761 offset = -((first_reg_offset * reg_size) & ~7);
12762 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12764 gpr_reg_num = cfun->va_list_gpr_size;
12765 if (reg_size == 4 && (first_reg_offset & 1))
12766 gpr_reg_num++;
12768 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12770 else if (fpr_size)
12771 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12772 * UNITS_PER_FP_WORD
12773 - (int) (GP_ARG_NUM_REG * reg_size);
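/* Sample arithmetic with illustrative values: reg_size == 4 and
   first_reg_offset == 3 give gpr_reg_num at most 8 - 3 == 5, so
   gpr_size == (5 * 4 + 7) & ~7 == 24, i.e. the GPR block is rounded
   up to a whole number of doublewords.  */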
12775 if (gpr_size + fpr_size)
12777 rtx reg_save_area
12778 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12779 gcc_assert (GET_CODE (reg_save_area) == MEM);
12780 reg_save_area = XEXP (reg_save_area, 0);
12781 if (GET_CODE (reg_save_area) == PLUS)
12783 gcc_assert (XEXP (reg_save_area, 0)
12784 == virtual_stack_vars_rtx);
12785 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12786 offset += INTVAL (XEXP (reg_save_area, 1));
12788 else
12789 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12792 cfun->machine->varargs_save_offset = offset;
12793 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12796 else
12798 first_reg_offset = next_cum.words;
12799 save_area = crtl->args.internal_arg_pointer;
12801 if (targetm.calls.must_pass_in_stack (mode, type))
12802 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12805 set = get_varargs_alias_set ();
12806 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12807 && cfun->va_list_gpr_size)
12809 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12811 if (va_list_gpr_counter_field)
12812 /* V4 va_list_gpr_size counts number of registers needed. */
12813 n_gpr = cfun->va_list_gpr_size;
12814 else
12815 /* char * va_list instead counts number of bytes needed. */
12816 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
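/* E.g. (illustrative): a char * va_list estimate of 10 bytes with
   reg_size == 4 yields n_gpr == (10 + 4 - 1) / 4 == 3 registers.  */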
12818 if (nregs > n_gpr)
12819 nregs = n_gpr;
12821 mem = gen_rtx_MEM (BLKmode,
12822 plus_constant (Pmode, save_area,
12823 first_reg_offset * reg_size));
12824 MEM_NOTRAP_P (mem) = 1;
12825 set_mem_alias_set (mem, set);
12826 set_mem_align (mem, BITS_PER_WORD);
12828 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12829 nregs);
12832 /* Save FP registers if needed. */
12833 if (DEFAULT_ABI == ABI_V4
12834 && TARGET_HARD_FLOAT
12835 && ! no_rtl
12836 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12837 && cfun->va_list_fpr_size)
12839 int fregno = next_cum.fregno, nregs;
12840 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12841 rtx lab = gen_label_rtx ();
12842 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12843 * UNITS_PER_FP_WORD);
12845 emit_jump_insn
12846 (gen_rtx_SET (pc_rtx,
12847 gen_rtx_IF_THEN_ELSE (VOIDmode,
12848 gen_rtx_NE (VOIDmode, cr1,
12849 const0_rtx),
12850 gen_rtx_LABEL_REF (VOIDmode, lab),
12851 pc_rtx)));
12853 for (nregs = 0;
12854 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12855 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12857 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12858 plus_constant (Pmode, save_area, off));
12859 MEM_NOTRAP_P (mem) = 1;
12860 set_mem_alias_set (mem, set);
12861 set_mem_align (mem, GET_MODE_ALIGNMENT (
12862 TARGET_HARD_FLOAT ? DFmode : SFmode));
12863 emit_move_insn (mem, gen_rtx_REG (
12864 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12867 emit_label (lab);
12871 /* Create the va_list data type. */
12873 static tree
12874 rs6000_build_builtin_va_list (void)
12876 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12878 /* For AIX, prefer 'char *' because that's what the system
12879 header files like. */
12880 if (DEFAULT_ABI != ABI_V4)
12881 return build_pointer_type (char_type_node);
12883 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12884 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12885 get_identifier ("__va_list_tag"), record);
12887 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12888 unsigned_char_type_node);
12889 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12890 unsigned_char_type_node);
12891 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12892 every user file. */
12893 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12894 get_identifier ("reserved"), short_unsigned_type_node);
12895 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12896 get_identifier ("overflow_arg_area"),
12897 ptr_type_node);
12898 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12899 get_identifier ("reg_save_area"),
12900 ptr_type_node);
12902 va_list_gpr_counter_field = f_gpr;
12903 va_list_fpr_counter_field = f_fpr;
12905 DECL_FIELD_CONTEXT (f_gpr) = record;
12906 DECL_FIELD_CONTEXT (f_fpr) = record;
12907 DECL_FIELD_CONTEXT (f_res) = record;
12908 DECL_FIELD_CONTEXT (f_ovf) = record;
12909 DECL_FIELD_CONTEXT (f_sav) = record;
12911 TYPE_STUB_DECL (record) = type_decl;
12912 TYPE_NAME (record) = type_decl;
12913 TYPE_FIELDS (record) = f_gpr;
12914 DECL_CHAIN (f_gpr) = f_fpr;
12915 DECL_CHAIN (f_fpr) = f_res;
12916 DECL_CHAIN (f_res) = f_ovf;
12917 DECL_CHAIN (f_ovf) = f_sav;
12919 layout_type (record);
12921 /* The correct type is an array type of one element. */
12922 return build_array_type (record, build_index_type (size_zero_node));
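/* For reference, the record built above corresponds to this C sketch
   (field meanings per the SVR4 ABI):

     typedef struct __va_list_tag
     {
       unsigned char gpr;               next GPR to use, 0..8
       unsigned char fpr;               next FPR to use, 0..8
       unsigned short reserved;         the named padding above
       void *overflow_arg_area;         next stack argument
       void *reg_save_area;             base of the register save area
     } __va_list_tag, va_list[1];  */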
12925 /* Implement va_start. */
12927 static void
12928 rs6000_va_start (tree valist, rtx nextarg)
12930 HOST_WIDE_INT words, n_gpr, n_fpr;
12931 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12932 tree gpr, fpr, ovf, sav, t;
12934 /* Only SVR4 needs something special. */
12935 if (DEFAULT_ABI != ABI_V4)
12937 std_expand_builtin_va_start (valist, nextarg);
12938 return;
12941 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12942 f_fpr = DECL_CHAIN (f_gpr);
12943 f_res = DECL_CHAIN (f_fpr);
12944 f_ovf = DECL_CHAIN (f_res);
12945 f_sav = DECL_CHAIN (f_ovf);
12947 valist = build_simple_mem_ref (valist);
12948 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12949 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12950 f_fpr, NULL_TREE);
12951 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12952 f_ovf, NULL_TREE);
12953 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12954 f_sav, NULL_TREE);
12956 /* Count number of gp and fp argument registers used. */
12957 words = crtl->args.info.words;
12958 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12959 GP_ARG_NUM_REG);
12960 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12961 FP_ARG_NUM_REG);
12963 if (TARGET_DEBUG_ARG)
12964 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12965 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12966 words, n_gpr, n_fpr);
12968 if (cfun->va_list_gpr_size)
12970 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12971 build_int_cst (NULL_TREE, n_gpr));
12972 TREE_SIDE_EFFECTS (t) = 1;
12973 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12976 if (cfun->va_list_fpr_size)
12978 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12979 build_int_cst (NULL_TREE, n_fpr));
12980 TREE_SIDE_EFFECTS (t) = 1;
12981 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12983 #ifdef HAVE_AS_GNU_ATTRIBUTE
12984 if (call_ABI_of_interest (cfun->decl))
12985 rs6000_passes_float = true;
12986 #endif
12989 /* Find the overflow area. */
12990 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12991 if (words != 0)
12992 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12993 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12994 TREE_SIDE_EFFECTS (t) = 1;
12995 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12997 /* If there were no va_arg invocations, don't set up the register
12998 save area. */
12999 if (!cfun->va_list_gpr_size
13000 && !cfun->va_list_fpr_size
13001 && n_gpr < GP_ARG_NUM_REG
13002 && n_fpr < FP_ARG_V4_MAX_REG)
13003 return;
13005 /* Find the register save area. */
13006 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13007 if (cfun->machine->varargs_save_offset)
13008 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13009 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13010 TREE_SIDE_EFFECTS (t) = 1;
13011 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13014 /* Implement va_arg. */
13016 static tree
13017 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13018 gimple_seq *post_p)
13020 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13021 tree gpr, fpr, ovf, sav, reg, t, u;
13022 int size, rsize, n_reg, sav_ofs, sav_scale;
13023 tree lab_false, lab_over, addr;
13024 int align;
13025 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13026 int regalign = 0;
13027 gimple *stmt;
13029 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13031 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13032 return build_va_arg_indirect_ref (t);
13035 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13036 earlier version of gcc, with the property that it always applied alignment
13037 adjustments to the va-args (even for zero-sized types). The cheapest way
13038 to deal with this is to replicate the effect of the part of
13039 std_gimplify_va_arg_expr that carries out the alignment adjustment, for
13040 the relevant case.
13041 We don't need to check for pass-by-reference because of the test above.
13042 We can return a simplified answer, since we know there's no offset to add. */
13044 if (((TARGET_MACHO
13045 && rs6000_darwin64_abi)
13046 || DEFAULT_ABI == ABI_ELFv2
13047 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13048 && integer_zerop (TYPE_SIZE (type)))
13050 unsigned HOST_WIDE_INT align, boundary;
13051 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13052 align = PARM_BOUNDARY / BITS_PER_UNIT;
13053 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13054 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13055 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13056 boundary /= BITS_PER_UNIT;
13057 if (boundary > align)
13059 tree t;
13060 /* This updates arg ptr by the amount that would be necessary
13061 to align the zero-sized (but not zero-alignment) item. */
13062 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13063 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13064 gimplify_and_add (t, pre_p);
13066 t = fold_convert (sizetype, valist_tmp);
13067 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13068 fold_convert (TREE_TYPE (valist),
13069 fold_build2 (BIT_AND_EXPR, sizetype, t,
13070 size_int (-boundary))));
13071 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13072 gimplify_and_add (t, pre_p);
13074 /* Since it is zero-sized there's no increment for the item itself. */
13075 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13076 return build_va_arg_indirect_ref (valist_tmp);
13079 if (DEFAULT_ABI != ABI_V4)
13081 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13083 tree elem_type = TREE_TYPE (type);
13084 machine_mode elem_mode = TYPE_MODE (elem_type);
13085 int elem_size = GET_MODE_SIZE (elem_mode);
13087 if (elem_size < UNITS_PER_WORD)
13089 tree real_part, imag_part;
13090 gimple_seq post = NULL;
13092 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13093 &post);
13094 /* Copy the value into a temporary, lest the formal temporary
13095 be reused out from under us. */
13096 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13097 gimple_seq_add_seq (pre_p, post);
13099 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13100 post_p);
13102 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13106 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13109 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13110 f_fpr = DECL_CHAIN (f_gpr);
13111 f_res = DECL_CHAIN (f_fpr);
13112 f_ovf = DECL_CHAIN (f_res);
13113 f_sav = DECL_CHAIN (f_ovf);
13115 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13116 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13117 f_fpr, NULL_TREE);
13118 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13119 f_ovf, NULL_TREE);
13120 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13121 f_sav, NULL_TREE);
13123 size = int_size_in_bytes (type);
13124 rsize = (size + 3) / 4;
13125 int pad = 4 * rsize - size;
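/* Example (illustrative): a 6-byte struct gives rsize == 2 and
   pad == 2; on big-endian targets the value is right-aligned, so the
   two pad bytes are skipped before the data.  */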
13126 align = 1;
13128 machine_mode mode = TYPE_MODE (type);
13129 if (abi_v4_pass_in_fpr (mode, false))
13131 /* FP args go in FP registers, if present. */
13132 reg = fpr;
13133 n_reg = (size + 7) / 8;
13134 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
13135 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
13136 if (mode != SFmode && mode != SDmode)
13137 align = 8;
13139 else
13141 /* Otherwise into GP registers. */
13142 reg = gpr;
13143 n_reg = rsize;
13144 sav_ofs = 0;
13145 sav_scale = 4;
13146 if (n_reg == 2)
13147 align = 8;
13150 /* Pull the value out of the saved registers.... */
13152 lab_over = NULL;
13153 addr = create_tmp_var (ptr_type_node, "addr");
13155 /* AltiVec vectors never go in registers when -mabi=altivec. */
13156 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13157 align = 16;
13158 else
13160 lab_false = create_artificial_label (input_location);
13161 lab_over = create_artificial_label (input_location);
13163 /* Long long is aligned in the registers.  So is any other two-GPR
13164 item, such as complex int, due to a historical mistake. */
13165 u = reg;
13166 if (n_reg == 2 && reg == gpr)
13168 regalign = 1;
13169 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13170 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13171 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13172 unshare_expr (reg), u);
13174 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13175 reg number is 0 for f1, so we want to make it odd. */
13176 else if (reg == fpr && mode == TDmode)
13178 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13179 build_int_cst (TREE_TYPE (reg), 1));
13180 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13183 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13184 t = build2 (GE_EXPR, boolean_type_node, u, t);
13185 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13186 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13187 gimplify_and_add (t, pre_p);
13189 t = sav;
13190 if (sav_ofs)
13191 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13193 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13194 build_int_cst (TREE_TYPE (reg), n_reg));
13195 u = fold_convert (sizetype, u);
13196 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13197 t = fold_build_pointer_plus (t, u);
13199 /* _Decimal32 varargs are located in the second word of the 64-bit
13200 FP register for 32-bit binaries. */
13201 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13202 t = fold_build_pointer_plus_hwi (t, size);
13204 /* Args are passed right-aligned. */
13205 if (BYTES_BIG_ENDIAN)
13206 t = fold_build_pointer_plus_hwi (t, pad);
13208 gimplify_assign (addr, t, pre_p);
13210 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13212 stmt = gimple_build_label (lab_false);
13213 gimple_seq_add_stmt (pre_p, stmt);
13215 if ((n_reg == 2 && !regalign) || n_reg > 2)
13217 /* Ensure that we don't find any more args in regs.
13218 Alignment has already taken care of the special cases. */
13219 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13223 /* ... otherwise out of the overflow area. */
13225 /* Care for on-stack alignment if needed. */
13226 t = ovf;
13227 if (align != 1)
13229 t = fold_build_pointer_plus_hwi (t, align - 1);
13230 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13231 build_int_cst (TREE_TYPE (t), -align));
13234 /* Args are passed right-aligned. */
13235 if (BYTES_BIG_ENDIAN)
13236 t = fold_build_pointer_plus_hwi (t, pad);
13238 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13240 gimplify_assign (unshare_expr (addr), t, pre_p);
13242 t = fold_build_pointer_plus_hwi (t, size);
13243 gimplify_assign (unshare_expr (ovf), t, pre_p);
13245 if (lab_over)
13247 stmt = gimple_build_label (lab_over);
13248 gimple_seq_add_stmt (pre_p, stmt);
13251 if (STRICT_ALIGNMENT
13252 && (TYPE_ALIGN (type)
13253 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13255 /* The value (of type complex double, for example) may not be
13256 aligned in memory in the saved registers, so copy via a
13257 temporary. (This is the same code as used for SPARC.) */
13258 tree tmp = create_tmp_var (type, "va_arg_tmp");
13259 tree dest_addr = build_fold_addr_expr (tmp);
13261 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13262 3, dest_addr, addr, size_int (rsize * 4));
13263 TREE_ADDRESSABLE (tmp) = 1;
13265 gimplify_and_add (copy, pre_p);
13266 addr = dest_addr;
13269 addr = fold_convert (ptrtype, addr);
13270 return build_va_arg_indirect_ref (addr);
13273 /* Builtins. */
13275 static void
13276 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13278 tree t;
13279 unsigned classify = rs6000_builtin_info[(int)code].attr;
13280 const char *attr_string = "";
13282 gcc_assert (name != NULL);
13283 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13285 if (rs6000_builtin_decls[(int)code])
13286 fatal_error (input_location,
13287 "internal error: builtin function %qs already processed",
13288 name);
13290 rs6000_builtin_decls[(int)code] = t =
13291 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13293 /* Set any special attributes. */
13294 if ((classify & RS6000_BTC_CONST) != 0)
13296 /* const function, function only depends on the inputs. */
13297 TREE_READONLY (t) = 1;
13298 TREE_NOTHROW (t) = 1;
13299 attr_string = ", const";
13301 else if ((classify & RS6000_BTC_PURE) != 0)
13303 /* pure function, function can read global memory, but does not set any
13304 external state. */
13305 DECL_PURE_P (t) = 1;
13306 TREE_NOTHROW (t) = 1;
13307 attr_string = ", pure";
13309 else if ((classify & RS6000_BTC_FP) != 0)
13311 /* Function is a math function. If rounding mode is on, then treat the
13312 function as not reading global memory, but it can have arbitrary side
13313 effects. If it is off, then assume the function is a const function.
13314 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13315 builtin-attribute.def that is used for the math functions. */
13316 TREE_NOTHROW (t) = 1;
13317 if (flag_rounding_math)
13319 DECL_PURE_P (t) = 1;
13320 DECL_IS_NOVOPS (t) = 1;
13321 attr_string = ", fp, pure";
13323 else
13325 TREE_READONLY (t) = 1;
13326 attr_string = ", fp, const";
13329 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13330 gcc_unreachable ();
13332 if (TARGET_DEBUG_BUILTIN)
13333 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13334 (int)code, name, attr_string);
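/* A typical call, for illustration (the function-type node name below
   is schematic; the real nodes are built during builtin setup):

     def_builtin ("__builtin_altivec_vperm_4si",
		  v4si_ftype_v4si_v4si_v16qi, ALTIVEC_BUILTIN_VPERM_4SI);  */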
13337 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13339 #undef RS6000_BUILTIN_0
13340 #undef RS6000_BUILTIN_1
13341 #undef RS6000_BUILTIN_2
13342 #undef RS6000_BUILTIN_3
13343 #undef RS6000_BUILTIN_A
13344 #undef RS6000_BUILTIN_D
13345 #undef RS6000_BUILTIN_H
13346 #undef RS6000_BUILTIN_P
13347 #undef RS6000_BUILTIN_X
13349 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13350 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13351 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13352 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13353 { MASK, ICODE, NAME, ENUM },
13355 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13356 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13357 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13358 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13359 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13361 static const struct builtin_description bdesc_3arg[] =
13363 #include "rs6000-builtin.def"
13364 };
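/* With the definitions above, each RS6000_BUILTIN_3 entry in
   rs6000-builtin.def contributes one initializer of the schematic form

     { MASK, CODE_FOR_insn, "__builtin_name", BUILTIN_ENUM },

   while every other RS6000_BUILTIN_* entry expands to nothing, so
   bdesc_3arg collects exactly the three-operand builtins.  */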
13366 /* DST operations: void foo (void *, const int, const char). */
13368 #undef RS6000_BUILTIN_0
13369 #undef RS6000_BUILTIN_1
13370 #undef RS6000_BUILTIN_2
13371 #undef RS6000_BUILTIN_3
13372 #undef RS6000_BUILTIN_A
13373 #undef RS6000_BUILTIN_D
13374 #undef RS6000_BUILTIN_H
13375 #undef RS6000_BUILTIN_P
13376 #undef RS6000_BUILTIN_X
13378 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13379 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13380 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13381 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13382 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13383 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13384 { MASK, ICODE, NAME, ENUM },
13386 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13387 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13388 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13390 static const struct builtin_description bdesc_dst[] =
13392 #include "rs6000-builtin.def"
13393 };
13395 /* Simple binary operations: VECc = foo (VECa, VECb). */
13397 #undef RS6000_BUILTIN_0
13398 #undef RS6000_BUILTIN_1
13399 #undef RS6000_BUILTIN_2
13400 #undef RS6000_BUILTIN_3
13401 #undef RS6000_BUILTIN_A
13402 #undef RS6000_BUILTIN_D
13403 #undef RS6000_BUILTIN_H
13404 #undef RS6000_BUILTIN_P
13405 #undef RS6000_BUILTIN_X
13407 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13408 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13409 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13410 { MASK, ICODE, NAME, ENUM },
13412 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13413 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13414 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13415 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13416 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13417 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13419 static const struct builtin_description bdesc_2arg[] =
13421 #include "rs6000-builtin.def"
13422 };
13424 #undef RS6000_BUILTIN_0
13425 #undef RS6000_BUILTIN_1
13426 #undef RS6000_BUILTIN_2
13427 #undef RS6000_BUILTIN_3
13428 #undef RS6000_BUILTIN_A
13429 #undef RS6000_BUILTIN_D
13430 #undef RS6000_BUILTIN_H
13431 #undef RS6000_BUILTIN_P
13432 #undef RS6000_BUILTIN_X
13434 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13435 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13436 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13437 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13438 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13439 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13440 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13441 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13442 { MASK, ICODE, NAME, ENUM },
13444 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13446 /* AltiVec predicates. */
13448 static const struct builtin_description bdesc_altivec_preds[] =
13450 #include "rs6000-builtin.def"
13451 };
13453 /* ABS* operations. */
13455 #undef RS6000_BUILTIN_0
13456 #undef RS6000_BUILTIN_1
13457 #undef RS6000_BUILTIN_2
13458 #undef RS6000_BUILTIN_3
13459 #undef RS6000_BUILTIN_A
13460 #undef RS6000_BUILTIN_D
13461 #undef RS6000_BUILTIN_H
13462 #undef RS6000_BUILTIN_P
13463 #undef RS6000_BUILTIN_X
13465 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13466 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13467 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13468 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13469 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13470 { MASK, ICODE, NAME, ENUM },
13472 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13473 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13474 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13475 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13477 static const struct builtin_description bdesc_abs[] =
13479 #include "rs6000-builtin.def"
13480 };
13482 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13483 foo (VECa). */
13485 #undef RS6000_BUILTIN_0
13486 #undef RS6000_BUILTIN_1
13487 #undef RS6000_BUILTIN_2
13488 #undef RS6000_BUILTIN_3
13489 #undef RS6000_BUILTIN_A
13490 #undef RS6000_BUILTIN_D
13491 #undef RS6000_BUILTIN_H
13492 #undef RS6000_BUILTIN_P
13493 #undef RS6000_BUILTIN_X
13495 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13496 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13497 { MASK, ICODE, NAME, ENUM },
13499 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13500 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13501 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13502 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13503 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13504 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13505 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13507 static const struct builtin_description bdesc_1arg[] =
13509 #include "rs6000-builtin.def"
13510 };
13512 /* Simple no-argument operations: result = __builtin_darn_32 () */
13514 #undef RS6000_BUILTIN_0
13515 #undef RS6000_BUILTIN_1
13516 #undef RS6000_BUILTIN_2
13517 #undef RS6000_BUILTIN_3
13518 #undef RS6000_BUILTIN_A
13519 #undef RS6000_BUILTIN_D
13520 #undef RS6000_BUILTIN_H
13521 #undef RS6000_BUILTIN_P
13522 #undef RS6000_BUILTIN_X
13524 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13525 { MASK, ICODE, NAME, ENUM },
13527 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13528 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13529 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13530 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13531 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13532 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13533 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13534 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13536 static const struct builtin_description bdesc_0arg[] =
13538 #include "rs6000-builtin.def"
13539 };
13541 /* HTM builtins. */
13542 #undef RS6000_BUILTIN_0
13543 #undef RS6000_BUILTIN_1
13544 #undef RS6000_BUILTIN_2
13545 #undef RS6000_BUILTIN_3
13546 #undef RS6000_BUILTIN_A
13547 #undef RS6000_BUILTIN_D
13548 #undef RS6000_BUILTIN_H
13549 #undef RS6000_BUILTIN_P
13550 #undef RS6000_BUILTIN_X
13552 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13553 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13554 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13555 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13556 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13557 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13558 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13559 { MASK, ICODE, NAME, ENUM },
13561 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13562 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13564 static const struct builtin_description bdesc_htm[] =
13566 #include "rs6000-builtin.def"
13567 };
13569 #undef RS6000_BUILTIN_0
13570 #undef RS6000_BUILTIN_1
13571 #undef RS6000_BUILTIN_2
13572 #undef RS6000_BUILTIN_3
13573 #undef RS6000_BUILTIN_A
13574 #undef RS6000_BUILTIN_D
13575 #undef RS6000_BUILTIN_H
13576 #undef RS6000_BUILTIN_P
13578 /* Return true if a builtin function is overloaded. */
13579 bool
13580 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13582 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13585 const char *
13586 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13588 return rs6000_builtin_info[(int)fncode].name;
13591 /* Expand an expression EXP that calls a builtin without arguments. */
13592 static rtx
13593 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13595 rtx pat;
13596 machine_mode tmode = insn_data[icode].operand[0].mode;
13598 if (icode == CODE_FOR_nothing)
13599 /* Builtin not supported on this processor. */
13600 return 0;
13602 if (target == 0
13603 || GET_MODE (target) != tmode
13604 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13605 target = gen_reg_rtx (tmode);
13607 pat = GEN_FCN (icode) (target);
13608 if (! pat)
13609 return 0;
13610 emit_insn (pat);
13612 return target;
13616 static rtx
13617 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13619 rtx pat;
13620 tree arg0 = CALL_EXPR_ARG (exp, 0);
13621 tree arg1 = CALL_EXPR_ARG (exp, 1);
13622 rtx op0 = expand_normal (arg0);
13623 rtx op1 = expand_normal (arg1);
13624 machine_mode mode0 = insn_data[icode].operand[0].mode;
13625 machine_mode mode1 = insn_data[icode].operand[1].mode;
13627 if (icode == CODE_FOR_nothing)
13628 /* Builtin not supported on this processor. */
13629 return 0;
13631 /* If we got invalid arguments bail out before generating bad rtl. */
13632 if (arg0 == error_mark_node || arg1 == error_mark_node)
13633 return const0_rtx;
13635 if (GET_CODE (op0) != CONST_INT
13636 || INTVAL (op0) > 255
13637 || INTVAL (op0) < 0)
13639 error ("argument 1 must be an 8-bit field value");
13640 return const0_rtx;
13643 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13644 op0 = copy_to_mode_reg (mode0, op0);
13646 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13647 op1 = copy_to_mode_reg (mode1, op1);
13649 pat = GEN_FCN (icode) (op0, op1);
13650 if (! pat)
13651 return const0_rtx;
13652 emit_insn (pat);
13654 return NULL_RTX;
13657 static rtx
13658 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13660 rtx pat;
13661 tree arg0 = CALL_EXPR_ARG (exp, 0);
13662 rtx op0 = expand_normal (arg0);
13663 machine_mode tmode = insn_data[icode].operand[0].mode;
13664 machine_mode mode0 = insn_data[icode].operand[1].mode;
13666 if (icode == CODE_FOR_nothing)
13667 /* Builtin not supported on this processor. */
13668 return 0;
13670 /* If we got invalid arguments bail out before generating bad rtl. */
13671 if (arg0 == error_mark_node)
13672 return const0_rtx;
13674 if (icode == CODE_FOR_altivec_vspltisb
13675 || icode == CODE_FOR_altivec_vspltish
13676 || icode == CODE_FOR_altivec_vspltisw)
13678 /* Only allow 5-bit *signed* literals. */
13679 if (GET_CODE (op0) != CONST_INT
13680 || INTVAL (op0) > 15
13681 || INTVAL (op0) < -16)
13683 error ("argument 1 must be a 5-bit signed literal");
13684 return CONST0_RTX (tmode);
13688 if (target == 0
13689 || GET_MODE (target) != tmode
13690 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13691 target = gen_reg_rtx (tmode);
13693 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13694 op0 = copy_to_mode_reg (mode0, op0);
13696 pat = GEN_FCN (icode) (target, op0);
13697 if (! pat)
13698 return 0;
13699 emit_insn (pat);
13701 return target;
13704 static rtx
13705 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13707 rtx pat, scratch1, scratch2;
13708 tree arg0 = CALL_EXPR_ARG (exp, 0);
13709 rtx op0 = expand_normal (arg0);
13710 machine_mode tmode = insn_data[icode].operand[0].mode;
13711 machine_mode mode0 = insn_data[icode].operand[1].mode;
13713 /* If we have invalid arguments, bail out before generating bad rtl. */
13714 if (arg0 == error_mark_node)
13715 return const0_rtx;
13717 if (target == 0
13718 || GET_MODE (target) != tmode
13719 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13720 target = gen_reg_rtx (tmode);
13722 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13723 op0 = copy_to_mode_reg (mode0, op0);
13725 scratch1 = gen_reg_rtx (mode0);
13726 scratch2 = gen_reg_rtx (mode0);
13728 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13729 if (! pat)
13730 return 0;
13731 emit_insn (pat);
13733 return target;
13736 static rtx
13737 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13739 rtx pat;
13740 tree arg0 = CALL_EXPR_ARG (exp, 0);
13741 tree arg1 = CALL_EXPR_ARG (exp, 1);
13742 rtx op0 = expand_normal (arg0);
13743 rtx op1 = expand_normal (arg1);
13744 machine_mode tmode = insn_data[icode].operand[0].mode;
13745 machine_mode mode0 = insn_data[icode].operand[1].mode;
13746 machine_mode mode1 = insn_data[icode].operand[2].mode;
13748 if (icode == CODE_FOR_nothing)
13749 /* Builtin not supported on this processor. */
13750 return 0;
13752 /* If we got invalid arguments bail out before generating bad rtl. */
13753 if (arg0 == error_mark_node || arg1 == error_mark_node)
13754 return const0_rtx;
13756 if (icode == CODE_FOR_altivec_vcfux
13757 || icode == CODE_FOR_altivec_vcfsx
13758 || icode == CODE_FOR_altivec_vctsxs
13759 || icode == CODE_FOR_altivec_vctuxs
13760 || icode == CODE_FOR_altivec_vspltb
13761 || icode == CODE_FOR_altivec_vsplth
13762 || icode == CODE_FOR_altivec_vspltw)
13764 /* Only allow 5-bit unsigned literals. */
13765 STRIP_NOPS (arg1);
13766 if (TREE_CODE (arg1) != INTEGER_CST
13767 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13769 error ("argument 2 must be a 5-bit unsigned literal");
13770 return CONST0_RTX (tmode);
13773 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13774 || icode == CODE_FOR_dfptstsfi_lt_dd
13775 || icode == CODE_FOR_dfptstsfi_gt_dd
13776 || icode == CODE_FOR_dfptstsfi_unordered_dd
13777 || icode == CODE_FOR_dfptstsfi_eq_td
13778 || icode == CODE_FOR_dfptstsfi_lt_td
13779 || icode == CODE_FOR_dfptstsfi_gt_td
13780 || icode == CODE_FOR_dfptstsfi_unordered_td)
13782 /* Only allow 6-bit unsigned literals. */
13783 STRIP_NOPS (arg0);
13784 if (TREE_CODE (arg0) != INTEGER_CST
13785 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13787 error ("argument 1 must be a 6-bit unsigned literal");
13788 return CONST0_RTX (tmode);
13791 else if (icode == CODE_FOR_xststdcqp_kf
13792 || icode == CODE_FOR_xststdcqp_tf
13793 || icode == CODE_FOR_xststdcdp
13794 || icode == CODE_FOR_xststdcsp
13795 || icode == CODE_FOR_xvtstdcdp
13796 || icode == CODE_FOR_xvtstdcsp)
13798 /* Only allow 7-bit unsigned literals. */
13799 STRIP_NOPS (arg1);
13800 if (TREE_CODE (arg1) != INTEGER_CST
13801 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13803 error ("argument 2 must be a 7-bit unsigned literal");
13804 return CONST0_RTX (tmode);
13807 else if (icode == CODE_FOR_unpackv1ti
13808 || icode == CODE_FOR_unpackkf
13809 || icode == CODE_FOR_unpacktf
13810 || icode == CODE_FOR_unpackif
13811 || icode == CODE_FOR_unpacktd)
13813 /* Only allow 1-bit unsigned literals. */
13814 STRIP_NOPS (arg1);
13815 if (TREE_CODE (arg1) != INTEGER_CST
13816 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13818 error ("argument 2 must be a 1-bit unsigned literal");
13819 return CONST0_RTX (tmode);
13823 if (target == 0
13824 || GET_MODE (target) != tmode
13825 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13826 target = gen_reg_rtx (tmode);
13828 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13829 op0 = copy_to_mode_reg (mode0, op0);
13830 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13831 op1 = copy_to_mode_reg (mode1, op1);
13833 pat = GEN_FCN (icode) (target, op0, op1);
13834 if (! pat)
13835 return 0;
13836 emit_insn (pat);
13838 return target;
13841 static rtx
13842 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13844 rtx pat, scratch;
13845 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13846 tree arg0 = CALL_EXPR_ARG (exp, 1);
13847 tree arg1 = CALL_EXPR_ARG (exp, 2);
13848 rtx op0 = expand_normal (arg0);
13849 rtx op1 = expand_normal (arg1);
13850 machine_mode tmode = SImode;
13851 machine_mode mode0 = insn_data[icode].operand[1].mode;
13852 machine_mode mode1 = insn_data[icode].operand[2].mode;
13853 int cr6_form_int;
13855 if (TREE_CODE (cr6_form) != INTEGER_CST)
13857 error ("argument 1 of %qs must be a constant",
13858 "__builtin_altivec_predicate");
13859 return const0_rtx;
13861 else
13862 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13864 gcc_assert (mode0 == mode1);
13866 /* If we have invalid arguments, bail out before generating bad rtl. */
13867 if (arg0 == error_mark_node || arg1 == error_mark_node)
13868 return const0_rtx;
13870 if (target == 0
13871 || GET_MODE (target) != tmode
13872 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13873 target = gen_reg_rtx (tmode);
13875 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13876 op0 = copy_to_mode_reg (mode0, op0);
13877 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13878 op1 = copy_to_mode_reg (mode1, op1);
13880 /* Note that for many of the relevant operations (e.g. cmpne or
13881 cmpeq) with float or double operands, it makes more sense for the
13882 mode of the allocated scratch register to select a vector of
13883 integer. But the choice to copy the mode of operand 0 was made
13884 long ago and there are no plans to change it. */
13885 scratch = gen_reg_rtx (mode0);
13887 pat = GEN_FCN (icode) (scratch, op0, op1);
13888 if (! pat)
13889 return 0;
13890 emit_insn (pat);
13892 /* The vec_any* and vec_all* predicates use the same opcodes for two
13893 different operations, but the bits in CR6 will be different
13894 depending on what information we want. So we have to play tricks
13895 with CR6 to get the right bits out.
13897 If you think this is disgusting, look at the specs for the
13898 AltiVec predicates. */
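/* Concretely, assuming the __CR6_* numbering used by altivec.h:
   form 0 tests the CR6 "all comparisons false" (EQ) bit, 1 its
   complement, 2 the "all comparisons true" (LT) bit, and 3 its
   complement, so vec_all_* and vec_any_* can each pick the form
   that answers their question directly.  */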
13900 switch (cr6_form_int)
13902 case 0:
13903 emit_insn (gen_cr6_test_for_zero (target));
13904 break;
13905 case 1:
13906 emit_insn (gen_cr6_test_for_zero_reverse (target));
13907 break;
13908 case 2:
13909 emit_insn (gen_cr6_test_for_lt (target));
13910 break;
13911 case 3:
13912 emit_insn (gen_cr6_test_for_lt_reverse (target));
13913 break;
13914 default:
13915 error ("argument 1 of %qs is out of range",
13916 "__builtin_altivec_predicate");
13917 break;
13920 return target;
13923 rtx
13924 swap_endian_selector_for_mode (machine_mode mode)
13926 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13927 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13928 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13929 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13931 unsigned int *swaparray, i;
13932 rtx perm[16];
13934 switch (mode)
13936 case E_V1TImode:
13937 swaparray = swap1;
13938 break;
13939 case E_V2DFmode:
13940 case E_V2DImode:
13941 swaparray = swap2;
13942 break;
13943 case E_V4SFmode:
13944 case E_V4SImode:
13945 swaparray = swap4;
13946 break;
13947 case E_V8HImode:
13948 swaparray = swap8;
13949 break;
13950 default:
13951 gcc_unreachable ();
13954 for (i = 0; i < 16; ++i)
13955 perm[i] = GEN_INT (swaparray[i]);
13957 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13958 gen_rtvec_v (16, perm)));
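/* For example, the V4SImode selector is {3,2,1,0, 7,6,5,4, 11,10,9,8,
   15,14,13,12}; used as the permute control for vperm, it reverses
   the bytes within each 32-bit element and thereby swaps the
   endianness of every element in place.  */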
13961 static rtx
13962 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13964 rtx pat, addr;
13965 tree arg0 = CALL_EXPR_ARG (exp, 0);
13966 tree arg1 = CALL_EXPR_ARG (exp, 1);
13967 machine_mode tmode = insn_data[icode].operand[0].mode;
13968 machine_mode mode0 = Pmode;
13969 machine_mode mode1 = Pmode;
13970 rtx op0 = expand_normal (arg0);
13971 rtx op1 = expand_normal (arg1);
13973 if (icode == CODE_FOR_nothing)
13974 /* Builtin not supported on this processor. */
13975 return 0;
13977 /* If we got invalid arguments bail out before generating bad rtl. */
13978 if (arg0 == error_mark_node || arg1 == error_mark_node)
13979 return const0_rtx;
13981 if (target == 0
13982 || GET_MODE (target) != tmode
13983 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13984 target = gen_reg_rtx (tmode);
13986 op1 = copy_to_mode_reg (mode1, op1);
13988 /* For LVX, express the RTL accurately by ANDing the address with -16.
13989 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13990 so the raw address is fine. */
13991 if (icode == CODE_FOR_altivec_lvx_v1ti
13992 || icode == CODE_FOR_altivec_lvx_v2df
13993 || icode == CODE_FOR_altivec_lvx_v2di
13994 || icode == CODE_FOR_altivec_lvx_v4sf
13995 || icode == CODE_FOR_altivec_lvx_v4si
13996 || icode == CODE_FOR_altivec_lvx_v8hi
13997 || icode == CODE_FOR_altivec_lvx_v16qi)
13999 rtx rawaddr;
14000 if (op0 == const0_rtx)
14001 rawaddr = op1;
14002 else
14004 op0 = copy_to_mode_reg (mode0, op0);
14005 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14007 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14008 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14010 emit_insn (gen_rtx_SET (target, addr));
14012 else
14014 if (op0 == const0_rtx)
14015 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14016 else
14018 op0 = copy_to_mode_reg (mode0, op0);
14019 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14020 gen_rtx_PLUS (Pmode, op1, op0));
14023 pat = GEN_FCN (icode) (target, addr);
14024 if (! pat)
14025 return 0;
14026 emit_insn (pat);
14029 return target;
14032 static rtx
14033 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14035 rtx pat;
14036 tree arg0 = CALL_EXPR_ARG (exp, 0);
14037 tree arg1 = CALL_EXPR_ARG (exp, 1);
14038 tree arg2 = CALL_EXPR_ARG (exp, 2);
14039 rtx op0 = expand_normal (arg0);
14040 rtx op1 = expand_normal (arg1);
14041 rtx op2 = expand_normal (arg2);
14042 machine_mode mode0 = insn_data[icode].operand[0].mode;
14043 machine_mode mode1 = insn_data[icode].operand[1].mode;
14044 machine_mode mode2 = insn_data[icode].operand[2].mode;
14046 if (icode == CODE_FOR_nothing)
14047 /* Builtin not supported on this processor. */
14048 return NULL_RTX;
14050 /* If we got invalid arguments bail out before generating bad rtl. */
14051 if (arg0 == error_mark_node
14052 || arg1 == error_mark_node
14053 || arg2 == error_mark_node)
14054 return NULL_RTX;
14056 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14057 op0 = copy_to_mode_reg (mode0, op0);
14058 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14059 op1 = copy_to_mode_reg (mode1, op1);
14060 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14061 op2 = copy_to_mode_reg (mode2, op2);
14063 pat = GEN_FCN (icode) (op0, op1, op2);
14064 if (pat)
14065 emit_insn (pat);
14067 return NULL_RTX;
14070 static rtx
14071 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14073 tree arg0 = CALL_EXPR_ARG (exp, 0);
14074 tree arg1 = CALL_EXPR_ARG (exp, 1);
14075 tree arg2 = CALL_EXPR_ARG (exp, 2);
14076 rtx op0 = expand_normal (arg0);
14077 rtx op1 = expand_normal (arg1);
14078 rtx op2 = expand_normal (arg2);
14079 rtx pat, addr, rawaddr;
14080 machine_mode tmode = insn_data[icode].operand[0].mode;
14081 machine_mode smode = insn_data[icode].operand[1].mode;
14082 machine_mode mode1 = Pmode;
14083 machine_mode mode2 = Pmode;
14085 /* Invalid arguments.  Bail out before doing anything stupid! */
14086 if (arg0 == error_mark_node
14087 || arg1 == error_mark_node
14088 || arg2 == error_mark_node)
14089 return const0_rtx;
14091 op2 = copy_to_mode_reg (mode2, op2);
14093 /* For STVX, express the RTL accurately by ANDing the address with -16.
14094 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14095 so the raw address is fine. */
14096 if (icode == CODE_FOR_altivec_stvx_v2df
14097 || icode == CODE_FOR_altivec_stvx_v2di
14098 || icode == CODE_FOR_altivec_stvx_v4sf
14099 || icode == CODE_FOR_altivec_stvx_v4si
14100 || icode == CODE_FOR_altivec_stvx_v8hi
14101 || icode == CODE_FOR_altivec_stvx_v16qi)
14103 if (op1 == const0_rtx)
14104 rawaddr = op2;
14105 else
14107 op1 = copy_to_mode_reg (mode1, op1);
14108 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14111 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14112 addr = gen_rtx_MEM (tmode, addr);
14114 op0 = copy_to_mode_reg (tmode, op0);
14116 emit_insn (gen_rtx_SET (addr, op0));
14118 else
14120 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14121 op0 = copy_to_mode_reg (smode, op0);
14123 if (op1 == const0_rtx)
14124 addr = gen_rtx_MEM (tmode, op2);
14125 else
14127 op1 = copy_to_mode_reg (mode1, op1);
14128 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14131 pat = GEN_FCN (icode) (addr, op0);
14132 if (pat)
14133 emit_insn (pat);
14136 return NULL_RTX;
14139 /* Return the appropriate SPR number associated with the given builtin. */
14140 static inline HOST_WIDE_INT
14141 htm_spr_num (enum rs6000_builtins code)
14143 if (code == HTM_BUILTIN_GET_TFHAR
14144 || code == HTM_BUILTIN_SET_TFHAR)
14145 return TFHAR_SPR;
14146 else if (code == HTM_BUILTIN_GET_TFIAR
14147 || code == HTM_BUILTIN_SET_TFIAR)
14148 return TFIAR_SPR;
14149 else if (code == HTM_BUILTIN_GET_TEXASR
14150 || code == HTM_BUILTIN_SET_TEXASR)
14151 return TEXASR_SPR;
14152 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14153 || code == HTM_BUILTIN_SET_TEXASRU);
14154 return TEXASRU_SPR;
14157 /* Return the appropriate SPR regno associated with the given builtin. */
14158 static inline HOST_WIDE_INT
14159 htm_spr_regno (enum rs6000_builtins code)
14161 if (code == HTM_BUILTIN_GET_TFHAR
14162 || code == HTM_BUILTIN_SET_TFHAR)
14163 return TFHAR_REGNO;
14164 else if (code == HTM_BUILTIN_GET_TFIAR
14165 || code == HTM_BUILTIN_SET_TFIAR)
14166 return TFIAR_REGNO;
14167 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14168 || code == HTM_BUILTIN_SET_TEXASR
14169 || code == HTM_BUILTIN_GET_TEXASRU
14170 || code == HTM_BUILTIN_SET_TEXASRU);
14171 return TEXASR_REGNO;
14174 /* Return the correct ICODE value depending on whether we are
14175 setting or reading the HTM SPRs. */
14176 static inline enum insn_code
14177 rs6000_htm_spr_icode (bool nonvoid)
14179 if (nonvoid)
14180 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14181 else
14182 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14185 /* Expand the HTM builtin in EXP and store the result in TARGET.
14186 Store true in *EXPANDEDP if we found a builtin to expand. */
14187 static rtx
14188 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14190 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14191 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14192 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14193 const struct builtin_description *d;
14194 size_t i;
14196 *expandedp = true;
14198 if (!TARGET_POWERPC64
14199 && (fcode == HTM_BUILTIN_TABORTDC
14200 || fcode == HTM_BUILTIN_TABORTDCI))
14202 size_t uns_fcode = (size_t)fcode;
14203 const char *name = rs6000_builtin_info[uns_fcode].name;
14204 error ("builtin %qs is only valid in 64-bit mode", name);
14205 return const0_rtx;
14208 /* Expand the HTM builtins. */
14209 d = bdesc_htm;
14210 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14211 if (d->code == fcode)
14213 rtx op[MAX_HTM_OPERANDS], pat;
14214 int nopnds = 0;
14215 tree arg;
14216 call_expr_arg_iterator iter;
14217 unsigned attr = rs6000_builtin_info[fcode].attr;
14218 enum insn_code icode = d->icode;
14219 const struct insn_operand_data *insn_op;
14220 bool uses_spr = (attr & RS6000_BTC_SPR);
14221 rtx cr = NULL_RTX;
14223 if (uses_spr)
14224 icode = rs6000_htm_spr_icode (nonvoid);
14225 insn_op = &insn_data[icode].operand[0];
14227 if (nonvoid)
14229 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14230 if (!target
14231 || GET_MODE (target) != tmode
14232 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14233 target = gen_reg_rtx (tmode);
14234 if (uses_spr)
14235 op[nopnds++] = target;
14238 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14240 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14241 return const0_rtx;
14243 insn_op = &insn_data[icode].operand[nopnds];
14245 op[nopnds] = expand_normal (arg);
14247 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14249 if (!strcmp (insn_op->constraint, "n"))
14251 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14252 if (!CONST_INT_P (op[nopnds]))
14253 error ("argument %d must be an unsigned literal", arg_num);
14254 else
14255 error ("argument %d is an unsigned literal that is "
14256 "out of range", arg_num);
14257 return const0_rtx;
14259 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14262 nopnds++;
14265 /* Handle the builtins for extended mnemonics. These accept
14266 no arguments, but map to builtins that take arguments. */
14267 switch (fcode)
14269 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14270 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14271 op[nopnds++] = GEN_INT (1);
14272 if (flag_checking)
14273 attr |= RS6000_BTC_UNARY;
14274 break;
14275 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14276 op[nopnds++] = GEN_INT (0);
14277 if (flag_checking)
14278 attr |= RS6000_BTC_UNARY;
14279 break;
14280 default:
14281 break;
14284 /* If this builtin accesses SPRs, then pass in the appropriate
14285 SPR number and SPR regno as the last two operands. */
14286 if (uses_spr)
14288 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14289 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14290 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14292 /* If this builtin accesses a CR, then pass in a scratch
14293 CR as the last operand. */
14294 else if (attr & RS6000_BTC_CR)
14295 { cr = gen_reg_rtx (CCmode);
14296 op[nopnds++] = cr;
14299 if (flag_checking)
14301 int expected_nopnds = 0;
14302 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14303 expected_nopnds = 1;
14304 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14305 expected_nopnds = 2;
14306 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14307 expected_nopnds = 3;
14308 if (!(attr & RS6000_BTC_VOID))
14309 expected_nopnds += 1;
14310 if (uses_spr)
14311 expected_nopnds += 2;
14313 gcc_assert (nopnds == expected_nopnds
14314 && nopnds <= MAX_HTM_OPERANDS);
14317 switch (nopnds)
14319 case 1:
14320 pat = GEN_FCN (icode) (op[0]);
14321 break;
14322 case 2:
14323 pat = GEN_FCN (icode) (op[0], op[1]);
14324 break;
14325 case 3:
14326 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14327 break;
14328 case 4:
14329 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14330 break;
14331 default:
14332 gcc_unreachable ();
14334 if (!pat)
14335 return NULL_RTX;
14336 emit_insn (pat);
14338 if (attr & RS6000_BTC_CR)
14340 if (fcode == HTM_BUILTIN_TBEGIN)
14342 /* Emit code to set TARGET to true or false depending on
14343 whether the tbegin. instruction succeeded or failed
14344 to start a transaction. We do this by placing the 1's
14345 complement of CR's EQ bit into TARGET. */
14346 rtx scratch = gen_reg_rtx (SImode);
14347 emit_insn (gen_rtx_SET (scratch,
14348 gen_rtx_EQ (SImode, cr,
14349 const0_rtx)));
14350 emit_insn (gen_rtx_SET (target,
14351 gen_rtx_XOR (SImode, scratch,
14352 GEN_INT (1))));
14354 else
14356 /* Emit code to copy the 4-bit condition register field
14357 CR into the least significant end of register TARGET. */
14358 rtx scratch1 = gen_reg_rtx (SImode);
14359 rtx scratch2 = gen_reg_rtx (SImode);
14360 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14361 emit_insn (gen_movcc (subreg, cr));
14362 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14363 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14367 if (nonvoid)
14368 return target;
14369 return const0_rtx;
14372 *expandedp = false;
14373 return NULL_RTX;
14376 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14378 static rtx
14379 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14380 rtx target)
14382 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14383 if (fcode == RS6000_BUILTIN_CPU_INIT)
14384 return const0_rtx;
14386 if (target == 0 || GET_MODE (target) != SImode)
14387 target = gen_reg_rtx (SImode);
14389 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14390 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14391 /* Use of target clones creates an ARRAY_REF instead of a STRING_CST;
14392 convert it back to a STRING_CST. */
14393 if (TREE_CODE (arg) == ARRAY_REF
14394 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14395 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14396 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14397 arg = TREE_OPERAND (arg, 0);
14399 if (TREE_CODE (arg) != STRING_CST)
14401 error ("builtin %qs only accepts a string argument",
14402 rs6000_builtin_info[(size_t) fcode].name);
14403 return const0_rtx;
14406 if (fcode == RS6000_BUILTIN_CPU_IS)
14408 const char *cpu = TREE_STRING_POINTER (arg);
14409 rtx cpuid = NULL_RTX;
14410 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14411 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14413 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14414 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14415 break;
14417 if (cpuid == NULL_RTX)
14419 /* Invalid CPU argument. */
14420 error ("cpu %qs is an invalid argument to builtin %qs",
14421 cpu, rs6000_builtin_info[(size_t) fcode].name);
14422 return const0_rtx;
14425 rtx platform = gen_reg_rtx (SImode);
14426 rtx tcbmem = gen_const_mem (SImode,
14427 gen_rtx_PLUS (Pmode,
14428 gen_rtx_REG (Pmode, TLS_REGNUM),
14429 GEN_INT (TCB_PLATFORM_OFFSET)));
14430 emit_move_insn (platform, tcbmem);
14431 emit_insn (gen_eqsi3 (target, platform, cpuid));
14433 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14435 const char *hwcap = TREE_STRING_POINTER (arg);
14436 rtx mask = NULL_RTX;
14437 int hwcap_offset;
14438 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14439 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14441 mask = GEN_INT (cpu_supports_info[i].mask);
14442 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14443 break;
14445 if (mask == NULL_RTX)
14447 /* Invalid HWCAP argument. */
14448 error ("%s %qs is an invalid argument to builtin %qs",
14449 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14450 return const0_rtx;
14453 rtx tcb_hwcap = gen_reg_rtx (SImode);
14454 rtx tcbmem = gen_const_mem (SImode,
14455 gen_rtx_PLUS (Pmode,
14456 gen_rtx_REG (Pmode, TLS_REGNUM),
14457 GEN_INT (hwcap_offset)));
14458 emit_move_insn (tcb_hwcap, tcbmem);
14459 rtx scratch1 = gen_reg_rtx (SImode);
14460 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14461 rtx scratch2 = gen_reg_rtx (SImode);
14462 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14463 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14465 else
14466 gcc_unreachable ();
14468 /* Record that we have expanded a CPU builtin, so that we can later
14469 emit a reference to the special symbol exported by LIBC to ensure we
14470 do not link against an old LIBC that doesn't support this feature. */
14471 cpu_builtin_p = true;
14473 #else
14474 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14475 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14477 /* For old LIBCs, always return FALSE. */
14478 emit_move_insn (target, GEN_INT (0));
14479 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14481 return target;
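/* For reference, this expansion backs user code such as

     if (__builtin_cpu_is ("power9"))
       ...
     else if (__builtin_cpu_supports ("vsx"))
       ...

   ("power9" and "vsx" being entries in cpu_is_info and
   cpu_supports_info respectively); each test becomes a TCB load plus
   the compare or mask-and-compare sequence above.  */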
14484 static rtx
14485 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14487 rtx pat;
14488 tree arg0 = CALL_EXPR_ARG (exp, 0);
14489 tree arg1 = CALL_EXPR_ARG (exp, 1);
14490 tree arg2 = CALL_EXPR_ARG (exp, 2);
14491 rtx op0 = expand_normal (arg0);
14492 rtx op1 = expand_normal (arg1);
14493 rtx op2 = expand_normal (arg2);
14494 machine_mode tmode = insn_data[icode].operand[0].mode;
14495 machine_mode mode0 = insn_data[icode].operand[1].mode;
14496 machine_mode mode1 = insn_data[icode].operand[2].mode;
14497 machine_mode mode2 = insn_data[icode].operand[3].mode;
14499 if (icode == CODE_FOR_nothing)
14500 /* Builtin not supported on this processor. */
14501 return 0;
14503 /* If we got invalid arguments bail out before generating bad rtl. */
14504 if (arg0 == error_mark_node
14505 || arg1 == error_mark_node
14506 || arg2 == error_mark_node)
14507 return const0_rtx;
14509 /* Check and prepare argument depending on the instruction code.
14511 Note that a switch statement instead of the sequence of tests
14512 would be incorrect as many of the CODE_FOR values could be
14513 CODE_FOR_nothing and that would yield multiple alternatives
14514 with identical values. We'd never reach here at runtime in
14515 this case. */
14516 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14517 || icode == CODE_FOR_altivec_vsldoi_v2df
14518 || icode == CODE_FOR_altivec_vsldoi_v4si
14519 || icode == CODE_FOR_altivec_vsldoi_v8hi
14520 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14522 /* Only allow 4-bit unsigned literals. */
14523 STRIP_NOPS (arg2);
14524 if (TREE_CODE (arg2) != INTEGER_CST
14525 || TREE_INT_CST_LOW (arg2) & ~0xf)
14527 error ("argument 3 must be a 4-bit unsigned literal");
14528 return CONST0_RTX (tmode);
14531 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14532 || icode == CODE_FOR_vsx_xxpermdi_v2di
14533 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14534 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14535 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14536 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14537 || icode == CODE_FOR_vsx_xxpermdi_v4si
14538 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14539 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14540 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14541 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14542 || icode == CODE_FOR_vsx_xxsldwi_v4si
14543 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14544 || icode == CODE_FOR_vsx_xxsldwi_v2di
14545 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14547 /* Only allow 2-bit unsigned literals. */
14548 STRIP_NOPS (arg2);
14549 if (TREE_CODE (arg2) != INTEGER_CST
14550 || TREE_INT_CST_LOW (arg2) & ~0x3)
14552 error ("argument 3 must be a 2-bit unsigned literal");
14553 return CONST0_RTX (tmode);
14556 else if (icode == CODE_FOR_vsx_set_v2df
14557 || icode == CODE_FOR_vsx_set_v2di
14558 || icode == CODE_FOR_bcdadd
14559 || icode == CODE_FOR_bcdadd_lt
14560 || icode == CODE_FOR_bcdadd_eq
14561 || icode == CODE_FOR_bcdadd_gt
14562 || icode == CODE_FOR_bcdsub
14563 || icode == CODE_FOR_bcdsub_lt
14564 || icode == CODE_FOR_bcdsub_eq
14565 || icode == CODE_FOR_bcdsub_gt)
14567 /* Only allow 1-bit unsigned literals. */
14568 STRIP_NOPS (arg2);
14569 if (TREE_CODE (arg2) != INTEGER_CST
14570 || TREE_INT_CST_LOW (arg2) & ~0x1)
14572 error ("argument 3 must be a 1-bit unsigned literal");
14573 return CONST0_RTX (tmode);
14576 else if (icode == CODE_FOR_dfp_ddedpd_dd
14577 || icode == CODE_FOR_dfp_ddedpd_td)
14579 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14580 STRIP_NOPS (arg0);
14581 if (TREE_CODE (arg0) != INTEGER_CST
14582 || TREE_INT_CST_LOW (arg0) & ~0x2)
14584 error ("argument 1 must be 0 or 2");
14585 return CONST0_RTX (tmode);
14588 else if (icode == CODE_FOR_dfp_denbcd_dd
14589 || icode == CODE_FOR_dfp_denbcd_td)
14591 /* Only allow 1-bit unsigned literals. */
14592 STRIP_NOPS (arg0);
14593 if (TREE_CODE (arg0) != INTEGER_CST
14594 || TREE_INT_CST_LOW (arg0) & ~0x1)
14596 error ("argument 1 must be a 1-bit unsigned literal");
14597 return CONST0_RTX (tmode);
14600 else if (icode == CODE_FOR_dfp_dscli_dd
14601 || icode == CODE_FOR_dfp_dscli_td
14602 || icode == CODE_FOR_dfp_dscri_dd
14603 || icode == CODE_FOR_dfp_dscri_td)
14605 /* Only allow 6-bit unsigned literals. */
14606 STRIP_NOPS (arg1);
14607 if (TREE_CODE (arg1) != INTEGER_CST
14608 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14610 error ("argument 2 must be a 6-bit unsigned literal");
14611 return CONST0_RTX (tmode);
14614 else if (icode == CODE_FOR_crypto_vshasigmaw
14615 || icode == CODE_FOR_crypto_vshasigmad)
14617 /* Check whether the 2nd and 3rd arguments are integer constants in the
14618 valid range, and prepare the arguments. */
14619 STRIP_NOPS (arg1);
14620 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14622 error ("argument 2 must be 0 or 1");
14623 return CONST0_RTX (tmode);
14626 STRIP_NOPS (arg2);
14627 if (TREE_CODE (arg2) != INTEGER_CST
14628 || wi::geu_p (wi::to_wide (arg2), 16))
14630 error ("argument 3 must be in the range 0..15");
14631 return CONST0_RTX (tmode);
14635 if (target == 0
14636 || GET_MODE (target) != tmode
14637 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14638 target = gen_reg_rtx (tmode);
14640 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14641 op0 = copy_to_mode_reg (mode0, op0);
14642 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14643 op1 = copy_to_mode_reg (mode1, op1);
14644 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14645 op2 = copy_to_mode_reg (mode2, op2);
14647 pat = GEN_FCN (icode) (target, op0, op1, op2);
14648 if (! pat)
14649 return 0;
14650 emit_insn (pat);
14652 return target;
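/* For illustration: a typical ternop that exercises the literal checks
   above is vec_sld, whose third operand must be a 4-bit literal, e.g.

       r = vec_sld (a, b, 3);

   A non-constant third argument (or one wider than 4 bits) is rejected
   with the "argument 3 must be a 4-bit unsigned literal" error.  */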
14656 /* Expand the dst builtins. */
14657 static rtx
14658 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14659 bool *expandedp)
14661 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14662 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14663 tree arg0, arg1, arg2;
14664 machine_mode mode0, mode1;
14665 rtx pat, op0, op1, op2;
14666 const struct builtin_description *d;
14667 size_t i;
14669 *expandedp = false;
14671 /* Handle DST variants. */
14672 d = bdesc_dst;
14673 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14674 if (d->code == fcode)
14676 arg0 = CALL_EXPR_ARG (exp, 0);
14677 arg1 = CALL_EXPR_ARG (exp, 1);
14678 arg2 = CALL_EXPR_ARG (exp, 2);
14679 op0 = expand_normal (arg0);
14680 op1 = expand_normal (arg1);
14681 op2 = expand_normal (arg2);
14682 mode0 = insn_data[d->icode].operand[0].mode;
14683 mode1 = insn_data[d->icode].operand[1].mode;
14685 /* Invalid arguments, bail out before generating bad rtl. */
14686 if (arg0 == error_mark_node
14687 || arg1 == error_mark_node
14688 || arg2 == error_mark_node)
14689 return const0_rtx;
14691 *expandedp = true;
14692 STRIP_NOPS (arg2);
14693 if (TREE_CODE (arg2) != INTEGER_CST
14694 || TREE_INT_CST_LOW (arg2) & ~0x3)
14696 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14697 return const0_rtx;
14700 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14701 op0 = copy_to_mode_reg (Pmode, op0);
14702 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14703 op1 = copy_to_mode_reg (mode1, op1);
14705 pat = GEN_FCN (d->icode) (op0, op1, op2);
14706 if (pat != 0)
14707 emit_insn (pat);
14709 return NULL_RTX;
14712 return NULL_RTX;
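/* For illustration: the dst family handled here is vec_dst and friends
   (dst, dstt, dstst, dststt); the third operand is the 2-bit
   data-stream tag validated above, e.g. vec_dst (addr, control, 0).  */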
14715 /* Expand vec_init builtin. */
14716 static rtx
14717 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14719 machine_mode tmode = TYPE_MODE (type);
14720 machine_mode inner_mode = GET_MODE_INNER (tmode);
14721 int i, n_elt = GET_MODE_NUNITS (tmode);
14723 gcc_assert (VECTOR_MODE_P (tmode));
14724 gcc_assert (n_elt == call_expr_nargs (exp));
14726 if (!target || !register_operand (target, tmode))
14727 target = gen_reg_rtx (tmode);
14729 /* If we have a vector comprised of a single element, such as V1TImode, do
14730 the initialization directly. */
14731 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14733 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14734 emit_move_insn (target, gen_lowpart (tmode, x));
14736 else
14738 rtvec v = rtvec_alloc (n_elt);
14740 for (i = 0; i < n_elt; ++i)
14742 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14743 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14746 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14749 return target;
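/* For illustration: a vec_init expansion conceptually builds a vector
   from one scalar argument per element, so for V4SI it behaves like

       v = {a, b, c, d};

   with the PARALLEL above handed to rs6000_expand_vector_init, while
   the single-element V1TI case is just a move in disguise.  */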
14752 /* Return the integer constant in ARG. Constrain it to be in the range
14753 of the subparts of VEC_TYPE; issue an error if not. */
14755 static int
14756 get_element_number (tree vec_type, tree arg)
14758 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14760 if (!tree_fits_uhwi_p (arg)
14761 || (elt = tree_to_uhwi (arg), elt > max))
14763 error ("selector must be an integer constant in the range 0..%wi", max);
14764 return 0;
14767 return elt;
14770 /* Expand vec_set builtin. */
14771 static rtx
14772 altivec_expand_vec_set_builtin (tree exp)
14774 machine_mode tmode, mode1;
14775 tree arg0, arg1, arg2;
14776 int elt;
14777 rtx op0, op1;
14779 arg0 = CALL_EXPR_ARG (exp, 0);
14780 arg1 = CALL_EXPR_ARG (exp, 1);
14781 arg2 = CALL_EXPR_ARG (exp, 2);
14783 tmode = TYPE_MODE (TREE_TYPE (arg0));
14784 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14785 gcc_assert (VECTOR_MODE_P (tmode));
14787 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14788 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14789 elt = get_element_number (TREE_TYPE (arg0), arg2);
14791 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14792 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14794 op0 = force_reg (tmode, op0);
14795 op1 = force_reg (mode1, op1);
14797 rs6000_expand_vector_set (op0, op1, elt);
14799 return op0;
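/* For illustration: the vec_set path implements element insertion,
   conceptually

       v[elt] = x;

   with the selector validated by get_element_number above.  */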
14802 /* Expand vec_ext builtin. */
14803 static rtx
14804 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14806 machine_mode tmode, mode0;
14807 tree arg0, arg1;
14808 rtx op0;
14809 rtx op1;
14811 arg0 = CALL_EXPR_ARG (exp, 0);
14812 arg1 = CALL_EXPR_ARG (exp, 1);
14814 op0 = expand_normal (arg0);
14815 op1 = expand_normal (arg1);
14817 /* Call get_element_number to validate arg1 if it is a constant. */
14818 if (TREE_CODE (arg1) == INTEGER_CST)
14819 (void) get_element_number (TREE_TYPE (arg0), arg1);
14821 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14822 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14823 gcc_assert (VECTOR_MODE_P (mode0));
14825 op0 = force_reg (mode0, op0);
14827 if (optimize || !target || !register_operand (target, tmode))
14828 target = gen_reg_rtx (tmode);
14830 rs6000_expand_vector_extract (target, op0, op1);
14832 return target;
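/* For illustration: the matching extraction path, conceptually

       x = v[elt];

   Only constant selectors are range-checked here; variable selectors
   are left for rs6000_expand_vector_extract to handle.  */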
14835 /* Expand the builtin in EXP and store the result in TARGET. Store
14836 true in *EXPANDEDP if we found a builtin to expand. */
14837 static rtx
14838 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14840 const struct builtin_description *d;
14841 size_t i;
14842 enum insn_code icode;
14843 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14844 tree arg0, arg1, arg2;
14845 rtx op0, pat;
14846 machine_mode tmode, mode0;
14847 enum rs6000_builtins fcode
14848 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14850 if (rs6000_overloaded_builtin_p (fcode))
14852 *expandedp = true;
14853 error ("unresolved overload for Altivec builtin %qF", fndecl);
14855 /* Given it is invalid, just generate a normal call. */
14856 return expand_call (exp, target, false);
14859 target = altivec_expand_dst_builtin (exp, target, expandedp);
14860 if (*expandedp)
14861 return target;
14863 *expandedp = true;
14865 switch (fcode)
14867 case ALTIVEC_BUILTIN_STVX_V2DF:
14868 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14869 case ALTIVEC_BUILTIN_STVX_V2DI:
14870 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14871 case ALTIVEC_BUILTIN_STVX_V4SF:
14872 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14873 case ALTIVEC_BUILTIN_STVX:
14874 case ALTIVEC_BUILTIN_STVX_V4SI:
14875 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14876 case ALTIVEC_BUILTIN_STVX_V8HI:
14877 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14878 case ALTIVEC_BUILTIN_STVX_V16QI:
14879 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14880 case ALTIVEC_BUILTIN_STVEBX:
14881 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14882 case ALTIVEC_BUILTIN_STVEHX:
14883 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14884 case ALTIVEC_BUILTIN_STVEWX:
14885 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14886 case ALTIVEC_BUILTIN_STVXL_V2DF:
14887 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14888 case ALTIVEC_BUILTIN_STVXL_V2DI:
14889 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14890 case ALTIVEC_BUILTIN_STVXL_V4SF:
14891 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14892 case ALTIVEC_BUILTIN_STVXL:
14893 case ALTIVEC_BUILTIN_STVXL_V4SI:
14894 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14895 case ALTIVEC_BUILTIN_STVXL_V8HI:
14896 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14897 case ALTIVEC_BUILTIN_STVXL_V16QI:
14898 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14900 case ALTIVEC_BUILTIN_STVLX:
14901 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14902 case ALTIVEC_BUILTIN_STVLXL:
14903 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14904 case ALTIVEC_BUILTIN_STVRX:
14905 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14906 case ALTIVEC_BUILTIN_STVRXL:
14907 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14909 case P9V_BUILTIN_STXVL:
14910 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14912 case P9V_BUILTIN_XST_LEN_R:
14913 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14915 case VSX_BUILTIN_STXVD2X_V1TI:
14916 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14917 case VSX_BUILTIN_STXVD2X_V2DF:
14918 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14919 case VSX_BUILTIN_STXVD2X_V2DI:
14920 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14921 case VSX_BUILTIN_STXVW4X_V4SF:
14922 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14923 case VSX_BUILTIN_STXVW4X_V4SI:
14924 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14925 case VSX_BUILTIN_STXVW4X_V8HI:
14926 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14927 case VSX_BUILTIN_STXVW4X_V16QI:
14928 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14930 /* For the following on big endian, it's ok to use any appropriate
14931 unaligned-supporting store, so use a generic expander. For
14932 little-endian, the exact element-reversing instruction must
14933 be used. */
14934 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14936 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14937 : CODE_FOR_vsx_st_elemrev_v1ti);
14938 return altivec_expand_stv_builtin (code, exp);
14940 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14942 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14943 : CODE_FOR_vsx_st_elemrev_v2df);
14944 return altivec_expand_stv_builtin (code, exp);
14946 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14948 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14949 : CODE_FOR_vsx_st_elemrev_v2di);
14950 return altivec_expand_stv_builtin (code, exp);
14952 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14954 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14955 : CODE_FOR_vsx_st_elemrev_v4sf);
14956 return altivec_expand_stv_builtin (code, exp);
14958 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14960 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14961 : CODE_FOR_vsx_st_elemrev_v4si);
14962 return altivec_expand_stv_builtin (code, exp);
14964 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14966 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14967 : CODE_FOR_vsx_st_elemrev_v8hi);
14968 return altivec_expand_stv_builtin (code, exp);
14970 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14972 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14973 : CODE_FOR_vsx_st_elemrev_v16qi);
14974 return altivec_expand_stv_builtin (code, exp);
14977 case ALTIVEC_BUILTIN_MFVSCR:
14978 icode = CODE_FOR_altivec_mfvscr;
14979 tmode = insn_data[icode].operand[0].mode;
14981 if (target == 0
14982 || GET_MODE (target) != tmode
14983 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14984 target = gen_reg_rtx (tmode);
14986 pat = GEN_FCN (icode) (target);
14987 if (! pat)
14988 return 0;
14989 emit_insn (pat);
14990 return target;
14992 case ALTIVEC_BUILTIN_MTVSCR:
14993 icode = CODE_FOR_altivec_mtvscr;
14994 arg0 = CALL_EXPR_ARG (exp, 0);
14995 op0 = expand_normal (arg0);
14996 mode0 = insn_data[icode].operand[0].mode;
14998 /* If we got invalid arguments bail out before generating bad rtl. */
14999 if (arg0 == error_mark_node)
15000 return const0_rtx;
15002 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15003 op0 = copy_to_mode_reg (mode0, op0);
15005 pat = GEN_FCN (icode) (op0);
15006 if (pat)
15007 emit_insn (pat);
15008 return NULL_RTX;
15010 case ALTIVEC_BUILTIN_DSSALL:
15011 emit_insn (gen_altivec_dssall ());
15012 return NULL_RTX;
15014 case ALTIVEC_BUILTIN_DSS:
15015 icode = CODE_FOR_altivec_dss;
15016 arg0 = CALL_EXPR_ARG (exp, 0);
15017 STRIP_NOPS (arg0);
15018 op0 = expand_normal (arg0);
15019 mode0 = insn_data[icode].operand[0].mode;
15021 /* If we got invalid arguments bail out before generating bad rtl. */
15022 if (arg0 == error_mark_node)
15023 return const0_rtx;
15025 if (TREE_CODE (arg0) != INTEGER_CST
15026 || TREE_INT_CST_LOW (arg0) & ~0x3)
15028 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15029 return const0_rtx;
15032 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15033 op0 = copy_to_mode_reg (mode0, op0);
15035 emit_insn (gen_altivec_dss (op0));
15036 return NULL_RTX;
15038 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15039 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15040 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15041 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15042 case VSX_BUILTIN_VEC_INIT_V2DF:
15043 case VSX_BUILTIN_VEC_INIT_V2DI:
15044 case VSX_BUILTIN_VEC_INIT_V1TI:
15045 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15047 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15048 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15049 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15050 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15051 case VSX_BUILTIN_VEC_SET_V2DF:
15052 case VSX_BUILTIN_VEC_SET_V2DI:
15053 case VSX_BUILTIN_VEC_SET_V1TI:
15054 return altivec_expand_vec_set_builtin (exp);
15056 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15057 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15058 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15059 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15060 case VSX_BUILTIN_VEC_EXT_V2DF:
15061 case VSX_BUILTIN_VEC_EXT_V2DI:
15062 case VSX_BUILTIN_VEC_EXT_V1TI:
15063 return altivec_expand_vec_ext_builtin (exp, target);
15065 case P9V_BUILTIN_VEC_EXTRACT4B:
15066 arg1 = CALL_EXPR_ARG (exp, 1);
15067 STRIP_NOPS (arg1);
15069 /* Generate a normal call if it is invalid. */
15070 if (arg1 == error_mark_node)
15071 return expand_call (exp, target, false);
15073 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15075 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15076 return expand_call (exp, target, false);
15078 break;
15080 case P9V_BUILTIN_VEC_INSERT4B:
15081 arg2 = CALL_EXPR_ARG (exp, 2);
15082 STRIP_NOPS (arg2);
15084 /* Generate a normal call if it is invalid. */
15085 if (arg2 == error_mark_node)
15086 return expand_call (exp, target, false);
15088 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15090 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15091 return expand_call (exp, target, false);
15093 break;
15095 default:
15096 break;
15100 /* Expand abs* operations. */
15101 d = bdesc_abs;
15102 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15103 if (d->code == fcode)
15104 return altivec_expand_abs_builtin (d->icode, exp, target);
15106 /* Expand the AltiVec predicates. */
15107 d = bdesc_altivec_preds;
15108 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15109 if (d->code == fcode)
15110 return altivec_expand_predicate_builtin (d->icode, exp, target);
15112 /* LV* are funky. We initialized them differently. */
15113 switch (fcode)
15115 case ALTIVEC_BUILTIN_LVSL:
15116 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15117 exp, target, false);
15118 case ALTIVEC_BUILTIN_LVSR:
15119 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15120 exp, target, false);
15121 case ALTIVEC_BUILTIN_LVEBX:
15122 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15123 exp, target, false);
15124 case ALTIVEC_BUILTIN_LVEHX:
15125 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15126 exp, target, false);
15127 case ALTIVEC_BUILTIN_LVEWX:
15128 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15129 exp, target, false);
15130 case ALTIVEC_BUILTIN_LVXL_V2DF:
15131 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15132 exp, target, false);
15133 case ALTIVEC_BUILTIN_LVXL_V2DI:
15134 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15135 exp, target, false);
15136 case ALTIVEC_BUILTIN_LVXL_V4SF:
15137 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15138 exp, target, false);
15139 case ALTIVEC_BUILTIN_LVXL:
15140 case ALTIVEC_BUILTIN_LVXL_V4SI:
15141 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15142 exp, target, false);
15143 case ALTIVEC_BUILTIN_LVXL_V8HI:
15144 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15145 exp, target, false);
15146 case ALTIVEC_BUILTIN_LVXL_V16QI:
15147 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15148 exp, target, false);
15149 case ALTIVEC_BUILTIN_LVX_V1TI:
15150 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15151 exp, target, false);
15152 case ALTIVEC_BUILTIN_LVX_V2DF:
15153 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15154 exp, target, false);
15155 case ALTIVEC_BUILTIN_LVX_V2DI:
15156 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15157 exp, target, false);
15158 case ALTIVEC_BUILTIN_LVX_V4SF:
15159 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15160 exp, target, false);
15161 case ALTIVEC_BUILTIN_LVX:
15162 case ALTIVEC_BUILTIN_LVX_V4SI:
15163 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15164 exp, target, false);
15165 case ALTIVEC_BUILTIN_LVX_V8HI:
15166 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15167 exp, target, false);
15168 case ALTIVEC_BUILTIN_LVX_V16QI:
15169 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15170 exp, target, false);
15171 case ALTIVEC_BUILTIN_LVLX:
15172 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15173 exp, target, true);
15174 case ALTIVEC_BUILTIN_LVLXL:
15175 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15176 exp, target, true);
15177 case ALTIVEC_BUILTIN_LVRX:
15178 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15179 exp, target, true);
15180 case ALTIVEC_BUILTIN_LVRXL:
15181 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15182 exp, target, true);
15183 case VSX_BUILTIN_LXVD2X_V1TI:
15184 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15185 exp, target, false);
15186 case VSX_BUILTIN_LXVD2X_V2DF:
15187 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15188 exp, target, false);
15189 case VSX_BUILTIN_LXVD2X_V2DI:
15190 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15191 exp, target, false);
15192 case VSX_BUILTIN_LXVW4X_V4SF:
15193 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15194 exp, target, false);
15195 case VSX_BUILTIN_LXVW4X_V4SI:
15196 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15197 exp, target, false);
15198 case VSX_BUILTIN_LXVW4X_V8HI:
15199 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15200 exp, target, false);
15201 case VSX_BUILTIN_LXVW4X_V16QI:
15202 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15203 exp, target, false);
15204 /* For the following on big endian, it's ok to use any appropriate
15205 unaligned-supporting load, so use a generic expander. For
15206 little-endian, the exact element-reversing instruction must
15207 be used. */
15208 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15210 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15211 : CODE_FOR_vsx_ld_elemrev_v2df);
15212 return altivec_expand_lv_builtin (code, exp, target, false);
15214 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15216 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15217 : CODE_FOR_vsx_ld_elemrev_v1ti);
15218 return altivec_expand_lv_builtin (code, exp, target, false);
15220 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15222 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15223 : CODE_FOR_vsx_ld_elemrev_v2di);
15224 return altivec_expand_lv_builtin (code, exp, target, false);
15226 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15228 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15229 : CODE_FOR_vsx_ld_elemrev_v4sf);
15230 return altivec_expand_lv_builtin (code, exp, target, false);
15232 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15234 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15235 : CODE_FOR_vsx_ld_elemrev_v4si);
15236 return altivec_expand_lv_builtin (code, exp, target, false);
15238 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15240 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15241 : CODE_FOR_vsx_ld_elemrev_v8hi);
15242 return altivec_expand_lv_builtin (code, exp, target, false);
15244 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15246 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15247 : CODE_FOR_vsx_ld_elemrev_v16qi);
15248 return altivec_expand_lv_builtin (code, exp, target, false);
15250 break;
15251 default:
15252 break;
15256 *expandedp = false;
15257 return NULL_RTX;
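/* Note on the *expandedp protocol used above: true means this routine
   owned the builtin (even when it only issued a diagnostic and
   expanded a plain call), while false tells rs6000_expand_builtin to
   keep searching its other expander tables.  */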
15260 /* Check whether a builtin function is supported in this target
15261 configuration. */
15262 bool
15263 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15265 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15266 return (fnmask & rs6000_builtin_mask) == fnmask;
15272 /* Raise an error message for a builtin function that is called without the
15273 appropriate target options being set. */
15275 static void
15276 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15278 size_t uns_fncode = (size_t) fncode;
15279 const char *name = rs6000_builtin_info[uns_fncode].name;
15280 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15282 gcc_assert (name != NULL);
15283 if ((fnmask & RS6000_BTM_CELL) != 0)
15284 error ("builtin function %qs is only valid for the cell processor", name);
15285 else if ((fnmask & RS6000_BTM_VSX) != 0)
15286 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15287 else if ((fnmask & RS6000_BTM_HTM) != 0)
15288 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15289 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15290 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15291 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15292 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15293 error ("builtin function %qs requires the %qs and %qs options",
15294 name, "-mhard-dfp", "-mpower8-vector");
15295 else if ((fnmask & RS6000_BTM_DFP) != 0)
15296 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15297 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15298 error ("builtin function %qs requires the %qs option", name,
15299 "-mpower8-vector");
15300 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15301 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15302 error ("builtin function %qs requires the %qs and %qs options",
15303 name, "-mcpu=power9", "-m64");
15304 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15305 error ("builtin function %qs requires the %qs option", name,
15306 "-mcpu=power9");
15307 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15308 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15309 error ("builtin function %qs requires the %qs and %qs options",
15310 name, "-mcpu=power9", "-m64");
15311 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15312 error ("builtin function %qs requires the %qs option", name,
15313 "-mcpu=power9");
15314 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15315 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15316 error ("builtin function %qs requires the %qs and %qs options",
15317 name, "-mhard-float", "-mlong-double-128");
15318 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15319 error ("builtin function %qs requires the %qs option", name,
15320 "-mhard-float");
15321 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15322 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15323 name);
15324 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15325 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15326 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15327 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15328 error ("builtin function %qs requires the %qs (or newer), and "
15329 "%qs or %qs options",
15330 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15331 else
15332 error ("builtin function %qs is not supported with the current options",
15333 name);
15336 /* Target hook for early folding of built-ins, shamelessly stolen
15337 from ia64.c. */
15339 static tree
15340 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15341 int n_args ATTRIBUTE_UNUSED,
15342 tree *args ATTRIBUTE_UNUSED,
15343 bool ignore ATTRIBUTE_UNUSED)
15345 #ifdef SUBTARGET_FOLD_BUILTIN
15346 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15347 #else
15348 return NULL_TREE;
15349 #endif
15352 /* Helper function to sort out which built-ins may be valid without having
15353 a LHS. */
15354 static bool
15355 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15357 switch (fn_code)
15359 case ALTIVEC_BUILTIN_STVX_V16QI:
15360 case ALTIVEC_BUILTIN_STVX_V8HI:
15361 case ALTIVEC_BUILTIN_STVX_V4SI:
15362 case ALTIVEC_BUILTIN_STVX_V4SF:
15363 case ALTIVEC_BUILTIN_STVX_V2DI:
15364 case ALTIVEC_BUILTIN_STVX_V2DF:
15365 return true;
15366 default:
15367 return false;
15371 /* Helper function to handle the gimple folding of a vector compare
15372 operation. This sets up true/false vectors, and uses the
15373 VEC_COND_EXPR operation.
15374 CODE indicates which comparison is to be made. (EQ, GT, ...).
15375 TYPE indicates the type of the result. */
15376 static tree
15377 fold_build_vec_cmp (tree_code code, tree type,
15378 tree arg0, tree arg1)
15380 tree cmp_type = build_same_sized_truth_vector_type (type);
15381 tree zero_vec = build_zero_cst (type);
15382 tree minus_one_vec = build_minus_one_cst (type);
15383 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15384 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
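/* For illustration: the VEC_COND_EXPR built above produces the usual
   AltiVec comparison result, with each lane all-ones where the
   comparison holds and all-zeros where it does not.  */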
15387 /* Helper function to handle the in-between steps for the
15388 vector compare built-ins. */
15389 static void
15390 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15392 tree arg0 = gimple_call_arg (stmt, 0);
15393 tree arg1 = gimple_call_arg (stmt, 1);
15394 tree lhs = gimple_call_lhs (stmt);
15395 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15396 gimple *g = gimple_build_assign (lhs, cmp);
15397 gimple_set_location (g, gimple_location (stmt));
15398 gsi_replace (gsi, g, true);
15401 /* Helper function to handle the vector merge[hl] built-ins. The
15402 implementation difference between the h and l versions is in the
15403 values used when building the permute vector for the high-word versus
15404 low-word merge. The variance is keyed off the use_high parameter. */
15405 static void
15406 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15408 tree arg0 = gimple_call_arg (stmt, 0);
15409 tree arg1 = gimple_call_arg (stmt, 1);
15410 tree lhs = gimple_call_lhs (stmt);
15411 tree lhs_type = TREE_TYPE (lhs);
15412 tree lhs_type_type = TREE_TYPE (lhs_type);
15413 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15414 int midpoint = n_elts / 2;
15415 int offset = 0;
15417 if (use_high == 1)
15418 offset = midpoint;
15420 tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);
15422 for (int i = 0; i < midpoint; i++)
15424 elts.safe_push (build_int_cst (lhs_type_type, offset + i));
15425 elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
15428 tree permute = elts.build ();
15430 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15431 gimple_set_location (g, gimple_location (stmt));
15432 gsi_replace (gsi, g, true);
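/* For illustration: with four elements per vector the permute built
   above selects {0, 4, 1, 5} when use_high is 0 and {2, 6, 3, 7} when
   use_high is 1, i.e. it interleaves the first halves or the second
   halves of the two input vectors.  */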
15435 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15436 a constant, use rs6000_fold_builtin.) */
15438 bool
15439 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15441 gimple *stmt = gsi_stmt (*gsi);
15442 tree fndecl = gimple_call_fndecl (stmt);
15443 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15444 enum rs6000_builtins fn_code
15445 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15446 tree arg0, arg1, lhs, temp;
15447 gimple *g;
15449 size_t uns_fncode = (size_t) fn_code;
15450 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15451 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15452 const char *fn_name2 = (icode != CODE_FOR_nothing)
15453 ? get_insn_name ((int) icode)
15454 : "nothing";
15456 if (TARGET_DEBUG_BUILTIN)
15457 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15458 fn_code, fn_name1, fn_name2);
15460 if (!rs6000_fold_gimple)
15461 return false;
15463 /* Prevent gimple folding for code that does not have a LHS, unless it is
15464 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15465 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15466 return false;
15468 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15469 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15470 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15471 if (!func_valid_p)
15472 return false;
15474 switch (fn_code)
15476 /* Flavors of vec_add. We deliberately don't expand
15477 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15478 TImode, resulting in much poorer code generation. */
15479 case ALTIVEC_BUILTIN_VADDUBM:
15480 case ALTIVEC_BUILTIN_VADDUHM:
15481 case ALTIVEC_BUILTIN_VADDUWM:
15482 case P8V_BUILTIN_VADDUDM:
15483 case ALTIVEC_BUILTIN_VADDFP:
15484 case VSX_BUILTIN_XVADDDP:
15485 arg0 = gimple_call_arg (stmt, 0);
15486 arg1 = gimple_call_arg (stmt, 1);
15487 lhs = gimple_call_lhs (stmt);
15488 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
15489 gimple_set_location (g, gimple_location (stmt));
15490 gsi_replace (gsi, g, true);
15491 return true;
15492 /* Flavors of vec_sub. We deliberately don't expand
15493 P8V_BUILTIN_VSUBUQM. */
15494 case ALTIVEC_BUILTIN_VSUBUBM:
15495 case ALTIVEC_BUILTIN_VSUBUHM:
15496 case ALTIVEC_BUILTIN_VSUBUWM:
15497 case P8V_BUILTIN_VSUBUDM:
15498 case ALTIVEC_BUILTIN_VSUBFP:
15499 case VSX_BUILTIN_XVSUBDP:
15500 arg0 = gimple_call_arg (stmt, 0);
15501 arg1 = gimple_call_arg (stmt, 1);
15502 lhs = gimple_call_lhs (stmt);
15503 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
15504 gimple_set_location (g, gimple_location (stmt));
15505 gsi_replace (gsi, g, true);
15506 return true;
15507 case VSX_BUILTIN_XVMULSP:
15508 case VSX_BUILTIN_XVMULDP:
15509 arg0 = gimple_call_arg (stmt, 0);
15510 arg1 = gimple_call_arg (stmt, 1);
15511 lhs = gimple_call_lhs (stmt);
15512 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15513 gimple_set_location (g, gimple_location (stmt));
15514 gsi_replace (gsi, g, true);
15515 return true;
15516 /* Even element flavors of vec_mul (signed). */
15517 case ALTIVEC_BUILTIN_VMULESB:
15518 case ALTIVEC_BUILTIN_VMULESH:
15519 case P8V_BUILTIN_VMULESW:
15520 /* Even element flavors of vec_mul (unsigned). */
15521 case ALTIVEC_BUILTIN_VMULEUB:
15522 case ALTIVEC_BUILTIN_VMULEUH:
15523 case P8V_BUILTIN_VMULEUW:
15524 arg0 = gimple_call_arg (stmt, 0);
15525 arg1 = gimple_call_arg (stmt, 1);
15526 lhs = gimple_call_lhs (stmt);
15527 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15528 gimple_set_location (g, gimple_location (stmt));
15529 gsi_replace (gsi, g, true);
15530 return true;
15531 /* Odd element flavors of vec_mul (signed). */
15532 case ALTIVEC_BUILTIN_VMULOSB:
15533 case ALTIVEC_BUILTIN_VMULOSH:
15534 case P8V_BUILTIN_VMULOSW:
15535 /* Odd element flavors of vec_mul (unsigned). */
15536 case ALTIVEC_BUILTIN_VMULOUB:
15537 case ALTIVEC_BUILTIN_VMULOUH:
15538 case P8V_BUILTIN_VMULOUW:
15539 arg0 = gimple_call_arg (stmt, 0);
15540 arg1 = gimple_call_arg (stmt, 1);
15541 lhs = gimple_call_lhs (stmt);
15542 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15543 gimple_set_location (g, gimple_location (stmt));
15544 gsi_replace (gsi, g, true);
15545 return true;
15546 /* Flavors of vec_div (Integer). */
15547 case VSX_BUILTIN_DIV_V2DI:
15548 case VSX_BUILTIN_UDIV_V2DI:
15549 arg0 = gimple_call_arg (stmt, 0);
15550 arg1 = gimple_call_arg (stmt, 1);
15551 lhs = gimple_call_lhs (stmt);
15552 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15553 gimple_set_location (g, gimple_location (stmt));
15554 gsi_replace (gsi, g, true);
15555 return true;
15556 /* Flavors of vec_div (Float). */
15557 case VSX_BUILTIN_XVDIVSP:
15558 case VSX_BUILTIN_XVDIVDP:
15559 arg0 = gimple_call_arg (stmt, 0);
15560 arg1 = gimple_call_arg (stmt, 1);
15561 lhs = gimple_call_lhs (stmt);
15562 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15563 gimple_set_location (g, gimple_location (stmt));
15564 gsi_replace (gsi, g, true);
15565 return true;
15566 /* Flavors of vec_and. */
15567 case ALTIVEC_BUILTIN_VAND:
15568 arg0 = gimple_call_arg (stmt, 0);
15569 arg1 = gimple_call_arg (stmt, 1);
15570 lhs = gimple_call_lhs (stmt);
15571 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15572 gimple_set_location (g, gimple_location (stmt));
15573 gsi_replace (gsi, g, true);
15574 return true;
15575 /* Flavors of vec_andc. */
15576 case ALTIVEC_BUILTIN_VANDC:
15577 arg0 = gimple_call_arg (stmt, 0);
15578 arg1 = gimple_call_arg (stmt, 1);
15579 lhs = gimple_call_lhs (stmt);
15580 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15581 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15582 gimple_set_location (g, gimple_location (stmt));
15583 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15584 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15585 gimple_set_location (g, gimple_location (stmt));
15586 gsi_replace (gsi, g, true);
15587 return true;
15588 /* Flavors of vec_nand. */
15589 case P8V_BUILTIN_VEC_NAND:
15590 case P8V_BUILTIN_NAND_V16QI:
15591 case P8V_BUILTIN_NAND_V8HI:
15592 case P8V_BUILTIN_NAND_V4SI:
15593 case P8V_BUILTIN_NAND_V4SF:
15594 case P8V_BUILTIN_NAND_V2DF:
15595 case P8V_BUILTIN_NAND_V2DI:
15596 arg0 = gimple_call_arg (stmt, 0);
15597 arg1 = gimple_call_arg (stmt, 1);
15598 lhs = gimple_call_lhs (stmt);
15599 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15600 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15601 gimple_set_location (g, gimple_location (stmt));
15602 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15603 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15604 gimple_set_location (g, gimple_location (stmt));
15605 gsi_replace (gsi, g, true);
15606 return true;
15607 /* Flavors of vec_or. */
15608 case ALTIVEC_BUILTIN_VOR:
15609 arg0 = gimple_call_arg (stmt, 0);
15610 arg1 = gimple_call_arg (stmt, 1);
15611 lhs = gimple_call_lhs (stmt);
15612 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15613 gimple_set_location (g, gimple_location (stmt));
15614 gsi_replace (gsi, g, true);
15615 return true;
15616 /* flavors of vec_orc. */
15617 case P8V_BUILTIN_ORC_V16QI:
15618 case P8V_BUILTIN_ORC_V8HI:
15619 case P8V_BUILTIN_ORC_V4SI:
15620 case P8V_BUILTIN_ORC_V4SF:
15621 case P8V_BUILTIN_ORC_V2DF:
15622 case P8V_BUILTIN_ORC_V2DI:
15623 arg0 = gimple_call_arg (stmt, 0);
15624 arg1 = gimple_call_arg (stmt, 1);
15625 lhs = gimple_call_lhs (stmt);
15626 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15627 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15628 gimple_set_location (g, gimple_location (stmt));
15629 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15630 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15631 gimple_set_location (g, gimple_location (stmt));
15632 gsi_replace (gsi, g, true);
15633 return true;
15634 /* Flavors of vec_xor. */
15635 case ALTIVEC_BUILTIN_VXOR:
15636 arg0 = gimple_call_arg (stmt, 0);
15637 arg1 = gimple_call_arg (stmt, 1);
15638 lhs = gimple_call_lhs (stmt);
15639 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15640 gimple_set_location (g, gimple_location (stmt));
15641 gsi_replace (gsi, g, true);
15642 return true;
15643 /* Flavors of vec_nor. */
15644 case ALTIVEC_BUILTIN_VNOR:
15645 arg0 = gimple_call_arg (stmt, 0);
15646 arg1 = gimple_call_arg (stmt, 1);
15647 lhs = gimple_call_lhs (stmt);
15648 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15649 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15652 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15653 gimple_set_location (g, gimple_location (stmt));
15654 gsi_replace (gsi, g, true);
15655 return true;
15656 /* flavors of vec_abs. */
15657 case ALTIVEC_BUILTIN_ABS_V16QI:
15658 case ALTIVEC_BUILTIN_ABS_V8HI:
15659 case ALTIVEC_BUILTIN_ABS_V4SI:
15660 case ALTIVEC_BUILTIN_ABS_V4SF:
15661 case P8V_BUILTIN_ABS_V2DI:
15662 case VSX_BUILTIN_XVABSDP:
15663 arg0 = gimple_call_arg (stmt, 0);
15664 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15665 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15666 return false;
15667 lhs = gimple_call_lhs (stmt);
15668 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15669 gimple_set_location (g, gimple_location (stmt));
15670 gsi_replace (gsi, g, true);
15671 return true;
15672 /* flavors of vec_min. */
15673 case VSX_BUILTIN_XVMINDP:
15674 case P8V_BUILTIN_VMINSD:
15675 case P8V_BUILTIN_VMINUD:
15676 case ALTIVEC_BUILTIN_VMINSB:
15677 case ALTIVEC_BUILTIN_VMINSH:
15678 case ALTIVEC_BUILTIN_VMINSW:
15679 case ALTIVEC_BUILTIN_VMINUB:
15680 case ALTIVEC_BUILTIN_VMINUH:
15681 case ALTIVEC_BUILTIN_VMINUW:
15682 case ALTIVEC_BUILTIN_VMINFP:
15683 arg0 = gimple_call_arg (stmt, 0);
15684 arg1 = gimple_call_arg (stmt, 1);
15685 lhs = gimple_call_lhs (stmt);
15686 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15687 gimple_set_location (g, gimple_location (stmt));
15688 gsi_replace (gsi, g, true);
15689 return true;
15690 /* flavors of vec_max. */
15691 case VSX_BUILTIN_XVMAXDP:
15692 case P8V_BUILTIN_VMAXSD:
15693 case P8V_BUILTIN_VMAXUD:
15694 case ALTIVEC_BUILTIN_VMAXSB:
15695 case ALTIVEC_BUILTIN_VMAXSH:
15696 case ALTIVEC_BUILTIN_VMAXSW:
15697 case ALTIVEC_BUILTIN_VMAXUB:
15698 case ALTIVEC_BUILTIN_VMAXUH:
15699 case ALTIVEC_BUILTIN_VMAXUW:
15700 case ALTIVEC_BUILTIN_VMAXFP:
15701 arg0 = gimple_call_arg (stmt, 0);
15702 arg1 = gimple_call_arg (stmt, 1);
15703 lhs = gimple_call_lhs (stmt);
15704 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15705 gimple_set_location (g, gimple_location (stmt));
15706 gsi_replace (gsi, g, true);
15707 return true;
15708 /* Flavors of vec_eqv. */
15709 case P8V_BUILTIN_EQV_V16QI:
15710 case P8V_BUILTIN_EQV_V8HI:
15711 case P8V_BUILTIN_EQV_V4SI:
15712 case P8V_BUILTIN_EQV_V4SF:
15713 case P8V_BUILTIN_EQV_V2DF:
15714 case P8V_BUILTIN_EQV_V2DI:
15715 arg0 = gimple_call_arg (stmt, 0);
15716 arg1 = gimple_call_arg (stmt, 1);
15717 lhs = gimple_call_lhs (stmt);
15718 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15719 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15720 gimple_set_location (g, gimple_location (stmt));
15721 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15722 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15723 gimple_set_location (g, gimple_location (stmt));
15724 gsi_replace (gsi, g, true);
15725 return true;
15726 /* Flavors of vec_rotate_left. */
15727 case ALTIVEC_BUILTIN_VRLB:
15728 case ALTIVEC_BUILTIN_VRLH:
15729 case ALTIVEC_BUILTIN_VRLW:
15730 case P8V_BUILTIN_VRLD:
15731 arg0 = gimple_call_arg (stmt, 0);
15732 arg1 = gimple_call_arg (stmt, 1);
15733 lhs = gimple_call_lhs (stmt);
15734 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15735 gimple_set_location (g, gimple_location (stmt));
15736 gsi_replace (gsi, g, true);
15737 return true;
15738 /* Flavors of vector shift right algebraic.
15739 vec_sra{b,h,w} -> vsra{b,h,w}. */
15740 case ALTIVEC_BUILTIN_VSRAB:
15741 case ALTIVEC_BUILTIN_VSRAH:
15742 case ALTIVEC_BUILTIN_VSRAW:
15743 case P8V_BUILTIN_VSRAD:
15744 arg0 = gimple_call_arg (stmt, 0);
15745 arg1 = gimple_call_arg (stmt, 1);
15746 lhs = gimple_call_lhs (stmt);
15747 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15748 gimple_set_location (g, gimple_location (stmt));
15749 gsi_replace (gsi, g, true);
15750 return true;
15751 /* Flavors of vector shift left.
15752 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15753 case ALTIVEC_BUILTIN_VSLB:
15754 case ALTIVEC_BUILTIN_VSLH:
15755 case ALTIVEC_BUILTIN_VSLW:
15756 case P8V_BUILTIN_VSLD:
15757 arg0 = gimple_call_arg (stmt, 0);
15758 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15759 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15760 return false;
15761 arg1 = gimple_call_arg (stmt, 1);
15762 lhs = gimple_call_lhs (stmt);
15763 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
15764 gimple_set_location (g, gimple_location (stmt));
15765 gsi_replace (gsi, g, true);
15766 return true;
15767 /* Flavors of vector shift right. */
15768 case ALTIVEC_BUILTIN_VSRB:
15769 case ALTIVEC_BUILTIN_VSRH:
15770 case ALTIVEC_BUILTIN_VSRW:
15771 case P8V_BUILTIN_VSRD:
15773 arg0 = gimple_call_arg (stmt, 0);
15774 arg1 = gimple_call_arg (stmt, 1);
15775 lhs = gimple_call_lhs (stmt);
15776 gimple_seq stmts = NULL;
15777 /* Convert arg0 to unsigned. */
15778 tree arg0_unsigned
15779 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15780 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15781 tree res
15782 = gimple_build (&stmts, RSHIFT_EXPR,
15783 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15784 /* Convert result back to the lhs type. */
15785 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15786 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15787 update_call_from_tree (gsi, res);
15788 return true;
15790 /* Vector loads. */
15791 case ALTIVEC_BUILTIN_LVX_V16QI:
15792 case ALTIVEC_BUILTIN_LVX_V8HI:
15793 case ALTIVEC_BUILTIN_LVX_V4SI:
15794 case ALTIVEC_BUILTIN_LVX_V4SF:
15795 case ALTIVEC_BUILTIN_LVX_V2DI:
15796 case ALTIVEC_BUILTIN_LVX_V2DF:
15797 case ALTIVEC_BUILTIN_LVX_V1TI:
15799 arg0 = gimple_call_arg (stmt, 0); // offset
15800 arg1 = gimple_call_arg (stmt, 1); // address
15801 lhs = gimple_call_lhs (stmt);
15802 location_t loc = gimple_location (stmt);
15803 /* Since arg1 may be cast to a different type, just use ptr_type_node
15804 here instead of trying to enforce TBAA on pointer types. */
15805 tree arg1_type = ptr_type_node;
15806 tree lhs_type = TREE_TYPE (lhs);
15807 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15808 the tree using the value from arg0. The resulting type will match
15809 the type of arg1. */
15810 gimple_seq stmts = NULL;
15811 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15812 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15813 arg1_type, arg1, temp_offset);
15814 /* Mask off any lower bits from the address. */
15815 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15816 arg1_type, temp_addr,
15817 build_int_cst (arg1_type, -16));
15818 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15819 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15820 take an offset, but since we've already incorporated the offset
15821 above, here we just pass in a zero. */
15822 gimple *g
15823 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15824 build_int_cst (arg1_type, 0)));
15825 gimple_set_location (g, loc);
15826 gsi_replace (gsi, g, true);
15827 return true;
15829 /* Vector stores. */
15830 case ALTIVEC_BUILTIN_STVX_V16QI:
15831 case ALTIVEC_BUILTIN_STVX_V8HI:
15832 case ALTIVEC_BUILTIN_STVX_V4SI:
15833 case ALTIVEC_BUILTIN_STVX_V4SF:
15834 case ALTIVEC_BUILTIN_STVX_V2DI:
15835 case ALTIVEC_BUILTIN_STVX_V2DF:
15837 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15838 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15839 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15840 location_t loc = gimple_location (stmt);
15841 tree arg0_type = TREE_TYPE (arg0);
15842 /* Use ptr_type_node (no TBAA) for the arg2_type.
15843 FIXME: (Richard) "A proper fix would be to transition this type as
15844 seen from the frontend to GIMPLE, for example in a similar way we
15845 do for MEM_REFs by piggy-backing that on an extra argument, a
15846 constant zero pointer of the alias pointer type to use (which would
15847 also serve as a type indicator of the store itself). I'd use a
15848 target specific internal function for this (not sure if we can have
15849 those target specific, but I guess if it's folded away then that's
15850 fine) and get away with the overload set." */
15851 tree arg2_type = ptr_type_node;
15852 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15853 the tree using the value from arg1. The resulting type will match
15854 the type of arg2. */
15855 gimple_seq stmts = NULL;
15856 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15857 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15858 arg2_type, arg2, temp_offset);
15859 /* Mask off any lower bits from the address. */
15860 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15861 arg2_type, temp_addr,
15862 build_int_cst (arg2_type, -16));
15863 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15864 /* The desired gimple result should be similar to:
15865 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15866 gimple *g
15867 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15868 build_int_cst (arg2_type, 0)), arg0);
15869 gimple_set_location (g, loc);
15870 gsi_replace (gsi, g, true);
15871 return true;
15874 /* Vector Fused multiply-add (fma). */
15875 case ALTIVEC_BUILTIN_VMADDFP:
15876 case VSX_BUILTIN_XVMADDDP:
15877 case ALTIVEC_BUILTIN_VMLADDUHM:
15879 arg0 = gimple_call_arg (stmt, 0);
15880 arg1 = gimple_call_arg (stmt, 1);
15881 tree arg2 = gimple_call_arg (stmt, 2);
15882 lhs = gimple_call_lhs (stmt);
15883 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15884 gimple_call_set_lhs (g, lhs);
15885 gimple_call_set_nothrow (g, true);
15886 gimple_set_location (g, gimple_location (stmt));
15887 gsi_replace (gsi, g, true);
15888 return true;
15891 /* Vector compares; EQ, NE, GE, GT, LE. */
15892 case ALTIVEC_BUILTIN_VCMPEQUB:
15893 case ALTIVEC_BUILTIN_VCMPEQUH:
15894 case ALTIVEC_BUILTIN_VCMPEQUW:
15895 case P8V_BUILTIN_VCMPEQUD:
15896 fold_compare_helper (gsi, EQ_EXPR, stmt);
15897 return true;
15899 case P9V_BUILTIN_CMPNEB:
15900 case P9V_BUILTIN_CMPNEH:
15901 case P9V_BUILTIN_CMPNEW:
15902 fold_compare_helper (gsi, NE_EXPR, stmt);
15903 return true;
15905 case VSX_BUILTIN_CMPGE_16QI:
15906 case VSX_BUILTIN_CMPGE_U16QI:
15907 case VSX_BUILTIN_CMPGE_8HI:
15908 case VSX_BUILTIN_CMPGE_U8HI:
15909 case VSX_BUILTIN_CMPGE_4SI:
15910 case VSX_BUILTIN_CMPGE_U4SI:
15911 case VSX_BUILTIN_CMPGE_2DI:
15912 case VSX_BUILTIN_CMPGE_U2DI:
15913 fold_compare_helper (gsi, GE_EXPR, stmt);
15914 return true;
15916 case ALTIVEC_BUILTIN_VCMPGTSB:
15917 case ALTIVEC_BUILTIN_VCMPGTUB:
15918 case ALTIVEC_BUILTIN_VCMPGTSH:
15919 case ALTIVEC_BUILTIN_VCMPGTUH:
15920 case ALTIVEC_BUILTIN_VCMPGTSW:
15921 case ALTIVEC_BUILTIN_VCMPGTUW:
15922 case P8V_BUILTIN_VCMPGTUD:
15923 case P8V_BUILTIN_VCMPGTSD:
15924 fold_compare_helper (gsi, GT_EXPR, stmt);
15925 return true;
15927 case VSX_BUILTIN_CMPLE_16QI:
15928 case VSX_BUILTIN_CMPLE_U16QI:
15929 case VSX_BUILTIN_CMPLE_8HI:
15930 case VSX_BUILTIN_CMPLE_U8HI:
15931 case VSX_BUILTIN_CMPLE_4SI:
15932 case VSX_BUILTIN_CMPLE_U4SI:
15933 case VSX_BUILTIN_CMPLE_2DI:
15934 case VSX_BUILTIN_CMPLE_U2DI:
15935 fold_compare_helper (gsi, LE_EXPR, stmt);
15936 return true;
15938 /* flavors of vec_splat_[us]{8,16,32}. */
15939 case ALTIVEC_BUILTIN_VSPLTISB:
15940 case ALTIVEC_BUILTIN_VSPLTISH:
15941 case ALTIVEC_BUILTIN_VSPLTISW:
15943 int size;
15945 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15946 size = 8;
15947 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15948 size = 16;
15949 else
15950 size = 32;
15952 arg0 = gimple_call_arg (stmt, 0);
15953 lhs = gimple_call_lhs (stmt);
15955 /* Only fold vec_splat_*() if the lower bits of arg 0 are a 5-bit
15956 signed constant in the range -16 to +15. */
15957 if (TREE_CODE (arg0) != INTEGER_CST
15958 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
15959 -16, 15))
15960 return false;
15961 gimple_seq stmts = NULL;
15962 location_t loc = gimple_location (stmt);
15963 tree splat_value = gimple_convert (&stmts, loc,
15964 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15965 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15966 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15967 g = gimple_build_assign (lhs, splat_tree);
15968 gimple_set_location (g, gimple_location (stmt));
15969 gsi_replace (gsi, g, true);
15970 return true;
15973 /* vec_mergel (integrals). */
15974 case ALTIVEC_BUILTIN_VMRGLH:
15975 case ALTIVEC_BUILTIN_VMRGLW:
15976 case VSX_BUILTIN_XXMRGLW_4SI:
15977 case ALTIVEC_BUILTIN_VMRGLB:
15978 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15979 fold_mergehl_helper (gsi, stmt, 1);
15980 return true;
15981 /* vec_mergeh (integrals). */
15982 case ALTIVEC_BUILTIN_VMRGHH:
15983 case ALTIVEC_BUILTIN_VMRGHW:
15984 case VSX_BUILTIN_XXMRGHW_4SI:
15985 case ALTIVEC_BUILTIN_VMRGHB:
15986 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15987 fold_mergehl_helper (gsi, stmt, 0);
15988 return true;
15989 default:
15990 if (TARGET_DEBUG_BUILTIN)
15991 fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
15992 fn_code, fn_name1, fn_name2);
15993 break;
15996 return false;
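/* For illustration: the net effect of the folding above is that a call
   such as

       c = vec_add (a, b);

   becomes a plain PLUS_EXPR assignment in GIMPLE, which the middle end
   can then constant-fold, CSE, or vectorize like ordinary arithmetic.  */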
15999 /* Expand an expression EXP that calls a built-in function,
16000 with result going to TARGET if that's convenient
16001 (and in mode MODE if that's convenient).
16002 SUBTARGET may be used as the target for computing one of EXP's operands.
16003 IGNORE is nonzero if the value is to be ignored. */
16005 static rtx
16006 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16007 machine_mode mode ATTRIBUTE_UNUSED,
16008 int ignore ATTRIBUTE_UNUSED)
16010 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16011 enum rs6000_builtins fcode
16012 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16013 size_t uns_fcode = (size_t)fcode;
16014 const struct builtin_description *d;
16015 size_t i;
16016 rtx ret;
16017 bool success;
16018 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16019 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16020 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16022 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16023 floating point type, depending on whether long double is the IBM extended
16024 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16025 we only define one variant of the built-in function, and switch the code
16026 when defining it, rather than defining two built-ins and using the
16027 overload table in rs6000-c.c to switch between the two. If we don't have
16028 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16029 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16030 #ifdef HAVE_AS_POWER9
16031 if (FLOAT128_IEEE_P (TFmode))
16032 switch (icode)
16034 default:
16035 break;
16037 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16038 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16039 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16040 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16041 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16042 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16043 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16044 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16045 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16046 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16047 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16048 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16049 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16051 #endif
16053 if (TARGET_DEBUG_BUILTIN)
16055 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16056 const char *name2 = (icode != CODE_FOR_nothing)
16057 ? get_insn_name ((int) icode)
16058 : "nothing";
16059 const char *name3;
16061 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16063 default: name3 = "unknown"; break;
16064 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16065 case RS6000_BTC_UNARY: name3 = "unary"; break;
16066 case RS6000_BTC_BINARY: name3 = "binary"; break;
16067 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16068 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16069 case RS6000_BTC_ABS: name3 = "abs"; break;
16070 case RS6000_BTC_DST: name3 = "dst"; break;
16074 fprintf (stderr,
16075 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16076 (name1) ? name1 : "---", fcode,
16077 (name2) ? name2 : "---", (int) icode,
16078 name3,
16079 func_valid_p ? "" : ", not valid");
16082 if (!func_valid_p)
16084 rs6000_invalid_builtin (fcode);
16086 /* Given it is invalid, just generate a normal call. */
16087 return expand_call (exp, target, ignore);
16090 switch (fcode)
16092 case RS6000_BUILTIN_RECIP:
16093 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16095 case RS6000_BUILTIN_RECIPF:
16096 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16098 case RS6000_BUILTIN_RSQRTF:
16099 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16101 case RS6000_BUILTIN_RSQRT:
16102 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16104 case POWER7_BUILTIN_BPERMD:
16105 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16106 ? CODE_FOR_bpermd_di
16107 : CODE_FOR_bpermd_si), exp, target);
16109 case RS6000_BUILTIN_GET_TB:
16110 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16111 target);
16113 case RS6000_BUILTIN_MFTB:
16114 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16115 ? CODE_FOR_rs6000_mftb_di
16116 : CODE_FOR_rs6000_mftb_si),
16117 target);
16119 case RS6000_BUILTIN_MFFS:
16120 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16122 case RS6000_BUILTIN_MTFSF:
16123 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16125 case RS6000_BUILTIN_CPU_INIT:
16126 case RS6000_BUILTIN_CPU_IS:
16127 case RS6000_BUILTIN_CPU_SUPPORTS:
16128 return cpu_expand_builtin (fcode, exp, target);
16130 case MISC_BUILTIN_SPEC_BARRIER:
16132 emit_insn (gen_rs6000_speculation_barrier ());
16133 return NULL_RTX;
16136 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16137 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16139 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16140 : (int) CODE_FOR_altivec_lvsl_direct);
16141 machine_mode tmode = insn_data[icode2].operand[0].mode;
16142 machine_mode mode = insn_data[icode2].operand[1].mode;
16143 tree arg;
16144 rtx op, addr, pat;
16146 gcc_assert (TARGET_ALTIVEC);
16148 arg = CALL_EXPR_ARG (exp, 0);
16149 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16150 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16151 addr = memory_address (mode, op);
16152 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16153 op = addr;
16154 else
16156 /* For the load case we need to negate the address. */
16157 op = gen_reg_rtx (GET_MODE (addr));
16158 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16160 op = gen_rtx_MEM (mode, op);
16162 if (target == 0
16163 || GET_MODE (target) != tmode
16164 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16165 target = gen_reg_rtx (tmode);
16167 pat = GEN_FCN (icode2) (target, op);
16168 if (!pat)
16169 return 0;
16170 emit_insn (pat);
16172 return target;
16175 case ALTIVEC_BUILTIN_VCFUX:
16176 case ALTIVEC_BUILTIN_VCFSX:
16177 case ALTIVEC_BUILTIN_VCTUXS:
16178 case ALTIVEC_BUILTIN_VCTSXS:
16179 /* FIXME: There's got to be a nicer way to handle this case than
16180 constructing a new CALL_EXPR. */
16181 if (call_expr_nargs (exp) == 1)
16183 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16184 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16186 break;
16188 default:
16189 break;
16192 if (TARGET_ALTIVEC)
16194 ret = altivec_expand_builtin (exp, target, &success);
16196 if (success)
16197 return ret;
16199 if (TARGET_HTM)
16201 ret = htm_expand_builtin (exp, target, &success);
16203 if (success)
16204 return ret;
16207 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16208 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16209 gcc_assert (attr == RS6000_BTC_UNARY
16210 || attr == RS6000_BTC_BINARY
16211 || attr == RS6000_BTC_TERNARY
16212 || attr == RS6000_BTC_SPECIAL);
16214 /* Handle simple unary operations. */
16215 d = bdesc_1arg;
16216 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16217 if (d->code == fcode)
16218 return rs6000_expand_unop_builtin (icode, exp, target);
16220 /* Handle simple binary operations. */
16221 d = bdesc_2arg;
16222 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16223 if (d->code == fcode)
16224 return rs6000_expand_binop_builtin (icode, exp, target);
16226 /* Handle simple ternary operations. */
16227 d = bdesc_3arg;
16228 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16229 if (d->code == fcode)
16230 return rs6000_expand_ternop_builtin (icode, exp, target);
16232 /* Handle simple no-argument operations. */
16233 d = bdesc_0arg;
16234 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16235 if (d->code == fcode)
16236 return rs6000_expand_zeroop_builtin (icode, target);
16238 gcc_unreachable ();
16241 /* Create a builtin vector type with a name, taking care not to give
16242 the canonical type a name. */
16244 static tree
16245 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16247 tree result = build_vector_type (elt_type, num_elts);
16249 /* Copy so we don't give the canonical type a name. */
16250 result = build_variant_type_copy (result);
16252 add_builtin_type (name, result);
16254 return result;
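/* For instance, rs6000_vector_type ("__vector signed int", intSI_type_node, 4)
   hands out a variant copy carrying the AltiVec spelling for diagnostics and
   debug info, while the canonical V4SI vector type itself stays unnamed and
   continues to compare equal for type canonicalization purposes.  */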
16257 static void
16258 rs6000_init_builtins (void)
16260 tree tdecl;
16261 tree ftype;
16262 machine_mode mode;
16264 if (TARGET_DEBUG_BUILTIN)
16265 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16266 (TARGET_ALTIVEC) ? ", altivec" : "",
16267 (TARGET_VSX) ? ", vsx" : "");
16269 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16270 : "__vector long long",
16271 intDI_type_node, 2);
16272 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16273 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16274 intSI_type_node, 4);
16275 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16276 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16277 intHI_type_node, 8);
16278 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16279 intQI_type_node, 16);
16281 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16282 unsigned_intQI_type_node, 16);
16283 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16284 unsigned_intHI_type_node, 8);
16285 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16286 unsigned_intSI_type_node, 4);
16287 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16288 ? "__vector unsigned long"
16289 : "__vector unsigned long long",
16290 unsigned_intDI_type_node, 2);
16292 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16294 const_str_type_node
16295 = build_pointer_type (build_qualified_type (char_type_node,
16296 TYPE_QUAL_CONST));
16298 /* We use V1TI mode as a special container to hold __int128_t items that
16299 must live in VSX registers. */
16300 if (intTI_type_node)
16302 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16303 intTI_type_node, 1);
16304 unsigned_V1TI_type_node
16305 = rs6000_vector_type ("__vector unsigned __int128",
16306 unsigned_intTI_type_node, 1);
16309 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16310 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16311 'vector unsigned short'. */
16313 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16314 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16315 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16316 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16317 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16319 long_integer_type_internal_node = long_integer_type_node;
16320 long_unsigned_type_internal_node = long_unsigned_type_node;
16321 long_long_integer_type_internal_node = long_long_integer_type_node;
16322 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16323 intQI_type_internal_node = intQI_type_node;
16324 uintQI_type_internal_node = unsigned_intQI_type_node;
16325 intHI_type_internal_node = intHI_type_node;
16326 uintHI_type_internal_node = unsigned_intHI_type_node;
16327 intSI_type_internal_node = intSI_type_node;
16328 uintSI_type_internal_node = unsigned_intSI_type_node;
16329 intDI_type_internal_node = intDI_type_node;
16330 uintDI_type_internal_node = unsigned_intDI_type_node;
16331 intTI_type_internal_node = intTI_type_node;
16332 uintTI_type_internal_node = unsigned_intTI_type_node;
16333 float_type_internal_node = float_type_node;
16334 double_type_internal_node = double_type_node;
16335 long_double_type_internal_node = long_double_type_node;
16336 dfloat64_type_internal_node = dfloat64_type_node;
16337 dfloat128_type_internal_node = dfloat128_type_node;
16338 void_type_internal_node = void_type_node;
16340 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16341 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16342 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16343 format that uses a pair of doubles, depending on the switches and
16344 defaults.
16346 If we don't have support for either 128-bit IBM double-double or IEEE
16347 128-bit floating point, we need to make sure the type is non-zero, or
16348 else the self-test fails during bootstrap.
16350 Always create __ibm128 as a separate type, even if the current long double
16351 format is IBM extended double.
16353 For IEEE 128-bit floating point, always create the type __ieee128. If the
16354 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16355 __ieee128. */
16356 if (TARGET_FLOAT128_TYPE)
16358 ibm128_float_type_node = make_node (REAL_TYPE);
16359 TYPE_PRECISION (ibm128_float_type_node) = 128;
16360 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16361 layout_type (ibm128_float_type_node);
16362 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16363 "__ibm128");
16365 ieee128_float_type_node = float128_type_node;
16366 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16367 "__ieee128");
16370 else
16371 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
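/* A minimal usage sketch (illustrative): when TARGET_FLOAT128_TYPE is set,
   the two declarations below name distinct types no matter which format the
   current long double uses:

	__ibm128  dd;	   the IBM pair-of-doubles format (IFmode)
	__ieee128 qf;	   the IEEE 754 binary128 format
*/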
16373 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16374 tree type node. */
16375 builtin_mode_to_type[QImode][0] = integer_type_node;
16376 builtin_mode_to_type[HImode][0] = integer_type_node;
16377 builtin_mode_to_type[SImode][0] = intSI_type_node;
16378 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16379 builtin_mode_to_type[DImode][0] = intDI_type_node;
16380 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16381 builtin_mode_to_type[TImode][0] = intTI_type_node;
16382 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16383 builtin_mode_to_type[SFmode][0] = float_type_node;
16384 builtin_mode_to_type[DFmode][0] = double_type_node;
16385 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16386 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16387 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16388 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16389 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16390 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16391 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16392 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16393 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16394 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16395 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16396 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16397 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16398 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16399 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16400 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16401 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
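/* E.g. builtin_mode_to_type[V4SImode][0] is "__vector signed int" and
   builtin_mode_to_type[V4SImode][1] is "__vector unsigned int"; the second
   index selects the unsigned variant where one exists.  */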
16403 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16404 TYPE_NAME (bool_char_type_node) = tdecl;
16406 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16407 TYPE_NAME (bool_short_type_node) = tdecl;
16409 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16410 TYPE_NAME (bool_int_type_node) = tdecl;
16412 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16413 TYPE_NAME (pixel_type_node) = tdecl;
16415 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16416 bool_char_type_node, 16);
16417 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16418 bool_short_type_node, 8);
16419 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16420 bool_int_type_node, 4);
16421 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16422 ? "__vector __bool long"
16423 : "__vector __bool long long",
16424 bool_long_long_type_node, 2);
16425 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16426 pixel_type_node, 8);
16428 /* Create Altivec and VSX builtins on machines with at least the
16429 general purpose extensions (970 and newer) to allow the use of
16430 the target attribute. */
16431 if (TARGET_EXTRA_BUILTINS)
16432 altivec_init_builtins ();
16433 if (TARGET_HTM)
16434 htm_init_builtins ();
16436 if (TARGET_EXTRA_BUILTINS)
16437 rs6000_common_init_builtins ();
16439 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16440 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16441 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16443 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16444 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16445 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16447 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16448 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16449 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16451 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16452 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16453 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
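/* Usage sketch for the reciprocal built-ins defined just above (illustrative
   only; whether these expand to estimate-plus-refinement sequences or plain
   divides and square roots depends on the target and the -mrecip options):

	double q = __builtin_recipdiv (x, y);	   approximates x / y
	float  r = __builtin_rsqrtf (f);	   approximates 1.0f / sqrtf (f)
*/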
16455 mode = (TARGET_64BIT) ? DImode : SImode;
16456 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16457 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16458 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16460 ftype = build_function_type_list (unsigned_intDI_type_node,
16461 NULL_TREE);
16462 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16464 if (TARGET_64BIT)
16465 ftype = build_function_type_list (unsigned_intDI_type_node,
16466 NULL_TREE);
16467 else
16468 ftype = build_function_type_list (unsigned_intSI_type_node,
16469 NULL_TREE);
16470 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
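/* Usage sketch (illustrative): __builtin_ppc_get_timebase always returns the
   full 64-bit time base (reading it coherently on 32-bit), while
   __builtin_ppc_mftb returns a single register's width, as the types above
   reflect:

	unsigned long long tb = __builtin_ppc_get_timebase ();
*/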
16472 ftype = build_function_type_list (double_type_node, NULL_TREE);
16473 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16475 ftype = build_function_type_list (void_type_node,
16476 intSI_type_node, double_type_node,
16477 NULL_TREE);
16478 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
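/* Usage sketch (illustrative): the FPSCR image travels in a double, matching
   the function types built above; an FM mask of 0xff writes all eight fields:

	double fpscr = __builtin_mffs ();
	__builtin_mtfsf (0xff, fpscr);
*/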
16480 ftype = build_function_type_list (void_type_node, NULL_TREE);
16481 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16482 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16483 MISC_BUILTIN_SPEC_BARRIER);
16485 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16486 NULL_TREE);
16487 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16488 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
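/* Usage sketch (illustrative; the platform and feature names are the strings
   recognized by cpu_expand_builtin via the GLIBC AT_PLATFORM/HWCAP data):

	__builtin_cpu_init ();
	if (__builtin_cpu_supports ("arch_3_00"))
	  {
	     ...ISA 3.0 code path...
	  }
*/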
16490 /* AIX libm provides clog as __clog. */
16491 if (TARGET_XCOFF
16492 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16493 set_user_assembler_name (tdecl, "__clog");
16495 #ifdef SUBTARGET_INIT_BUILTINS
16496 SUBTARGET_INIT_BUILTINS;
16497 #endif
16500 /* Returns the rs6000 builtin decl for CODE. */
16502 static tree
16503 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16505 HOST_WIDE_INT fnmask;
16507 if (code >= RS6000_BUILTIN_COUNT)
16508 return error_mark_node;
16510 fnmask = rs6000_builtin_info[code].mask;
16511 if ((fnmask & rs6000_builtin_mask) != fnmask)
16513 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16514 return error_mark_node;
16517 return rs6000_builtin_decls[code];
16520 static void
16521 altivec_init_builtins (void)
16523 const struct builtin_description *d;
16524 size_t i;
16525 tree ftype;
16526 tree decl;
16527 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16529 tree pvoid_type_node = build_pointer_type (void_type_node);
16531 tree pcvoid_type_node
16532 = build_pointer_type (build_qualified_type (void_type_node,
16533 TYPE_QUAL_CONST));
16535 tree int_ftype_opaque
16536 = build_function_type_list (integer_type_node,
16537 opaque_V4SI_type_node, NULL_TREE);
16538 tree opaque_ftype_opaque
16539 = build_function_type_list (integer_type_node, NULL_TREE);
16540 tree opaque_ftype_opaque_int
16541 = build_function_type_list (opaque_V4SI_type_node,
16542 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16543 tree opaque_ftype_opaque_opaque_int
16544 = build_function_type_list (opaque_V4SI_type_node,
16545 opaque_V4SI_type_node, opaque_V4SI_type_node,
16546 integer_type_node, NULL_TREE);
16547 tree opaque_ftype_opaque_opaque_opaque
16548 = build_function_type_list (opaque_V4SI_type_node,
16549 opaque_V4SI_type_node, opaque_V4SI_type_node,
16550 opaque_V4SI_type_node, NULL_TREE);
16551 tree opaque_ftype_opaque_opaque
16552 = build_function_type_list (opaque_V4SI_type_node,
16553 opaque_V4SI_type_node, opaque_V4SI_type_node,
16554 NULL_TREE);
16555 tree int_ftype_int_opaque_opaque
16556 = build_function_type_list (integer_type_node,
16557 integer_type_node, opaque_V4SI_type_node,
16558 opaque_V4SI_type_node, NULL_TREE);
16559 tree int_ftype_int_v4si_v4si
16560 = build_function_type_list (integer_type_node,
16561 integer_type_node, V4SI_type_node,
16562 V4SI_type_node, NULL_TREE);
16563 tree int_ftype_int_v2di_v2di
16564 = build_function_type_list (integer_type_node,
16565 integer_type_node, V2DI_type_node,
16566 V2DI_type_node, NULL_TREE);
16567 tree void_ftype_v4si
16568 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16569 tree v8hi_ftype_void
16570 = build_function_type_list (V8HI_type_node, NULL_TREE);
16571 tree void_ftype_void
16572 = build_function_type_list (void_type_node, NULL_TREE);
16573 tree void_ftype_int
16574 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16576 tree opaque_ftype_long_pcvoid
16577 = build_function_type_list (opaque_V4SI_type_node,
16578 long_integer_type_node, pcvoid_type_node,
16579 NULL_TREE);
16580 tree v16qi_ftype_long_pcvoid
16581 = build_function_type_list (V16QI_type_node,
16582 long_integer_type_node, pcvoid_type_node,
16583 NULL_TREE);
16584 tree v8hi_ftype_long_pcvoid
16585 = build_function_type_list (V8HI_type_node,
16586 long_integer_type_node, pcvoid_type_node,
16587 NULL_TREE);
16588 tree v4si_ftype_long_pcvoid
16589 = build_function_type_list (V4SI_type_node,
16590 long_integer_type_node, pcvoid_type_node,
16591 NULL_TREE);
16592 tree v4sf_ftype_long_pcvoid
16593 = build_function_type_list (V4SF_type_node,
16594 long_integer_type_node, pcvoid_type_node,
16595 NULL_TREE);
16596 tree v2df_ftype_long_pcvoid
16597 = build_function_type_list (V2DF_type_node,
16598 long_integer_type_node, pcvoid_type_node,
16599 NULL_TREE);
16600 tree v2di_ftype_long_pcvoid
16601 = build_function_type_list (V2DI_type_node,
16602 long_integer_type_node, pcvoid_type_node,
16603 NULL_TREE);
16604 tree v1ti_ftype_long_pcvoid
16605 = build_function_type_list (V1TI_type_node,
16606 long_integer_type_node, pcvoid_type_node,
16607 NULL_TREE);
16609 tree void_ftype_opaque_long_pvoid
16610 = build_function_type_list (void_type_node,
16611 opaque_V4SI_type_node, long_integer_type_node,
16612 pvoid_type_node, NULL_TREE);
16613 tree void_ftype_v4si_long_pvoid
16614 = build_function_type_list (void_type_node,
16615 V4SI_type_node, long_integer_type_node,
16616 pvoid_type_node, NULL_TREE);
16617 tree void_ftype_v16qi_long_pvoid
16618 = build_function_type_list (void_type_node,
16619 V16QI_type_node, long_integer_type_node,
16620 pvoid_type_node, NULL_TREE);
16622 tree void_ftype_v16qi_pvoid_long
16623 = build_function_type_list (void_type_node,
16624 V16QI_type_node, pvoid_type_node,
16625 long_integer_type_node, NULL_TREE);
16627 tree void_ftype_v8hi_long_pvoid
16628 = build_function_type_list (void_type_node,
16629 V8HI_type_node, long_integer_type_node,
16630 pvoid_type_node, NULL_TREE);
16631 tree void_ftype_v4sf_long_pvoid
16632 = build_function_type_list (void_type_node,
16633 V4SF_type_node, long_integer_type_node,
16634 pvoid_type_node, NULL_TREE);
16635 tree void_ftype_v2df_long_pvoid
16636 = build_function_type_list (void_type_node,
16637 V2DF_type_node, long_integer_type_node,
16638 pvoid_type_node, NULL_TREE);
16639 tree void_ftype_v1ti_long_pvoid
16640 = build_function_type_list (void_type_node,
16641 V1TI_type_node, long_integer_type_node,
16642 pvoid_type_node, NULL_TREE);
16643 tree void_ftype_v2di_long_pvoid
16644 = build_function_type_list (void_type_node,
16645 V2DI_type_node, long_integer_type_node,
16646 pvoid_type_node, NULL_TREE);
16647 tree int_ftype_int_v8hi_v8hi
16648 = build_function_type_list (integer_type_node,
16649 integer_type_node, V8HI_type_node,
16650 V8HI_type_node, NULL_TREE);
16651 tree int_ftype_int_v16qi_v16qi
16652 = build_function_type_list (integer_type_node,
16653 integer_type_node, V16QI_type_node,
16654 V16QI_type_node, NULL_TREE);
16655 tree int_ftype_int_v4sf_v4sf
16656 = build_function_type_list (integer_type_node,
16657 integer_type_node, V4SF_type_node,
16658 V4SF_type_node, NULL_TREE);
16659 tree int_ftype_int_v2df_v2df
16660 = build_function_type_list (integer_type_node,
16661 integer_type_node, V2DF_type_node,
16662 V2DF_type_node, NULL_TREE);
16663 tree v2di_ftype_v2di
16664 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16665 tree v4si_ftype_v4si
16666 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16667 tree v8hi_ftype_v8hi
16668 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16669 tree v16qi_ftype_v16qi
16670 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16671 tree v4sf_ftype_v4sf
16672 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16673 tree v2df_ftype_v2df
16674 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16675 tree void_ftype_pcvoid_int_int
16676 = build_function_type_list (void_type_node,
16677 pcvoid_type_node, integer_type_node,
16678 integer_type_node, NULL_TREE);
16680 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16681 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16682 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16683 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16684 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16685 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16686 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16687 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16688 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16689 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16690 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16691 ALTIVEC_BUILTIN_LVXL_V2DF);
16692 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16693 ALTIVEC_BUILTIN_LVXL_V2DI);
16694 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16695 ALTIVEC_BUILTIN_LVXL_V4SF);
16696 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16697 ALTIVEC_BUILTIN_LVXL_V4SI);
16698 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16699 ALTIVEC_BUILTIN_LVXL_V8HI);
16700 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16701 ALTIVEC_BUILTIN_LVXL_V16QI);
16702 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16703 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16704 ALTIVEC_BUILTIN_LVX_V1TI);
16705 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16706 ALTIVEC_BUILTIN_LVX_V2DF);
16707 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16708 ALTIVEC_BUILTIN_LVX_V2DI);
16709 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16710 ALTIVEC_BUILTIN_LVX_V4SF);
16711 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16712 ALTIVEC_BUILTIN_LVX_V4SI);
16713 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16714 ALTIVEC_BUILTIN_LVX_V8HI);
16715 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16716 ALTIVEC_BUILTIN_LVX_V16QI);
16717 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16718 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16719 ALTIVEC_BUILTIN_STVX_V2DF);
16720 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16721 ALTIVEC_BUILTIN_STVX_V2DI);
16722 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16723 ALTIVEC_BUILTIN_STVX_V4SF);
16724 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16725 ALTIVEC_BUILTIN_STVX_V4SI);
16726 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16727 ALTIVEC_BUILTIN_STVX_V8HI);
16728 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16729 ALTIVEC_BUILTIN_STVX_V16QI);
16730 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16731 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16732 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16733 ALTIVEC_BUILTIN_STVXL_V2DF);
16734 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16735 ALTIVEC_BUILTIN_STVXL_V2DI);
16736 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16737 ALTIVEC_BUILTIN_STVXL_V4SF);
16738 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16739 ALTIVEC_BUILTIN_STVXL_V4SI);
16740 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16741 ALTIVEC_BUILTIN_STVXL_V8HI);
16742 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16743 ALTIVEC_BUILTIN_STVXL_V16QI);
16744 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16745 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16746 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16747 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16748 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16749 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16750 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16751 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16752 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16753 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16754 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16755 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16756 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16757 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16758 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16759 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16761 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16762 VSX_BUILTIN_LXVD2X_V2DF);
16763 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16764 VSX_BUILTIN_LXVD2X_V2DI);
16765 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16766 VSX_BUILTIN_LXVW4X_V4SF);
16767 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16768 VSX_BUILTIN_LXVW4X_V4SI);
16769 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16770 VSX_BUILTIN_LXVW4X_V8HI);
16771 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16772 VSX_BUILTIN_LXVW4X_V16QI);
16773 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16774 VSX_BUILTIN_STXVD2X_V2DF);
16775 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16776 VSX_BUILTIN_STXVD2X_V2DI);
16777 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16778 VSX_BUILTIN_STXVW4X_V4SF);
16779 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16780 VSX_BUILTIN_STXVW4X_V4SI);
16781 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16782 VSX_BUILTIN_STXVW4X_V8HI);
16783 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16784 VSX_BUILTIN_STXVW4X_V16QI);
16786 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16787 VSX_BUILTIN_LD_ELEMREV_V2DF);
16788 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16789 VSX_BUILTIN_LD_ELEMREV_V2DI);
16790 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16791 VSX_BUILTIN_LD_ELEMREV_V4SF);
16792 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16793 VSX_BUILTIN_LD_ELEMREV_V4SI);
16794 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16795 VSX_BUILTIN_LD_ELEMREV_V8HI);
16796 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16797 VSX_BUILTIN_LD_ELEMREV_V16QI);
16798 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16799 VSX_BUILTIN_ST_ELEMREV_V2DF);
16800 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16801 VSX_BUILTIN_ST_ELEMREV_V1TI);
16802 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16803 VSX_BUILTIN_ST_ELEMREV_V2DI);
16804 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16805 VSX_BUILTIN_ST_ELEMREV_V4SF);
16806 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16807 VSX_BUILTIN_ST_ELEMREV_V4SI);
16808 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16809 VSX_BUILTIN_ST_ELEMREV_V8HI);
16810 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16811 VSX_BUILTIN_ST_ELEMREV_V16QI);
16813 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16814 VSX_BUILTIN_VEC_LD);
16815 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16816 VSX_BUILTIN_VEC_ST);
16817 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16818 VSX_BUILTIN_VEC_XL);
16819 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16820 VSX_BUILTIN_VEC_XL_BE);
16821 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16822 VSX_BUILTIN_VEC_XST);
16823 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16824 VSX_BUILTIN_VEC_XST_BE);
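/* Usage sketch (illustrative, with int_ptr standing in for any int *
   variable), via the <altivec.h> overloads that resolve to the built-ins
   above; vec_xl and vec_xst perform element-correct unaligned accesses, and
   the _be forms force big-endian element order:

	vector int v = vec_xl (0, int_ptr);	   load from int_ptr + 0
	vec_xst (v, 16, int_ptr);		   store at byte offset 16
*/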
16826 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16827 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16828 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16830 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16831 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16832 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16833 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16834 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16835 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16836 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16837 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16838 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16839 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16840 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16841 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16843 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16844 ALTIVEC_BUILTIN_VEC_ADDE);
16845 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16846 ALTIVEC_BUILTIN_VEC_ADDEC);
16847 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16848 ALTIVEC_BUILTIN_VEC_CMPNE);
16849 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16850 ALTIVEC_BUILTIN_VEC_MUL);
16851 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16852 ALTIVEC_BUILTIN_VEC_SUBE);
16853 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16854 ALTIVEC_BUILTIN_VEC_SUBEC);
16856 /* Cell builtins. */
16857 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16858 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16859 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16860 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16862 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16863 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16864 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16865 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16867 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16868 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16869 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16870 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16872 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16873 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16874 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16875 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16877 if (TARGET_P9_VECTOR)
16879 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16880 P9V_BUILTIN_STXVL);
16881 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16882 P9V_BUILTIN_XST_LEN_R);
16885 /* Add the DST variants. */
16886 d = bdesc_dst;
16887 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16889 HOST_WIDE_INT mask = d->mask;
16891 /* It is expected that these dst built-in functions may have
16892 d->icode equal to CODE_FOR_nothing. */
16893 if ((mask & builtin_mask) != mask)
16895 if (TARGET_DEBUG_BUILTIN)
16896 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16897 d->name);
16898 continue;
16900 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16903 /* Initialize the predicates. */
16904 d = bdesc_altivec_preds;
16905 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16907 machine_mode mode1;
16908 tree type;
16909 HOST_WIDE_INT mask = d->mask;
16911 if ((mask & builtin_mask) != mask)
16913 if (TARGET_DEBUG_BUILTIN)
16914 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16915 d->name);
16916 continue;
16919 if (rs6000_overloaded_builtin_p (d->code))
16920 mode1 = VOIDmode;
16921 else
16923 /* Cannot define builtin if the instruction is disabled. */
16924 gcc_assert (d->icode != CODE_FOR_nothing);
16925 mode1 = insn_data[d->icode].operand[1].mode;
16928 switch (mode1)
16930 case E_VOIDmode:
16931 type = int_ftype_int_opaque_opaque;
16932 break;
16933 case E_V2DImode:
16934 type = int_ftype_int_v2di_v2di;
16935 break;
16936 case E_V4SImode:
16937 type = int_ftype_int_v4si_v4si;
16938 break;
16939 case E_V8HImode:
16940 type = int_ftype_int_v8hi_v8hi;
16941 break;
16942 case E_V16QImode:
16943 type = int_ftype_int_v16qi_v16qi;
16944 break;
16945 case E_V4SFmode:
16946 type = int_ftype_int_v4sf_v4sf;
16947 break;
16948 case E_V2DFmode:
16949 type = int_ftype_int_v2df_v2df;
16950 break;
16951 default:
16952 gcc_unreachable ();
16955 def_builtin (d->name, type, d->code);
16958 /* Initialize the abs* operators. */
16959 d = bdesc_abs;
16960 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16962 machine_mode mode0;
16963 tree type;
16964 HOST_WIDE_INT mask = d->mask;
16966 if ((mask & builtin_mask) != mask)
16968 if (TARGET_DEBUG_BUILTIN)
16969 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
16970 d->name);
16971 continue;
16974 /* Cannot define builtin if the instruction is disabled. */
16975 gcc_assert (d->icode != CODE_FOR_nothing);
16976 mode0 = insn_data[d->icode].operand[0].mode;
16978 switch (mode0)
16980 case E_V2DImode:
16981 type = v2di_ftype_v2di;
16982 break;
16983 case E_V4SImode:
16984 type = v4si_ftype_v4si;
16985 break;
16986 case E_V8HImode:
16987 type = v8hi_ftype_v8hi;
16988 break;
16989 case E_V16QImode:
16990 type = v16qi_ftype_v16qi;
16991 break;
16992 case E_V4SFmode:
16993 type = v4sf_ftype_v4sf;
16994 break;
16995 case E_V2DFmode:
16996 type = v2df_ftype_v2df;
16997 break;
16998 default:
16999 gcc_unreachable ();
17002 def_builtin (d->name, type, d->code);
17005 /* Initialize target builtin that implements
17006 targetm.vectorize.builtin_mask_for_load. */
17008 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17009 v16qi_ftype_long_pcvoid,
17010 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17011 BUILT_IN_MD, NULL, NULL_TREE);
17012 TREE_READONLY (decl) = 1;
17013 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17014 altivec_builtin_mask_for_load = decl;
17016 /* Access to the vec_init patterns. */
17017 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17018 integer_type_node, integer_type_node,
17019 integer_type_node, NULL_TREE);
17020 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17022 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17023 short_integer_type_node,
17024 short_integer_type_node,
17025 short_integer_type_node,
17026 short_integer_type_node,
17027 short_integer_type_node,
17028 short_integer_type_node,
17029 short_integer_type_node, NULL_TREE);
17030 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17032 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17033 char_type_node, char_type_node,
17034 char_type_node, char_type_node,
17035 char_type_node, char_type_node,
17036 char_type_node, char_type_node,
17037 char_type_node, char_type_node,
17038 char_type_node, char_type_node,
17039 char_type_node, char_type_node,
17040 char_type_node, NULL_TREE);
17041 def_builtin ("__builtin_vec_init_v16qi", ftype,
17042 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17044 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17045 float_type_node, float_type_node,
17046 float_type_node, NULL_TREE);
17047 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17049 /* VSX builtins. */
17050 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17051 double_type_node, NULL_TREE);
17052 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17054 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17055 intDI_type_node, NULL_TREE);
17056 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17058 /* Access to the vec_set patterns. */
17059 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17060 intSI_type_node,
17061 integer_type_node, NULL_TREE);
17062 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17064 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17065 intHI_type_node,
17066 integer_type_node, NULL_TREE);
17067 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17069 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17070 intQI_type_node,
17071 integer_type_node, NULL_TREE);
17072 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17074 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17075 float_type_node,
17076 integer_type_node, NULL_TREE);
17077 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17079 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17080 double_type_node,
17081 integer_type_node, NULL_TREE);
17082 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17084 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17085 intDI_type_node,
17086 integer_type_node, NULL_TREE);
17087 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17089 /* Access to the vec_extract patterns. */
17090 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17091 integer_type_node, NULL_TREE);
17092 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17094 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17095 integer_type_node, NULL_TREE);
17096 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17098 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17099 integer_type_node, NULL_TREE);
17100 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17102 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17103 integer_type_node, NULL_TREE);
17104 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17106 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17107 integer_type_node, NULL_TREE);
17108 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17110 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17111 integer_type_node, NULL_TREE);
17112 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
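/* Usage sketch (illustrative): these vec_set and vec_ext patterns sit behind
   the vec_insert and vec_extract overloads in <altivec.h>:

	vector int v = vec_splats (1);
	int e = vec_extract (v, 2);
	v = vec_insert (e + 1, v, 0);
*/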
17115 if (V1TI_type_node)
17117 tree v1ti_ftype_long_pcvoid
17118 = build_function_type_list (V1TI_type_node,
17119 long_integer_type_node, pcvoid_type_node,
17120 NULL_TREE);
17121 tree void_ftype_v1ti_long_pvoid
17122 = build_function_type_list (void_type_node,
17123 V1TI_type_node, long_integer_type_node,
17124 pvoid_type_node, NULL_TREE);
17125 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17126 VSX_BUILTIN_LD_ELEMREV_V1TI);
17127 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17128 VSX_BUILTIN_LXVD2X_V1TI);
17129 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17130 VSX_BUILTIN_STXVD2X_V1TI);
17131 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17132 NULL_TREE);
17133 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17134 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17135 intTI_type_node,
17136 integer_type_node, NULL_TREE);
17137 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17138 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17139 integer_type_node, NULL_TREE);
17140 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17145 static void
17146 htm_init_builtins (void)
17148 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17149 const struct builtin_description *d;
17150 size_t i;
17152 d = bdesc_htm;
17153 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17155 tree op[MAX_HTM_OPERANDS], type;
17156 HOST_WIDE_INT mask = d->mask;
17157 unsigned attr = rs6000_builtin_info[d->code].attr;
17158 bool void_func = (attr & RS6000_BTC_VOID);
17159 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17160 int nopnds = 0;
17161 tree gpr_type_node;
17162 tree rettype;
17163 tree argtype;
17165 /* It is expected that these htm built-in functions may have
17166 d->icode equal to CODE_FOR_nothing. */
17168 if (TARGET_32BIT && TARGET_POWERPC64)
17169 gpr_type_node = long_long_unsigned_type_node;
17170 else
17171 gpr_type_node = long_unsigned_type_node;
17173 if (attr & RS6000_BTC_SPR)
17175 rettype = gpr_type_node;
17176 argtype = gpr_type_node;
17178 else if (d->code == HTM_BUILTIN_TABORTDC
17179 || d->code == HTM_BUILTIN_TABORTDCI)
17181 rettype = unsigned_type_node;
17182 argtype = gpr_type_node;
17184 else
17186 rettype = unsigned_type_node;
17187 argtype = unsigned_type_node;
17190 if ((mask & builtin_mask) != mask)
17192 if (TARGET_DEBUG_BUILTIN)
17193 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
17194 continue;
17197 if (d->name == 0)
17199 if (TARGET_DEBUG_BUILTIN)
17200 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17201 (long unsigned) i);
17202 continue;
17205 op[nopnds++] = (void_func) ? void_type_node : rettype;
17207 if (attr_args == RS6000_BTC_UNARY)
17208 op[nopnds++] = argtype;
17209 else if (attr_args == RS6000_BTC_BINARY)
17211 op[nopnds++] = argtype;
17212 op[nopnds++] = argtype;
17214 else if (attr_args == RS6000_BTC_TERNARY)
17216 op[nopnds++] = argtype;
17217 op[nopnds++] = argtype;
17218 op[nopnds++] = argtype;
17221 switch (nopnds)
17223 case 1:
17224 type = build_function_type_list (op[0], NULL_TREE);
17225 break;
17226 case 2:
17227 type = build_function_type_list (op[0], op[1], NULL_TREE);
17228 break;
17229 case 3:
17230 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17231 break;
17232 case 4:
17233 type = build_function_type_list (op[0], op[1], op[2], op[3],
17234 NULL_TREE);
17235 break;
17236 default:
17237 gcc_unreachable ();
17240 def_builtin (d->name, type, d->code);
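/* Usage sketch (illustrative) of the HTM built-ins defined by this loop,
   assuming a target and compilation with -mhtm:

	if (__builtin_tbegin (0))
	  {
	     ...transactional body...
	     __builtin_tend (0);
	  }
	else
	  handle_failure ();	   hypothetical fallback after an abort
*/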
17244 /* Hash function for builtin functions with up to 3 arguments and a return
17245 type. */
17246 hashval_t
17247 builtin_hasher::hash (builtin_hash_struct *bh)
17249 unsigned ret = 0;
17250 int i;
17252 for (i = 0; i < 4; i++)
17254 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17255 ret = (ret * 2) + bh->uns_p[i];
17258 return ret;
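/* Each (mode, uns_p) pair above acts as one digit of a mixed-radix number
   with radix 2 * MAX_MACHINE_MODE per slot, so distinct signatures only
   collide when the accumulated value wraps around hashval_t.  */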
17261 /* Compare builtin hash entries H1 and H2 for equivalence. */
17262 bool
17263 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17265 return ((p1->mode[0] == p2->mode[0])
17266 && (p1->mode[1] == p2->mode[1])
17267 && (p1->mode[2] == p2->mode[2])
17268 && (p1->mode[3] == p2->mode[3])
17269 && (p1->uns_p[0] == p2->uns_p[0])
17270 && (p1->uns_p[1] == p2->uns_p[1])
17271 && (p1->uns_p[2] == p2->uns_p[2])
17272 && (p1->uns_p[3] == p2->uns_p[3]));
17275 /* Map types for builtin functions with an explicit return type and up to 3
17276 arguments. Functions with fewer than 3 arguments use VOIDmode for the
17277 missing argument modes. */
17278 static tree
17279 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17280 machine_mode mode_arg1, machine_mode mode_arg2,
17281 enum rs6000_builtins builtin, const char *name)
17283 struct builtin_hash_struct h;
17284 struct builtin_hash_struct *h2;
17285 int num_args = 3;
17286 int i;
17287 tree ret_type = NULL_TREE;
17288 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17290 /* Create builtin_hash_table. */
17291 if (builtin_hash_table == NULL)
17292 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17294 h.type = NULL_TREE;
17295 h.mode[0] = mode_ret;
17296 h.mode[1] = mode_arg0;
17297 h.mode[2] = mode_arg1;
17298 h.mode[3] = mode_arg2;
17299 h.uns_p[0] = 0;
17300 h.uns_p[1] = 0;
17301 h.uns_p[2] = 0;
17302 h.uns_p[3] = 0;
17304 /* If the builtin produces unsigned results or takes unsigned arguments,
17305 and it may be returned as a decl to the vectorizer (such as the widening
17306 multiplies and permutes), make sure the arguments and return value
17307 are type correct. */
17308 switch (builtin)
17310 /* unsigned 1 argument functions. */
17311 case CRYPTO_BUILTIN_VSBOX:
17312 case P8V_BUILTIN_VGBBD:
17313 case MISC_BUILTIN_CDTBCD:
17314 case MISC_BUILTIN_CBCDTD:
17315 h.uns_p[0] = 1;
17316 h.uns_p[1] = 1;
17317 break;
17319 /* unsigned 2 argument functions. */
17320 case ALTIVEC_BUILTIN_VMULEUB:
17321 case ALTIVEC_BUILTIN_VMULEUH:
17322 case P8V_BUILTIN_VMULEUW:
17323 case ALTIVEC_BUILTIN_VMULOUB:
17324 case ALTIVEC_BUILTIN_VMULOUH:
17325 case P8V_BUILTIN_VMULOUW:
17326 case CRYPTO_BUILTIN_VCIPHER:
17327 case CRYPTO_BUILTIN_VCIPHERLAST:
17328 case CRYPTO_BUILTIN_VNCIPHER:
17329 case CRYPTO_BUILTIN_VNCIPHERLAST:
17330 case CRYPTO_BUILTIN_VPMSUMB:
17331 case CRYPTO_BUILTIN_VPMSUMH:
17332 case CRYPTO_BUILTIN_VPMSUMW:
17333 case CRYPTO_BUILTIN_VPMSUMD:
17334 case CRYPTO_BUILTIN_VPMSUM:
17335 case MISC_BUILTIN_ADDG6S:
17336 case MISC_BUILTIN_DIVWEU:
17337 case MISC_BUILTIN_DIVDEU:
17338 case VSX_BUILTIN_UDIV_V2DI:
17339 case ALTIVEC_BUILTIN_VMAXUB:
17340 case ALTIVEC_BUILTIN_VMINUB:
17341 case ALTIVEC_BUILTIN_VMAXUH:
17342 case ALTIVEC_BUILTIN_VMINUH:
17343 case ALTIVEC_BUILTIN_VMAXUW:
17344 case ALTIVEC_BUILTIN_VMINUW:
17345 case P8V_BUILTIN_VMAXUD:
17346 case P8V_BUILTIN_VMINUD:
17347 h.uns_p[0] = 1;
17348 h.uns_p[1] = 1;
17349 h.uns_p[2] = 1;
17350 break;
17352 /* unsigned 3 argument functions. */
17353 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17354 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17355 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17356 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17357 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17358 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17359 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17360 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17361 case VSX_BUILTIN_VPERM_16QI_UNS:
17362 case VSX_BUILTIN_VPERM_8HI_UNS:
17363 case VSX_BUILTIN_VPERM_4SI_UNS:
17364 case VSX_BUILTIN_VPERM_2DI_UNS:
17365 case VSX_BUILTIN_XXSEL_16QI_UNS:
17366 case VSX_BUILTIN_XXSEL_8HI_UNS:
17367 case VSX_BUILTIN_XXSEL_4SI_UNS:
17368 case VSX_BUILTIN_XXSEL_2DI_UNS:
17369 case CRYPTO_BUILTIN_VPERMXOR:
17370 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17371 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17372 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17373 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17374 case CRYPTO_BUILTIN_VSHASIGMAW:
17375 case CRYPTO_BUILTIN_VSHASIGMAD:
17376 case CRYPTO_BUILTIN_VSHASIGMA:
17377 h.uns_p[0] = 1;
17378 h.uns_p[1] = 1;
17379 h.uns_p[2] = 1;
17380 h.uns_p[3] = 1;
17381 break;
17383 /* signed permute functions with unsigned char mask. */
17384 case ALTIVEC_BUILTIN_VPERM_16QI:
17385 case ALTIVEC_BUILTIN_VPERM_8HI:
17386 case ALTIVEC_BUILTIN_VPERM_4SI:
17387 case ALTIVEC_BUILTIN_VPERM_4SF:
17388 case ALTIVEC_BUILTIN_VPERM_2DI:
17389 case ALTIVEC_BUILTIN_VPERM_2DF:
17390 case VSX_BUILTIN_VPERM_16QI:
17391 case VSX_BUILTIN_VPERM_8HI:
17392 case VSX_BUILTIN_VPERM_4SI:
17393 case VSX_BUILTIN_VPERM_4SF:
17394 case VSX_BUILTIN_VPERM_2DI:
17395 case VSX_BUILTIN_VPERM_2DF:
17396 h.uns_p[3] = 1;
17397 break;
17399 /* unsigned args, signed return. */
17400 case VSX_BUILTIN_XVCVUXDSP:
17401 case VSX_BUILTIN_XVCVUXDDP_UNS:
17402 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17403 h.uns_p[1] = 1;
17404 break;
17406 /* signed args, unsigned return. */
17407 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17408 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17409 case MISC_BUILTIN_UNPACK_TD:
17410 case MISC_BUILTIN_UNPACK_V1TI:
17411 h.uns_p[0] = 1;
17412 break;
17414 /* unsigned arguments, bool return (compares). */
17415 case ALTIVEC_BUILTIN_VCMPEQUB:
17416 case ALTIVEC_BUILTIN_VCMPEQUH:
17417 case ALTIVEC_BUILTIN_VCMPEQUW:
17418 case P8V_BUILTIN_VCMPEQUD:
17419 case VSX_BUILTIN_CMPGE_U16QI:
17420 case VSX_BUILTIN_CMPGE_U8HI:
17421 case VSX_BUILTIN_CMPGE_U4SI:
17422 case VSX_BUILTIN_CMPGE_U2DI:
17423 case ALTIVEC_BUILTIN_VCMPGTUB:
17424 case ALTIVEC_BUILTIN_VCMPGTUH:
17425 case ALTIVEC_BUILTIN_VCMPGTUW:
17426 case P8V_BUILTIN_VCMPGTUD:
17427 h.uns_p[1] = 1;
17428 h.uns_p[2] = 1;
17429 break;
17431 /* unsigned arguments for 128-bit pack instructions. */
17432 case MISC_BUILTIN_PACK_TD:
17433 case MISC_BUILTIN_PACK_V1TI:
17434 h.uns_p[1] = 1;
17435 h.uns_p[2] = 1;
17436 break;
17438 /* unsigned second arguments (vector shift right). */
17439 case ALTIVEC_BUILTIN_VSRB:
17440 case ALTIVEC_BUILTIN_VSRH:
17441 case ALTIVEC_BUILTIN_VSRW:
17442 case P8V_BUILTIN_VSRD:
17443 h.uns_p[2] = 1;
17444 break;
17446 default:
17447 break;
17450 /* Figure out how many args are present. */
17451 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17452 num_args--;
17454 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17455 if (!ret_type && h.uns_p[0])
17456 ret_type = builtin_mode_to_type[h.mode[0]][0];
17458 if (!ret_type)
17459 fatal_error (input_location,
17460 "internal error: builtin function %qs had an unexpected "
17461 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17463 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17464 arg_type[i] = NULL_TREE;
17466 for (i = 0; i < num_args; i++)
17468 int m = (int) h.mode[i+1];
17469 int uns_p = h.uns_p[i+1];
17471 arg_type[i] = builtin_mode_to_type[m][uns_p];
17472 if (!arg_type[i] && uns_p)
17473 arg_type[i] = builtin_mode_to_type[m][0];
17475 if (!arg_type[i])
17476 fatal_error (input_location,
17477 "internal error: builtin function %qs, argument %d "
17478 "had unexpected argument type %qs", name, i,
17479 GET_MODE_NAME (m));
17482 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17483 if (*found == NULL)
17485 h2 = ggc_alloc<builtin_hash_struct> ();
17486 *h2 = h;
17487 *found = h2;
17489 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17490 arg_type[2], NULL_TREE);
17493 return (*found)->type;
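/* Worked example (illustrative): for ALTIVEC_BUILTIN_VCMPEQUW with modes
   (V4SImode, V4SImode, V4SImode, VOIDmode), the switch above sets uns_p[1]
   and uns_p[2] but leaves uns_p[0] clear, so the cached type is
   "vector signed int f (vector unsigned int, vector unsigned int)": the
   comparison result stays in the signed table entry while both compared
   operands become unsigned.  */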
17496 static void
17497 rs6000_common_init_builtins (void)
17499 const struct builtin_description *d;
17500 size_t i;
17502 tree opaque_ftype_opaque = NULL_TREE;
17503 tree opaque_ftype_opaque_opaque = NULL_TREE;
17504 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17505 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17507 /* Create Altivec and VSX builtins on machines with at least the
17508 general purpose extensions (970 and newer) to allow the use of
17509 the target attribute. */
17511 if (TARGET_EXTRA_BUILTINS)
17512 builtin_mask |= RS6000_BTM_COMMON;
17514 /* Add the ternary operators. */
17515 d = bdesc_3arg;
17516 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17518 tree type;
17519 HOST_WIDE_INT mask = d->mask;
17521 if ((mask & builtin_mask) != mask)
17523 if (TARGET_DEBUG_BUILTIN)
17524 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17525 continue;
17528 if (rs6000_overloaded_builtin_p (d->code))
17530 if (! (type = opaque_ftype_opaque_opaque_opaque))
17531 type = opaque_ftype_opaque_opaque_opaque
17532 = build_function_type_list (opaque_V4SI_type_node,
17533 opaque_V4SI_type_node,
17534 opaque_V4SI_type_node,
17535 opaque_V4SI_type_node,
17536 NULL_TREE);
17538 else
17540 enum insn_code icode = d->icode;
17541 if (d->name == 0)
17543 if (TARGET_DEBUG_BUILTIN)
17544 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17545 (long unsigned)i);
17547 continue;
17550 if (icode == CODE_FOR_nothing)
17552 if (TARGET_DEBUG_BUILTIN)
17553 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17554 d->name);
17556 continue;
17559 type = builtin_function_type (insn_data[icode].operand[0].mode,
17560 insn_data[icode].operand[1].mode,
17561 insn_data[icode].operand[2].mode,
17562 insn_data[icode].operand[3].mode,
17563 d->code, d->name);
17566 def_builtin (d->name, type, d->code);
17569 /* Add the binary operators. */
17570 d = bdesc_2arg;
17571 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17573 machine_mode mode0, mode1, mode2;
17574 tree type;
17575 HOST_WIDE_INT mask = d->mask;
17577 if ((mask & builtin_mask) != mask)
17579 if (TARGET_DEBUG_BUILTIN)
17580 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17581 continue;
17584 if (rs6000_overloaded_builtin_p (d->code))
17586 if (! (type = opaque_ftype_opaque_opaque))
17587 type = opaque_ftype_opaque_opaque
17588 = build_function_type_list (opaque_V4SI_type_node,
17589 opaque_V4SI_type_node,
17590 opaque_V4SI_type_node,
17591 NULL_TREE);
17593 else
17595 enum insn_code icode = d->icode;
17596 if (d->name == 0)
17598 if (TARGET_DEBUG_BUILTIN)
17599 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17600 (long unsigned)i);
17602 continue;
17605 if (icode == CODE_FOR_nothing)
17607 if (TARGET_DEBUG_BUILTIN)
17608 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17609 d->name);
17611 continue;
17614 mode0 = insn_data[icode].operand[0].mode;
17615 mode1 = insn_data[icode].operand[1].mode;
17616 mode2 = insn_data[icode].operand[2].mode;
17618 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17619 d->code, d->name);
17622 def_builtin (d->name, type, d->code);
17625 /* Add the simple unary operators. */
17626 d = bdesc_1arg;
17627 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17629 machine_mode mode0, mode1;
17630 tree type;
17631 HOST_WIDE_INT mask = d->mask;
17633 if ((mask & builtin_mask) != mask)
17635 if (TARGET_DEBUG_BUILTIN)
17636 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17637 continue;
17640 if (rs6000_overloaded_builtin_p (d->code))
17642 if (! (type = opaque_ftype_opaque))
17643 type = opaque_ftype_opaque
17644 = build_function_type_list (opaque_V4SI_type_node,
17645 opaque_V4SI_type_node,
17646 NULL_TREE);
17648 else
17650 enum insn_code icode = d->icode;
17651 if (d->name == 0)
17653 if (TARGET_DEBUG_BUILTIN)
17654 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17655 (long unsigned)i);
17657 continue;
17660 if (icode == CODE_FOR_nothing)
17662 if (TARGET_DEBUG_BUILTIN)
17663 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17664 d->name);
17666 continue;
17669 mode0 = insn_data[icode].operand[0].mode;
17670 mode1 = insn_data[icode].operand[1].mode;
17672 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17673 d->code, d->name);
17676 def_builtin (d->name, type, d->code);
17679 /* Add the simple no-argument operators. */
17680 d = bdesc_0arg;
17681 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17683 machine_mode mode0;
17684 tree type;
17685 HOST_WIDE_INT mask = d->mask;
17687 if ((mask & builtin_mask) != mask)
17689 if (TARGET_DEBUG_BUILTIN)
17690 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17691 continue;
17693 if (rs6000_overloaded_builtin_p (d->code))
17695 if (!opaque_ftype_opaque)
17696 opaque_ftype_opaque
17697 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17698 type = opaque_ftype_opaque;
17700 else
17702 enum insn_code icode = d->icode;
17703 if (d->name == 0)
17705 if (TARGET_DEBUG_BUILTIN)
17706 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17707 (long unsigned) i);
17708 continue;
17710 if (icode == CODE_FOR_nothing)
17712 if (TARGET_DEBUG_BUILTIN)
17713 fprintf (stderr,
17714 "rs6000_builtin, skip no-argument %s (no code)\n",
17715 d->name);
17716 continue;
17718 mode0 = insn_data[icode].operand[0].mode;
17719 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17720 d->code, d->name);
17722 def_builtin (d->name, type, d->code);
17726 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17727 static void
17728 init_float128_ibm (machine_mode mode)
17730 if (!TARGET_XL_COMPAT)
17732 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17733 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17734 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17735 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17737 if (!TARGET_HARD_FLOAT)
17739 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17740 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17741 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17742 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17743 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17744 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17745 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17746 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17748 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17749 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17750 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17751 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17752 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17753 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17754 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17755 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17758 else
17760 set_optab_libfunc (add_optab, mode, "_xlqadd");
17761 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17762 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17763 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17766 /* Add various conversions for IFmode to use the traditional TFmode
17767 names. */
17768 if (mode == IFmode)
17770 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17771 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17772 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf2");
17773 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17774 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17775 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd2");
17777 if (TARGET_POWERPC64)
17779 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17780 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17781 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17782 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17787 /* Create a decl for either complex long double multiply or complex long double
17788 divide when long double is IEEE 128-bit floating point. We can't use
17789 __multc3 and __divtc3 because the original long double (IBM extended
17790 double) already used those names. The complex multiply/divide functions
17791 are encoded as builtin functions with a complex result and 4 scalar inputs. */
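/* For example, __mulkc3 (ar, ai, br, bi) returns the complex product
   (ar + ai*i) * (br + bi*i), following the libgcc __multc3 convention but
   with IEEE 128-bit scalars.  */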
17793 static void
17794 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17796 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17797 name, NULL_TREE);
17799 set_builtin_decl (fncode, fndecl, true);
17801 if (TARGET_DEBUG_BUILTIN)
17802 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17804 return;
17807 /* Set up IEEE 128-bit floating point routines. Use different names if the
17808 arguments can be passed in a vector register. The historical PowerPC
17809 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17810 continue to use that if we aren't using vector registers to pass IEEE
17811 128-bit floating point. */
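/* For example, with -mabi=ieeelongdouble and VSX, a long double add is
   emitted as a call to __addkf3 below, while targets that don't pass these
   values in vector registers keep the historical _q_add name.  */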
17813 static void
17814 init_float128_ieee (machine_mode mode)
17816 if (FLOAT128_VECTOR_P (mode))
17818 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. */
17819 if (mode == TFmode && TARGET_IEEEQUAD)
17821 built_in_function fncode_mul =
17822 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17823 - MIN_MODE_COMPLEX_FLOAT);
17824 built_in_function fncode_div =
17825 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17826 - MIN_MODE_COMPLEX_FLOAT);
17828 tree fntype = build_function_type_list (complex_long_double_type_node,
17829 long_double_type_node,
17830 long_double_type_node,
17831 long_double_type_node,
17832 long_double_type_node,
17833 NULL_TREE);
17835 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17836 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17839 set_optab_libfunc (add_optab, mode, "__addkf3");
17840 set_optab_libfunc (sub_optab, mode, "__subkf3");
17841 set_optab_libfunc (neg_optab, mode, "__negkf2");
17842 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17843 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17844 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17845 set_optab_libfunc (abs_optab, mode, "__abskf2");
17846 set_optab_libfunc (powi_optab, mode, "__powikf2");
17848 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17849 set_optab_libfunc (ne_optab, mode, "__nekf2");
17850 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17851 set_optab_libfunc (ge_optab, mode, "__gekf2");
17852 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17853 set_optab_libfunc (le_optab, mode, "__lekf2");
17854 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17856 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17857 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17858 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17859 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17861 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17862 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17863 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17865 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17866 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17867 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17869 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17870 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17871 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17872 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17873 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17874 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
17876 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17877 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17878 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17879 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17881 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17882 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17883 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17884 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17886 if (TARGET_POWERPC64)
17888 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17889 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17890 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17891 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17895 else
17897 set_optab_libfunc (add_optab, mode, "_q_add");
17898 set_optab_libfunc (sub_optab, mode, "_q_sub");
17899 set_optab_libfunc (neg_optab, mode, "_q_neg");
17900 set_optab_libfunc (smul_optab, mode, "_q_mul");
17901 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17902 if (TARGET_PPC_GPOPT)
17903 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17905 set_optab_libfunc (eq_optab, mode, "_q_feq");
17906 set_optab_libfunc (ne_optab, mode, "_q_fne");
17907 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17908 set_optab_libfunc (ge_optab, mode, "_q_fge");
17909 set_optab_libfunc (lt_optab, mode, "_q_flt");
17910 set_optab_libfunc (le_optab, mode, "_q_fle");
17912 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17913 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17914 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17915 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17916 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17917 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17918 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17919 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17923 static void
17924 rs6000_init_libfuncs (void)
17926 /* __float128 support. */
17927 if (TARGET_FLOAT128_TYPE)
17929 init_float128_ibm (IFmode);
17930 init_float128_ieee (KFmode);
17933 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17934 if (TARGET_LONG_DOUBLE_128)
17936 if (!TARGET_IEEEQUAD)
17937 init_float128_ibm (TFmode);
17939 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17940 else
17941 init_float128_ieee (TFmode);
17945 /* Emit a potentially record-form instruction, setting DST from SRC.
17946 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17947 signed comparison of DST with zero. If DOT is 1, the generated RTL
17948 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17949 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
17950 a separate COMPARE. */
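/* For example, with DOT == 1 and CCREG == CR0 this emits the single insn
     (parallel [(set ccreg (compare src 0)) (clobber dst)])
   while DOT == 2 replaces the clobber with (set dst src).  */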
17952 void
17953 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17955 if (dot == 0)
17957 emit_move_insn (dst, src);
17958 return;
17961 if (cc_reg_not_cr0_operand (ccreg, CCmode))
17963 emit_move_insn (dst, src);
17964 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
17965 return;
17968 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
17969 if (dot == 1)
17971 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
17972 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
17974 else
17976 rtx set = gen_rtx_SET (dst, src);
17977 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
17982 /* A validation routine: say whether CODE, a condition code, and MODE
17983 match. The other alternatives either don't make sense or should
17984 never be generated. */
17986 void
17987 validate_condition_mode (enum rtx_code code, machine_mode mode)
17989 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
17990 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
17991 && GET_MODE_CLASS (mode) == MODE_CC);
17993 /* These don't make sense. */
17994 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
17995 || mode != CCUNSmode);
17997 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
17998 || mode == CCUNSmode);
18000 gcc_assert (mode == CCFPmode
18001 || (code != ORDERED && code != UNORDERED
18002 && code != UNEQ && code != LTGT
18003 && code != UNGT && code != UNLT
18004 && code != UNGE && code != UNLE));
18006 /* These should never be generated except for
18007 flag_finite_math_only. */
18008 gcc_assert (mode != CCFPmode
18009 || flag_finite_math_only
18010 || (code != LE && code != GE
18011 && code != UNEQ && code != LTGT
18012 && code != UNGT && code != UNLT));
18014 /* These are invalid; the information is not there. */
18015 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18019 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18020 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18021 not zero, store there the bit offset (counted from the right) where
18022 the single stretch of 1 bits begins; and similarly for B, the bit
18023 offset where it ends. */
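/* For example, MASK == 0x000ff000 in SImode is valid: the single stretch
   of ones begins at bit 12 (stored in E) and ends at bit 19 (stored in B).
   The wrap-around mask 0xff0000ff is also valid, with E == 24 and B == 7.  */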
18025 bool
18026 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18028 unsigned HOST_WIDE_INT val = INTVAL (mask);
18029 unsigned HOST_WIDE_INT bit;
18030 int nb, ne;
18031 int n = GET_MODE_PRECISION (mode);
18033 if (mode != DImode && mode != SImode)
18034 return false;
18036 if (INTVAL (mask) >= 0)
18038 bit = val & -val;
18039 ne = exact_log2 (bit);
18040 nb = exact_log2 (val + bit);
18042 else if (val + 1 == 0)
18044 nb = n;
18045 ne = 0;
18047 else if (val & 1)
18049 val = ~val;
18050 bit = val & -val;
18051 nb = exact_log2 (bit);
18052 ne = exact_log2 (val + bit);
18054 else
18056 bit = val & -val;
18057 ne = exact_log2 (bit);
18058 if (val + bit == 0)
18059 nb = n;
18060 else
18061 nb = 0;
18064 nb--;
18066 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18067 return false;
18069 if (b)
18070 *b = nb;
18071 if (e)
18072 *e = ne;
18074 return true;
18077 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18078 or rldicr instruction, to implement an AND with it in mode MODE. */
18080 bool
18081 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18083 int nb, ne;
18085 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18086 return false;
18088 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18089 does not wrap. */
18090 if (mode == DImode)
18091 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18093 /* For SImode, rlwinm can do everything. */
18094 if (mode == SImode)
18095 return (nb < 32 && ne < 32);
18097 return false;
18100 /* Return the instruction template for an AND with mask in mode MODE, with
18101 operands OPERANDS. If DOT is true, make it a record-form instruction. */
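/* For example, an AND with 0xffffffff in DImode has ne == 0 and nb == 31,
   so it comes out as "rldicl %0,%1,0,32", clearing the upper 32 bits.  */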
18103 const char *
18104 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18106 int nb, ne;
18108 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18109 gcc_unreachable ();
18111 if (mode == DImode && ne == 0)
18113 operands[3] = GEN_INT (63 - nb);
18114 if (dot)
18115 return "rldicl. %0,%1,0,%3";
18116 return "rldicl %0,%1,0,%3";
18119 if (mode == DImode && nb == 63)
18121 operands[3] = GEN_INT (63 - ne);
18122 if (dot)
18123 return "rldicr. %0,%1,0,%3";
18124 return "rldicr %0,%1,0,%3";
18127 if (nb < 32 && ne < 32)
18129 operands[3] = GEN_INT (31 - nb);
18130 operands[4] = GEN_INT (31 - ne);
18131 if (dot)
18132 return "rlwinm. %0,%1,0,%3,%4";
18133 return "rlwinm %0,%1,0,%3,%4";
18136 gcc_unreachable ();
18139 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18140 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18141 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
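/* For example, (ashift:SI x 8) under the mask 0xffffff00 is valid, since
   it can be done as the single insn "rlwinm %0,%1,8,0,23" (i.e. slwi).  */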
18143 bool
18144 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18146 int nb, ne;
18148 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18149 return false;
18151 int n = GET_MODE_PRECISION (mode);
18152 int sh = -1;
18154 if (CONST_INT_P (XEXP (shift, 1)))
18156 sh = INTVAL (XEXP (shift, 1));
18157 if (sh < 0 || sh >= n)
18158 return false;
18161 rtx_code code = GET_CODE (shift);
18163 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18164 if (sh == 0)
18165 code = ROTATE;
18167 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18168 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18169 code = ASHIFT;
18170 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18172 code = LSHIFTRT;
18173 sh = n - sh;
18176 /* DImode rotates need rld*. */
18177 if (mode == DImode && code == ROTATE)
18178 return (nb == 63 || ne == 0 || ne == sh);
18180 /* SImode rotates need rlw*. */
18181 if (mode == SImode && code == ROTATE)
18182 return (nb < 32 && ne < 32 && sh < 32);
18184 /* Wrap-around masks are only okay for rotates. */
18185 if (ne > nb)
18186 return false;
18188 /* Variable shifts are only okay for rotates. */
18189 if (sh < 0)
18190 return false;
18192 /* Don't allow ASHIFT if the mask is wrong for that. */
18193 if (code == ASHIFT && ne < sh)
18194 return false;
18196 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18197 if the mask is wrong for that. */
18198 if (nb < 32 && ne < 32 && sh < 32
18199 && !(code == LSHIFTRT && nb >= 32 - sh))
18200 return true;
18202 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18203 if the mask is wrong for that. */
18204 if (code == LSHIFTRT)
18205 sh = 64 - sh;
18206 if (nb == 63 || ne == 0 || ne == sh)
18207 return !(code == LSHIFTRT && nb >= sh);
18209 return false;
18212 /* Return the instruction template for a shift with mask in mode MODE, with
18213 operands OPERANDS. If DOT is true, make it a record-form instruction. */
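/* For example, (lshiftrt:DI x 32) under the mask 0xffffffff has ne == 0,
   so it comes out as "rldicl %0,%1,32,32", i.e. srdi %0,%1,32.  */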
18215 const char *
18216 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18218 int nb, ne;
18220 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18221 gcc_unreachable ();
18223 if (mode == DImode && ne == 0)
18225 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18226 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18227 operands[3] = GEN_INT (63 - nb);
18228 if (dot)
18229 return "rld%I2cl. %0,%1,%2,%3";
18230 return "rld%I2cl %0,%1,%2,%3";
18233 if (mode == DImode && nb == 63)
18235 operands[3] = GEN_INT (63 - ne);
18236 if (dot)
18237 return "rld%I2cr. %0,%1,%2,%3";
18238 return "rld%I2cr %0,%1,%2,%3";
18241 if (mode == DImode
18242 && GET_CODE (operands[4]) != LSHIFTRT
18243 && CONST_INT_P (operands[2])
18244 && ne == INTVAL (operands[2]))
18246 operands[3] = GEN_INT (63 - nb);
18247 if (dot)
18248 return "rld%I2c. %0,%1,%2,%3";
18249 return "rld%I2c %0,%1,%2,%3";
18252 if (nb < 32 && ne < 32)
18254 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18255 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18256 operands[3] = GEN_INT (31 - nb);
18257 operands[4] = GEN_INT (31 - ne);
18258 /* This insn can also be a 64-bit rotate with mask that really makes
18259 it just a shift right (with mask); the %h below are to adjust for
18260 that situation (shift count is >= 32 in that case). */
18261 if (dot)
18262 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18263 return "rlw%I2nm %0,%1,%h2,%3,%4";
18266 gcc_unreachable ();
18269 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18270 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18271 ASHIFT, or LSHIFTRT) in mode MODE. */
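/* For example, inserting the low 16 bits of one register into another is
   a rotate by 0 under the mask 0x0000ffff, which is valid here and is
   emitted as rlwimi (or rldimi on 64-bit targets) by
   rs6000_insn_for_insert_mask below.  */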
18273 bool
18274 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18276 int nb, ne;
18278 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18279 return false;
18281 int n = GET_MODE_PRECISION (mode);
18283 int sh = INTVAL (XEXP (shift, 1));
18284 if (sh < 0 || sh >= n)
18285 return false;
18287 rtx_code code = GET_CODE (shift);
18289 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18290 if (sh == 0)
18291 code = ROTATE;
18293 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18294 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18295 code = ASHIFT;
18296 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18298 code = LSHIFTRT;
18299 sh = n - sh;
18302 /* DImode rotates need rldimi. */
18303 if (mode == DImode && code == ROTATE)
18304 return (ne == sh);
18306 /* SImode rotates need rlwimi. */
18307 if (mode == SImode && code == ROTATE)
18308 return (nb < 32 && ne < 32 && sh < 32);
18310 /* Wrap-around masks are only okay for rotates. */
18311 if (ne > nb)
18312 return false;
18314 /* Don't allow ASHIFT if the mask is wrong for that. */
18315 if (code == ASHIFT && ne < sh)
18316 return false;
18318 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18319 if the mask is wrong for that. */
18320 if (nb < 32 && ne < 32 && sh < 32
18321 && !(code == LSHIFTRT && nb >= 32 - sh))
18322 return true;
18324 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18325 if the mask is wrong for that. */
18326 if (code == LSHIFTRT)
18327 sh = 64 - sh;
18328 if (ne == sh)
18329 return !(code == LSHIFTRT && nb >= sh);
18331 return false;
18334 /* Return the instruction template for an insert with mask in mode MODE, with
18335 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18337 const char *
18338 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18340 int nb, ne;
18342 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18343 gcc_unreachable ();
18345 /* Prefer rldimi because rlwimi is cracked. */
18346 if (TARGET_POWERPC64
18347 && (!dot || mode == DImode)
18348 && GET_CODE (operands[4]) != LSHIFTRT
18349 && ne == INTVAL (operands[2]))
18351 operands[3] = GEN_INT (63 - nb);
18352 if (dot)
18353 return "rldimi. %0,%1,%2,%3";
18354 return "rldimi %0,%1,%2,%3";
18357 if (nb < 32 && ne < 32)
18359 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18360 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18361 operands[3] = GEN_INT (31 - nb);
18362 operands[4] = GEN_INT (31 - ne);
18363 if (dot)
18364 return "rlwimi. %0,%1,%2,%3,%4";
18365 return "rlwimi %0,%1,%2,%3,%4";
18368 gcc_unreachable ();
18371 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18372 using two machine instructions. */
18374 bool
18375 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18377 /* There are two kinds of AND we can handle with two insns:
18378 1) those we can do with two rl* insns;
18379 2) ori[s];xori[s].
18381 We do not handle that last case yet. */
18383 /* If there is just one stretch of ones, we can do it. */
18384 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18385 return true;
18387 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18388 one insn, we can do the whole thing with two. */
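/* For example, for C == 0x00ff00ff: bit1 == 0x1, bit2 == 0x100 (the bottom
   of the lowest hole) and bit3 == 0x10000 (the first bit above it), so the
   mask tested below is 0x00ffffff, a valid single mask.  */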
18389 unsigned HOST_WIDE_INT val = INTVAL (c);
18390 unsigned HOST_WIDE_INT bit1 = val & -val;
18391 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18392 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18393 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18394 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18397 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18398 If EXPAND is true, split rotate-and-mask instructions we generate to
18399 their constituent parts as well (this is used during expand); if DOT
18400 is 1, make the last insn a record-form instruction clobbering the
18401 destination GPR and setting the CC reg (from operands[3]); if 2, set
18402 that GPR as well as the CC reg. */
18404 void
18405 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18407 gcc_assert (!(expand && dot));
18409 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18411 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18412 shift right. This generates better code than doing the masks without
18413 shifts, or shifting first right and then left. */
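/* For example, the DImode mask 0x0000ffff00000000 (nb == 47, ne == 32) is
   handled by shifting left by 63 - nb == 16, ANDing with
   0xffff000000000000 (now a valid rldicr mask), and shifting right
   by 16 again.  */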
18414 int nb, ne;
18415 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18417 gcc_assert (mode == DImode);
18419 int shift = 63 - nb;
18420 if (expand)
18422 rtx tmp1 = gen_reg_rtx (DImode);
18423 rtx tmp2 = gen_reg_rtx (DImode);
18424 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18425 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18426 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18428 else
18430 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18431 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18432 emit_move_insn (operands[0], tmp);
18433 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18434 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18436 return;
18439 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18440 that does the rest. */
18441 unsigned HOST_WIDE_INT bit1 = val & -val;
18442 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18443 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18444 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18446 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18447 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18449 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18451 /* Two "no-rotate"-and-mask instructions, for SImode. */
18452 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18454 gcc_assert (mode == SImode);
18456 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18457 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18458 emit_move_insn (reg, tmp);
18459 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18460 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18461 return;
18464 gcc_assert (mode == DImode);
18466 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18467 insns; we have to do the first in SImode, because it wraps. */
18468 if (mask2 <= 0xffffffff
18469 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18471 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18472 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18473 GEN_INT (mask1));
18474 rtx reg_low = gen_lowpart (SImode, reg);
18475 emit_move_insn (reg_low, tmp);
18476 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18477 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18478 return;
18481 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18482 at the top end), rotate back and clear the other hole. */
18483 int right = exact_log2 (bit3);
18484 int left = 64 - right;
18486 /* Rotate the mask too. */
18487 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18489 if (expand)
18491 rtx tmp1 = gen_reg_rtx (DImode);
18492 rtx tmp2 = gen_reg_rtx (DImode);
18493 rtx tmp3 = gen_reg_rtx (DImode);
18494 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18495 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18496 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18497 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18499 else
18501 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18502 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18503 emit_move_insn (operands[0], tmp);
18504 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18505 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18506 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18510 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18511 for lfq and stfq insns iff the registers are hard registers. */
18513 int
18514 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18516 /* We might have been passed a SUBREG. */
18517 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18518 return 0;
18520 /* We might have been passed non-floating-point registers. */
18521 if (!FP_REGNO_P (REGNO (reg1))
18522 || !FP_REGNO_P (REGNO (reg2)))
18523 return 0;
18525 return (REGNO (reg1) == REGNO (reg2) - 1);
18528 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18529 addr1 and addr2 must be in consecutive memory locations
18530 (addr2 == addr1 + 8). */
18532 int
18533 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18535 rtx addr1, addr2;
18536 unsigned int reg1, reg2;
18537 int offset1, offset2;
18539 /* The mems cannot be volatile. */
18540 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18541 return 0;
18543 addr1 = XEXP (mem1, 0);
18544 addr2 = XEXP (mem2, 0);
18546 /* Extract an offset (if used) from the first addr. */
18547 if (GET_CODE (addr1) == PLUS)
18549 /* If not a REG, return zero. */
18550 if (GET_CODE (XEXP (addr1, 0)) != REG)
18551 return 0;
18552 else
18554 reg1 = REGNO (XEXP (addr1, 0));
18555 /* The offset must be constant! */
18556 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18557 return 0;
18558 offset1 = INTVAL (XEXP (addr1, 1));
18561 else if (GET_CODE (addr1) != REG)
18562 return 0;
18563 else
18565 reg1 = REGNO (addr1);
18566 /* This was a simple (mem (reg)) expression. Offset is 0. */
18567 offset1 = 0;
18570 /* And now for the second addr. */
18571 if (GET_CODE (addr2) == PLUS)
18573 /* If not a REG, return zero. */
18574 if (GET_CODE (XEXP (addr2, 0)) != REG)
18575 return 0;
18576 else
18578 reg2 = REGNO (XEXP (addr2, 0));
18579 /* The offset must be constant. */
18580 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18581 return 0;
18582 offset2 = INTVAL (XEXP (addr2, 1));
18585 else if (GET_CODE (addr2) != REG)
18586 return 0;
18587 else
18589 reg2 = REGNO (addr2);
18590 /* This was a simple (mem (reg)) expression. Offset is 0. */
18591 offset2 = 0;
18594 /* Both of these must have the same base register. */
18595 if (reg1 != reg2)
18596 return 0;
18598 /* The offset for the second addr must be 8 more than the first addr. */
18599 if (offset2 != offset1 + 8)
18600 return 0;
18602 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18603 instructions. */
18604 return 1;
18607 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18608 need to use DDmode; in all other cases we can use the same mode. */
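/* For example, an SDmode pseudo spilled during LRA gets an 8-byte DDmode
   stack slot; FPRs hold 4-byte decimal floats in the wider DDmode format,
   so the narrower mode cannot be reloaded through memory directly.  */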
18609 static machine_mode
18610 rs6000_secondary_memory_needed_mode (machine_mode mode)
18612 if (lra_in_progress && mode == SDmode)
18613 return DDmode;
18614 return mode;
18617 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18618 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18619 only work on the traditional altivec registers, note if an altivec register
18620 was chosen. */
18622 static enum rs6000_reg_type
18623 register_to_reg_type (rtx reg, bool *is_altivec)
18625 HOST_WIDE_INT regno;
18626 enum reg_class rclass;
18628 if (GET_CODE (reg) == SUBREG)
18629 reg = SUBREG_REG (reg);
18631 if (!REG_P (reg))
18632 return NO_REG_TYPE;
18634 regno = REGNO (reg);
18635 if (regno >= FIRST_PSEUDO_REGISTER)
18637 if (!lra_in_progress && !reload_completed)
18638 return PSEUDO_REG_TYPE;
18640 regno = true_regnum (reg);
18641 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18642 return PSEUDO_REG_TYPE;
18645 gcc_assert (regno >= 0);
18647 if (is_altivec && ALTIVEC_REGNO_P (regno))
18648 *is_altivec = true;
18650 rclass = rs6000_regno_regclass[regno];
18651 return reg_class_to_reg_type[(int)rclass];
18654 /* Helper function to return the cost of adding a TOC entry address. */
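/* For example, with -mcmodel=small this returns 3, or 6 under
   -mminimal-toc; the medium and large code models return 1 or 2 depending
   on whether the register class supports offset addressing.  */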
18656 static inline int
18657 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18659 int ret;
18661 if (TARGET_CMODEL != CMODEL_SMALL)
18662 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18664 else
18665 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18667 return ret;
18670 /* Helper function for rs6000_secondary_reload to determine whether the memory
18671 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18672 needs reloading. Return negative if the memory is not handled by the memory
18673 helper functions (so a different reload method should be tried), 0 if no
18674 additional instructions are needed, and positive to give the extra cost for
18675 the memory. */
18677 static int
18678 rs6000_secondary_reload_memory (rtx addr,
18679 enum reg_class rclass,
18680 machine_mode mode)
18682 int extra_cost = 0;
18683 rtx reg, and_arg, plus_arg0, plus_arg1;
18684 addr_mask_type addr_mask;
18685 const char *type = NULL;
18686 const char *fail_msg = NULL;
18688 if (GPR_REG_CLASS_P (rclass))
18689 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18691 else if (rclass == FLOAT_REGS)
18692 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18694 else if (rclass == ALTIVEC_REGS)
18695 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18697 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18698 else if (rclass == VSX_REGS)
18699 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18700 & ~RELOAD_REG_AND_M16);
18702 /* If the register allocator hasn't made up its mind yet on the register
18703 class to use, settle on sensible defaults. */
18704 else if (rclass == NO_REGS)
18706 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18707 & ~RELOAD_REG_AND_M16);
18709 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18710 addr_mask &= ~(RELOAD_REG_INDEXED
18711 | RELOAD_REG_PRE_INCDEC
18712 | RELOAD_REG_PRE_MODIFY);
18715 else
18716 addr_mask = 0;
18718 /* If the register isn't valid in this register class, just return now. */
18719 if ((addr_mask & RELOAD_REG_VALID) == 0)
18721 if (TARGET_DEBUG_ADDR)
18723 fprintf (stderr,
18724 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18725 "not valid in class\n",
18726 GET_MODE_NAME (mode), reg_class_names[rclass]);
18727 debug_rtx (addr);
18730 return -1;
18733 switch (GET_CODE (addr))
18735 /* Does the register class support auto update forms for this mode? We
18736 don't need a scratch register, since the powerpc only supports
18737 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18738 case PRE_INC:
18739 case PRE_DEC:
18740 reg = XEXP (addr, 0);
18741 if (!base_reg_operand (reg, GET_MODE (reg)))
18743 fail_msg = "no base register #1";
18744 extra_cost = -1;
18747 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18749 extra_cost = 1;
18750 type = "update";
18752 break;
18754 case PRE_MODIFY:
18755 reg = XEXP (addr, 0);
18756 plus_arg1 = XEXP (addr, 1);
18757 if (!base_reg_operand (reg, GET_MODE (reg))
18758 || GET_CODE (plus_arg1) != PLUS
18759 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18761 fail_msg = "bad PRE_MODIFY";
18762 extra_cost = -1;
18765 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18767 extra_cost = 1;
18768 type = "update";
18770 break;
18772 /* Do we need to simulate AND -16 to clear the bottom address bits used
18773 in VMX load/stores? Only allow the AND for vector sizes. */
18774 case AND:
18775 and_arg = XEXP (addr, 0);
18776 if (GET_MODE_SIZE (mode) != 16
18777 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18778 || INTVAL (XEXP (addr, 1)) != -16)
18780 fail_msg = "bad Altivec AND #1";
18781 extra_cost = -1;
18784 if (rclass != ALTIVEC_REGS)
18786 if (legitimate_indirect_address_p (and_arg, false))
18787 extra_cost = 1;
18789 else if (legitimate_indexed_address_p (and_arg, false))
18790 extra_cost = 2;
18792 else
18794 fail_msg = "bad Altivec AND #2";
18795 extra_cost = -1;
18798 type = "and";
18800 break;
18802 /* If this is an indirect address, make sure it is a base register. */
18803 case REG:
18804 case SUBREG:
18805 if (!legitimate_indirect_address_p (addr, false))
18807 extra_cost = 1;
18808 type = "move";
18810 break;
18812 /* If this is an indexed address, make sure the register class can handle
18813 indexed addresses for this mode. */
18814 case PLUS:
18815 plus_arg0 = XEXP (addr, 0);
18816 plus_arg1 = XEXP (addr, 1);
18818 /* (plus (plus (reg) (constant)) (constant)) is generated during
18819 push_reload processing, so handle it now. */
18820 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18822 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18824 extra_cost = 1;
18825 type = "offset";
18829 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18830 push_reload processing, so handle it now. */
18831 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18833 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18835 extra_cost = 1;
18836 type = "indexed #2";
18840 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18842 fail_msg = "no base register #2";
18843 extra_cost = -1;
18846 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18848 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18849 || !legitimate_indexed_address_p (addr, false))
18851 extra_cost = 1;
18852 type = "indexed";
18856 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18857 && CONST_INT_P (plus_arg1))
18859 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18861 extra_cost = 1;
18862 type = "vector d-form offset";
18866 /* Make sure the register class can handle offset addresses. */
18867 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18869 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18871 extra_cost = 1;
18872 type = "offset #2";
18876 else
18878 fail_msg = "bad PLUS";
18879 extra_cost = -1;
18882 break;
18884 case LO_SUM:
18885 /* Quad offsets are restricted and can't handle normal addresses. */
18886 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18888 extra_cost = -1;
18889 type = "vector d-form lo_sum";
18892 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18894 fail_msg = "bad LO_SUM";
18895 extra_cost = -1;
18898 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18900 extra_cost = 1;
18901 type = "lo_sum";
18903 break;
18905 /* Static addresses need to create a TOC entry. */
18906 case CONST:
18907 case SYMBOL_REF:
18908 case LABEL_REF:
18909 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18911 extra_cost = -1;
18912 type = "vector d-form lo_sum #2";
18915 else
18917 type = "address";
18918 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18920 break;
18922 /* TOC references look like offsettable memory. */
18923 case UNSPEC:
18924 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18926 fail_msg = "bad UNSPEC";
18927 extra_cost = -1;
18930 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18932 extra_cost = -1;
18933 type = "vector d-form lo_sum #3";
18936 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18938 extra_cost = 1;
18939 type = "toc reference";
18941 break;
18943 default:
18945 fail_msg = "bad address";
18946 extra_cost = -1;
18950 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18952 if (extra_cost < 0)
18953 fprintf (stderr,
18954 "rs6000_secondary_reload_memory error: mode = %s, "
18955 "class = %s, addr_mask = '%s', %s\n",
18956 GET_MODE_NAME (mode),
18957 reg_class_names[rclass],
18958 rs6000_debug_addr_mask (addr_mask, false),
18959 (fail_msg != NULL) ? fail_msg : "<bad address>");
18961 else
18962 fprintf (stderr,
18963 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18964 "addr_mask = '%s', extra cost = %d, %s\n",
18965 GET_MODE_NAME (mode),
18966 reg_class_names[rclass],
18967 rs6000_debug_addr_mask (addr_mask, false),
18968 extra_cost,
18969 (type) ? type : "<none>");
18971 debug_rtx (addr);
18974 return extra_cost;
18977 /* Helper function for rs6000_secondary_reload to return true if a move to a
18978 different register class is really a simple move. */
18980 static bool
18981 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
18982 enum rs6000_reg_type from_type,
18983 machine_mode mode)
18985 int size = GET_MODE_SIZE (mode);
18987 /* Add support for various direct moves available. In this function, we only
18988 look at cases where we don't need any extra registers, and one or more
18989 simple move insns are issued. Originally small integers are not allowed
18990 in FPR/VSX registers. Single precision binary floating point is not a simple
18991 move because we need to convert to the single precision memory layout.
18992 The 4-byte SDmode can be moved. TDmode values are disallowed since they
18993 need special direct move handling, which we do not support yet. */
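/* For example, a DImode move between a GPR and a VSX register on a 64-bit
   ISA 2.07 target is a single mtvsrd or mfvsrd, so it is a simple move.  */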
18994 if (TARGET_DIRECT_MOVE
18995 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
18996 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
18998 if (TARGET_POWERPC64)
19000 /* ISA 2.07: MTVSRD or MFVSRD. */
19001 if (size == 8)
19002 return true;
19004 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19005 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19006 return true;
19009 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19010 if (TARGET_P8_VECTOR)
19012 if (mode == SImode)
19013 return true;
19015 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19016 return true;
19019 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19020 if (mode == SDmode)
19021 return true;
19024 /* Power6+: MFTGPR or MFFGPR. */
19025 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19026 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19027 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19028 return true;
19030 /* Move to/from SPR. */
19031 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19032 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19033 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19034 return true;
19036 return false;
19039 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19040 special direct moves that involve allocating an extra register. Return
19041 true and fill in SRI with the insn code and extra cost if there is such
19042 a move, or false if not. */
19044 static bool
19045 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19046 enum rs6000_reg_type from_type,
19047 machine_mode mode,
19048 secondary_reload_info *sri,
19049 bool altivec_p)
19051 bool ret = false;
19052 enum insn_code icode = CODE_FOR_nothing;
19053 int cost = 0;
19054 int size = GET_MODE_SIZE (mode);
19056 if (TARGET_POWERPC64 && size == 16)
19058 /* Handle moving 128-bit values from GPRs to VSX registers on
19059 ISA 2.07 (power8, power9) when running in 64-bit mode using
19060 XXPERMDI to glue the two 64-bit values back together. */
19061 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19063 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19064 icode = reg_addr[mode].reload_vsx_gpr;
19067 /* Handle moving 128-bit values from VSX registers to GPRs on
19068 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19069 bottom 64-bit value. */
19070 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19072 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19073 icode = reg_addr[mode].reload_gpr_vsx;
19077 else if (TARGET_POWERPC64 && mode == SFmode)
19079 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19081 cost = 3; /* xscvdpspn, mfvsrd, and. */
19082 icode = reg_addr[mode].reload_gpr_vsx;
19085 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19087 cost = 2; /* mtvsrz, xscvspdpn. */
19088 icode = reg_addr[mode].reload_vsx_gpr;
19092 else if (!TARGET_POWERPC64 && size == 8)
19094 /* Handle moving 64-bit values from GPRs to floating point registers on
19095 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19096 32-bit values back together. Altivec register classes must be handled
19097 specially since a different instruction is used, and the secondary
19098 reload support requires a single instruction class in the scratch
19099 register constraint. However, right now TFmode is not allowed in
19100 Altivec registers, so the pattern will never match. */
19101 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19103 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19104 icode = reg_addr[mode].reload_fpr_gpr;
19108 if (icode != CODE_FOR_nothing)
19110 ret = true;
19111 if (sri)
19113 sri->icode = icode;
19114 sri->extra_cost = cost;
19118 return ret;
19121 /* Return whether a move between two register classes can be done either
19122 directly (simple move) or via a pattern that uses a single extra temporary
19123 (using ISA 2.07's direct move in this case). */
19125 static bool
19126 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19127 enum rs6000_reg_type from_type,
19128 machine_mode mode,
19129 secondary_reload_info *sri,
19130 bool altivec_p)
19132 /* Fall back to load/store reloads if either type is not a register. */
19133 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19134 return false;
19136 /* If we haven't allocated registers yet, assume the move can be done for the
19137 standard register types. */
19138 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19139 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19140 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19141 return true;
19143 /* A move within the same set of registers is a simple move for
19144 non-specialized registers. */
19145 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19146 return true;
19148 /* Check whether a simple move can be done directly. */
19149 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19151 if (sri)
19153 sri->icode = CODE_FOR_nothing;
19154 sri->extra_cost = 0;
19156 return true;
19159 /* Now check if we can do it in a few steps. */
19160 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19161 altivec_p);
19164 /* Inform reload about cases where moving X with a mode MODE to a register in
19165 RCLASS requires an extra scratch or immediate register. Return the class
19166 needed for the immediate register.
19168 For VSX and Altivec, we may need a register to convert sp+offset into
19169 reg+sp.
19171 For misaligned 64-bit gpr loads and stores we need a register to
19172 convert an offset address to indirect. */
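/* For example, a 64-bit GPR load from offset 5 cannot use the DS-form ld
   instruction (its offset must be a multiple of 4), so the code below
   requests a scratch register and the reload_di_load pattern to rebuild
   the address, at an extra cost of 2.  */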
19174 static reg_class_t
19175 rs6000_secondary_reload (bool in_p,
19176 rtx x,
19177 reg_class_t rclass_i,
19178 machine_mode mode,
19179 secondary_reload_info *sri)
19181 enum reg_class rclass = (enum reg_class) rclass_i;
19182 reg_class_t ret = ALL_REGS;
19183 enum insn_code icode;
19184 bool default_p = false;
19185 bool done_p = false;
19187 /* Allow subreg of memory before/during reload. */
19188 bool memory_p = (MEM_P (x)
19189 || (!reload_completed && GET_CODE (x) == SUBREG
19190 && MEM_P (SUBREG_REG (x))));
19192 sri->icode = CODE_FOR_nothing;
19193 sri->t_icode = CODE_FOR_nothing;
19194 sri->extra_cost = 0;
19195 icode = ((in_p)
19196 ? reg_addr[mode].reload_load
19197 : reg_addr[mode].reload_store);
19199 if (REG_P (x) || register_operand (x, mode))
19201 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19202 bool altivec_p = (rclass == ALTIVEC_REGS);
19203 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19205 if (!in_p)
19206 std::swap (to_type, from_type);
19208 /* Can we do a direct move of some sort? */
19209 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19210 altivec_p))
19212 icode = (enum insn_code)sri->icode;
19213 default_p = false;
19214 done_p = true;
19215 ret = NO_REGS;
19219 /* Make sure 0.0 is not reloaded or forced into memory. */
19220 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19222 ret = NO_REGS;
19223 default_p = false;
19224 done_p = true;
19227 /* If this is a scalar floating point value and we want to load it into the
19228 traditional Altivec registers, do it via a traditional floating point
19229 register, unless we have D-form addressing. Also make sure that
19230 non-zero constants use an FPR. */
19231 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19232 && !mode_supports_vmx_dform (mode)
19233 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19234 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19236 ret = FLOAT_REGS;
19237 default_p = false;
19238 done_p = true;
19241 /* Handle reload of load/stores if we have reload helper functions. */
19242 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19244 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19245 mode);
19247 if (extra_cost >= 0)
19249 done_p = true;
19250 ret = NO_REGS;
19251 if (extra_cost > 0)
19253 sri->extra_cost = extra_cost;
19254 sri->icode = icode;
19259 /* Handle unaligned loads and stores of integer registers. */
19260 if (!done_p && TARGET_POWERPC64
19261 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19262 && memory_p
19263 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19265 rtx addr = XEXP (x, 0);
19266 rtx off = address_offset (addr);
19268 if (off != NULL_RTX)
19270 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19271 unsigned HOST_WIDE_INT offset = INTVAL (off);
19273 /* We need a secondary reload when our legitimate_address_p
19274 says the address is good (as otherwise the entire address
19275 will be reloaded), and the offset is not a multiple of
19276 four or we have an address wrap. Address wrap will only
19277 occur for LO_SUMs since legitimate_offset_address_p
19278 rejects addresses for 16-byte mems that will wrap. */
19279 if (GET_CODE (addr) == LO_SUM
19280 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19281 && ((offset & 3) != 0
19282 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19283 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19284 && (offset & 3) != 0))
19286 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19287 if (in_p)
19288 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19289 : CODE_FOR_reload_di_load);
19290 else
19291 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19292 : CODE_FOR_reload_di_store);
19293 sri->extra_cost = 2;
19294 ret = NO_REGS;
19295 done_p = true;
19297 else
19298 default_p = true;
19300 else
19301 default_p = true;
19304 if (!done_p && !TARGET_POWERPC64
19305 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19306 && memory_p
19307 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19309 rtx addr = XEXP (x, 0);
19310 rtx off = address_offset (addr);
19312 if (off != NULL_RTX)
19314 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19315 unsigned HOST_WIDE_INT offset = INTVAL (off);
19317 /* We need a secondary reload when our legitimate_address_p
19318 says the address is good (as otherwise the entire address
19319 will be reloaded), and we have a wrap.
19321 legitimate_lo_sum_address_p allows LO_SUM addresses to
19322 have any offset so test for wrap in the low 16 bits.
19324 legitimate_offset_address_p checks for the range
19325 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19326 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19327 [0x7ff4,0x7fff] respectively, so test for the
19328 intersection of these ranges, [0x7ffc,0x7fff] and
19329 [0x7ff4,0x7ff7] respectively.
19331 Note that the address we see here may have been
19332 manipulated by legitimize_reload_address. */
19333 if (GET_CODE (addr) == LO_SUM
19334 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19335 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19337 if (in_p)
19338 sri->icode = CODE_FOR_reload_si_load;
19339 else
19340 sri->icode = CODE_FOR_reload_si_store;
19341 sri->extra_cost = 2;
19342 ret = NO_REGS;
19343 done_p = true;
19345 else
19346 default_p = true;
19348 else
19349 default_p = true;
19352 if (!done_p)
19353 default_p = true;
19355 if (default_p)
19356 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19358 gcc_assert (ret != ALL_REGS);
19360 if (TARGET_DEBUG_ADDR)
19362 fprintf (stderr,
19363 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19364 "mode = %s",
19365 reg_class_names[ret],
19366 in_p ? "true" : "false",
19367 reg_class_names[rclass],
19368 GET_MODE_NAME (mode));
19370 if (reload_completed)
19371 fputs (", after reload", stderr);
19373 if (!done_p)
19374 fputs (", done_p not set", stderr);
19376 if (default_p)
19377 fputs (", default secondary reload", stderr);
19379 if (sri->icode != CODE_FOR_nothing)
19380 fprintf (stderr, ", reload func = %s, extra cost = %d",
19381 insn_data[sri->icode].name, sri->extra_cost);
19383 else if (sri->extra_cost > 0)
19384 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19386 fputs ("\n", stderr);
19387 debug_rtx (x);
19390 return ret;
19393 /* Better tracing for rs6000_secondary_reload_inner. */
19395 static void
19396 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19397 bool store_p)
19399 rtx set, clobber;
19401 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19403 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19404 store_p ? "store" : "load");
19406 if (store_p)
19407 set = gen_rtx_SET (mem, reg);
19408 else
19409 set = gen_rtx_SET (reg, mem);
19411 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19412 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19415 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19416 ATTRIBUTE_NORETURN;
19418 static void
19419 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19420 bool store_p)
19422 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19423 gcc_unreachable ();
19426 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
19427 reload helper functions. These were identified in
19428 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19429 reload, it calls the insns:
19430 reload_<RELOAD:mode>_<P:mptrsize>_store
19431 reload_<RELOAD:mode>_<P:mptrsize>_load
19433 which in turn calls this function, to do whatever is necessary to create
19434 valid addresses. */
19436 void
19437 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19439 int regno = true_regnum (reg);
19440 machine_mode mode = GET_MODE (reg);
19441 addr_mask_type addr_mask;
19442 rtx addr;
19443 rtx new_addr;
19444 rtx op_reg, op0, op1;
19445 rtx and_op;
19446 rtx cc_clobber;
19447 rtvec rv;
19449 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19450 || !base_reg_operand (scratch, GET_MODE (scratch)))
19451 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19453 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19454 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19456 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19457 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19459 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19460 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19462 else
19463 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19465 /* Make sure the mode is valid in this register class. */
19466 if ((addr_mask & RELOAD_REG_VALID) == 0)
19467 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19469 if (TARGET_DEBUG_ADDR)
19470 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19472 new_addr = addr = XEXP (mem, 0);
19473 switch (GET_CODE (addr))
19475 /* Does the register class support auto update forms for this mode? If
19476 not, do the update now. We don't need a scratch register, since the
19477 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19478 case PRE_INC:
19479 case PRE_DEC:
19480 op_reg = XEXP (addr, 0);
19481 if (!base_reg_operand (op_reg, Pmode))
19482 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19484 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19486 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19487 new_addr = op_reg;
19489 break;
19491 case PRE_MODIFY:
19492 op0 = XEXP (addr, 0);
19493 op1 = XEXP (addr, 1);
19494 if (!base_reg_operand (op0, Pmode)
19495 || GET_CODE (op1) != PLUS
19496 || !rtx_equal_p (op0, XEXP (op1, 0)))
19497 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19499 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19501 emit_insn (gen_rtx_SET (op0, op1));
19502 new_addr = op0;
19504 break;
19506 /* Do we need to simulate AND -16 to clear the bottom address bits used
19507 in VMX load/stores? */
19508 case AND:
19509 op0 = XEXP (addr, 0);
19510 op1 = XEXP (addr, 1);
19511 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19513 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19514 op_reg = op0;
19516 else if (GET_CODE (op1) == PLUS)
19518 emit_insn (gen_rtx_SET (scratch, op1));
19519 op_reg = scratch;
19522 else
19523 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19525 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19526 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19527 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19528 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19529 new_addr = scratch;
19531 break;
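/* The lvx/stvx instructions ignore the low four bits of the effective
   address, which the address legitimization code models as an AND with
   -16.  If the register class cannot fold that AND into the access
   itself, the code above materializes the masked address in the scratch
   register, with the CC scratch clobber the and patterns require.  */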
19533 /* If this is an indirect address, make sure it is a base register. */
19534 case REG:
19535 case SUBREG:
19536 if (!base_reg_operand (addr, GET_MODE (addr)))
19538 emit_insn (gen_rtx_SET (scratch, addr));
19539 new_addr = scratch;
19541 break;
19543 /* If this is an indexed address, make sure the register class can handle
19544 indexed addresses for this mode. */
19545 case PLUS:
19546 op0 = XEXP (addr, 0);
19547 op1 = XEXP (addr, 1);
19548 if (!base_reg_operand (op0, Pmode))
19549 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19551 else if (int_reg_operand (op1, Pmode))
19553 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19555 emit_insn (gen_rtx_SET (scratch, addr));
19556 new_addr = scratch;
19560 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19562 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19563 || !quad_address_p (addr, mode, false))
19565 emit_insn (gen_rtx_SET (scratch, addr));
19566 new_addr = scratch;
19570 /* Make sure the register class can handle offset addresses. */
19571 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19573 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19575 emit_insn (gen_rtx_SET (scratch, addr));
19576 new_addr = scratch;
19580 else
19581 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19583 break;
19585 case LO_SUM:
19586 op0 = XEXP (addr, 0);
19587 op1 = XEXP (addr, 1);
19588 if (!base_reg_operand (op0, Pmode))
19589 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19591 else if (int_reg_operand (op1, Pmode))
19593 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19595 emit_insn (gen_rtx_SET (scratch, addr));
19596 new_addr = scratch;
19600 /* Quad offsets are restricted and can't handle normal addresses. */
19601 else if (mode_supports_dq_form (mode))
19603 emit_insn (gen_rtx_SET (scratch, addr));
19604 new_addr = scratch;
19607 /* Make sure the register class can handle offset addresses. */
19608 else if (legitimate_lo_sum_address_p (mode, addr, false))
19610 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19612 emit_insn (gen_rtx_SET (scratch, addr));
19613 new_addr = scratch;
19617 else
19618 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19620 break;
19622 case SYMBOL_REF:
19623 case CONST:
19624 case LABEL_REF:
19625 rs6000_emit_move (scratch, addr, Pmode);
19626 new_addr = scratch;
19627 break;
19629 default:
19630 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19633 /* Adjust the address if it changed. */
19634 if (addr != new_addr)
19636 mem = replace_equiv_address_nv (mem, new_addr);
19637 if (TARGET_DEBUG_ADDR)
19638 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19641 /* Now create the move. */
19642 if (store_p)
19643 emit_insn (gen_rtx_SET (mem, reg));
19644 else
19645 emit_insn (gen_rtx_SET (reg, mem));
19647 return;
19650 /* Convert reloads involving 64-bit gprs and misaligned offset
19651 addressing, or multiple 32-bit gprs and offsets that are too large,
19652 to use indirect addressing. */
19654 void
19655 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19657 int regno = true_regnum (reg);
19658 enum reg_class rclass;
19659 rtx addr;
19660 rtx scratch_or_premodify = scratch;
19662 if (TARGET_DEBUG_ADDR)
19664 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19665 store_p ? "store" : "load");
19666 fprintf (stderr, "reg:\n");
19667 debug_rtx (reg);
19668 fprintf (stderr, "mem:\n");
19669 debug_rtx (mem);
19670 fprintf (stderr, "scratch:\n");
19671 debug_rtx (scratch);
19674 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19675 gcc_assert (GET_CODE (mem) == MEM);
19676 rclass = REGNO_REG_CLASS (regno);
19677 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19678 addr = XEXP (mem, 0);
19680 if (GET_CODE (addr) == PRE_MODIFY)
19682 gcc_assert (REG_P (XEXP (addr, 0))
19683 && GET_CODE (XEXP (addr, 1)) == PLUS
19684 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19685 scratch_or_premodify = XEXP (addr, 0);
19686 if (!HARD_REGISTER_P (scratch_or_premodify))
19687 /* If we have a pseudo here then reload will have arranged
19688 to have it replaced, but only in the original insn.
19689 Use the replacement here too. */
19690 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19692 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19693 expressions from the original insn, without unsharing them.
19694 Any RTL that points into the original insn will of course
19695 have register replacements applied. That is why we don't
19696 need to look for replacements under the PLUS. */
19697 addr = XEXP (addr, 1);
19699 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19701 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19703 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19705 /* Now create the move. */
19706 if (store_p)
19707 emit_insn (gen_rtx_SET (mem, reg));
19708 else
19709 emit_insn (gen_rtx_SET (reg, mem));
19711 return;
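/* For instance, DImode uses the DS-form ld/std instructions, whose
   displacement must be a multiple of four; if reload hands us something
   like 127(r9) for a 64-bit gpr, the whole address is moved into the
   scratch register above and the access becomes 0(scratch).  */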
19714 /* Given an rtx X being reloaded into a reg required to be
19715 in class CLASS, return the class of reg to actually use.
19716 In general this is just CLASS; but on some machines
19717 in some cases it is preferable to use a more restrictive class.
19719 On the RS/6000, we have to return NO_REGS when we want to reload a
19720 floating-point CONST_DOUBLE to force it to be copied to memory.
19722 We also don't want to reload integer values into floating-point
19723 registers if we can at all help it. In fact, this can
19724 cause reload to die, if it tries to generate a reload of CTR
19725 into a FP register and discovers it doesn't have the memory location
19726 required.
19728 ??? Would it be a good idea to have reload do the converse, that is
19729 try to reload floating modes into FP registers if possible?
19732 static enum reg_class
19733 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19735 machine_mode mode = GET_MODE (x);
19736 bool is_constant = CONSTANT_P (x);
19738 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19739 reload class for it. */
19740 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19741 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19742 return NO_REGS;
19744 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19745 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19746 return NO_REGS;
19748 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19749 the reloading of address expressions using PLUS into floating point
19750 registers. */
19751 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19753 if (is_constant)
19755 /* Zero is always allowed in all VSX registers. */
19756 if (x == CONST0_RTX (mode))
19757 return rclass;
19759 /* If this is a vector constant that can be formed with a few Altivec
19760 instructions, we want altivec registers. */
19761 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19762 return ALTIVEC_REGS;
19764 /* If this is an integer constant that can easily be loaded into
19765 vector registers, allow it. */
19766 if (CONST_INT_P (x))
19768 HOST_WIDE_INT value = INTVAL (x);
19770 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19771 2.06 can generate it in the Altivec registers with
19772 VSPLTI<x>. */
19773 if (value == -1)
19775 if (TARGET_P8_VECTOR)
19776 return rclass;
19777 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19778 return ALTIVEC_REGS;
19779 else
19780 return NO_REGS;
19783 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19784 a sign extend in the Altivec registers. */
19785 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19786 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19787 return ALTIVEC_REGS;
19790 /* Force constant to memory. */
19791 return NO_REGS;
19794 /* D-form addressing can easily reload the value. */
19795 if (mode_supports_vmx_dform (mode)
19796 || mode_supports_dq_form (mode))
19797 return rclass;
19799 /* If this is a scalar floating point value and we don't have D-form
19800 addressing, prefer the traditional floating point registers so that we
19801 can use D-form (register+offset) addressing. */
19802 if (rclass == VSX_REGS
19803 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19804 return FLOAT_REGS;
19806 /* Prefer the Altivec registers if Altivec is handling the vector
19807 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19808 loads. */
19809 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19810 || mode == V1TImode)
19811 return ALTIVEC_REGS;
19813 return rclass;
19816 if (is_constant || GET_CODE (x) == PLUS)
19818 if (reg_class_subset_p (GENERAL_REGS, rclass))
19819 return GENERAL_REGS;
19820 if (reg_class_subset_p (BASE_REGS, rclass))
19821 return BASE_REGS;
19822 return NO_REGS;
19825 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
19826 return GENERAL_REGS;
19828 return rclass;
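/* Illustrating the preferences above: an easy V4SI splat constant is
   steered to ALTIVEC_REGS so that it can be built with vspltisw, while
   an SFmode or DFmode pseudo asked for VSX_REGS is narrowed to
   FLOAT_REGS so that d-form lfs/lfd addressing remains usable.  */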
19831 /* Debug version of rs6000_preferred_reload_class. */
19832 static enum reg_class
19833 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19835 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19837 fprintf (stderr,
19838 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19839 "mode = %s, x:\n",
19840 reg_class_names[ret], reg_class_names[rclass],
19841 GET_MODE_NAME (GET_MODE (x)));
19842 debug_rtx (x);
19844 return ret;
19847 /* If we are copying between FP or AltiVec registers and anything else, we need
19848 a memory location. The exception is when we are targeting ppc64 and the
19849 move to/from fpr to gpr instructions are available. Also, under VSX, you
19850 can copy vector registers from the FP register set to the Altivec register
19851 set and vice versa. */
19853 static bool
19854 rs6000_secondary_memory_needed (machine_mode mode,
19855 reg_class_t from_class,
19856 reg_class_t to_class)
19858 enum rs6000_reg_type from_type, to_type;
19859 bool altivec_p = ((from_class == ALTIVEC_REGS)
19860 || (to_class == ALTIVEC_REGS));
19862 /* If a simple/direct move is available, we don't need secondary memory. */
19863 from_type = reg_class_to_reg_type[(int)from_class];
19864 to_type = reg_class_to_reg_type[(int)to_class];
19866 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19867 (secondary_reload_info *)0, altivec_p))
19868 return false;
19870 /* If we have a floating point or vector register class, we need to use
19871 memory to transfer the data. */
19872 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19873 return true;
19875 return false;
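/* Example: without the power8 direct-move instructions (mtvsrd/mfvsrd),
   a DImode copy between a GPR and an FPR answers true here, and reload
   bounces the value through a stack slot.  */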
19878 /* Debug version of rs6000_secondary_memory_needed. */
19879 static bool
19880 rs6000_debug_secondary_memory_needed (machine_mode mode,
19881 reg_class_t from_class,
19882 reg_class_t to_class)
19884 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19886 fprintf (stderr,
19887 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19888 "to_class = %s, mode = %s\n",
19889 ret ? "true" : "false",
19890 reg_class_names[from_class],
19891 reg_class_names[to_class],
19892 GET_MODE_NAME (mode));
19894 return ret;
19897 /* Return the register class of a scratch register needed to copy IN into
19898 or out of a register in RCLASS in MODE. If it can be done directly,
19899 NO_REGS is returned. */
19901 static enum reg_class
19902 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19903 rtx in)
19905 int regno;
19907 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19908 #if TARGET_MACHO
19909 && MACHOPIC_INDIRECT
19910 #endif
19913 /* We cannot copy a symbolic operand directly into anything
19914 other than BASE_REGS for TARGET_ELF. So indicate that a
19915 register from BASE_REGS is needed as an intermediate
19916 register.
19918 On Darwin, pic addresses require a load from memory, which
19919 needs a base register. */
19920 if (rclass != BASE_REGS
19921 && (GET_CODE (in) == SYMBOL_REF
19922 || GET_CODE (in) == HIGH
19923 || GET_CODE (in) == LABEL_REF
19924 || GET_CODE (in) == CONST))
19925 return BASE_REGS;
19928 if (GET_CODE (in) == REG)
19930 regno = REGNO (in);
19931 if (regno >= FIRST_PSEUDO_REGISTER)
19933 regno = true_regnum (in);
19934 if (regno >= FIRST_PSEUDO_REGISTER)
19935 regno = -1;
19938 else if (GET_CODE (in) == SUBREG)
19940 regno = true_regnum (in);
19941 if (regno >= FIRST_PSEUDO_REGISTER)
19942 regno = -1;
19944 else
19945 regno = -1;
19947 /* If we have VSX register moves, prefer moving scalar values between
19948 Altivec registers and GPR by going via an FPR (and then via memory)
19949 instead of reloading the secondary memory address for Altivec moves. */
19950 if (TARGET_VSX
19951 && GET_MODE_SIZE (mode) < 16
19952 && !mode_supports_vmx_dform (mode)
19953 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19954 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19955 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19956 && (regno >= 0 && INT_REGNO_P (regno)))))
19957 return FLOAT_REGS;
19959 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19960 into anything. */
19961 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19962 || (regno >= 0 && INT_REGNO_P (regno)))
19963 return NO_REGS;
19965 /* Constants, memory, and VSX registers can go into VSX registers (both the
19966 traditional floating point and the altivec registers). */
19967 if (rclass == VSX_REGS
19968 && (regno == -1 || VSX_REGNO_P (regno)))
19969 return NO_REGS;
19971 /* Constants, memory, and FP registers can go into FP registers. */
19972 if ((regno == -1 || FP_REGNO_P (regno))
19973 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
19974 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
19976 /* Memory, and AltiVec registers can go into AltiVec registers. */
19977 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
19978 && rclass == ALTIVEC_REGS)
19979 return NO_REGS;
19981 /* We can copy among the CR registers. */
19982 if ((rclass == CR_REGS || rclass == CR0_REGS)
19983 && regno >= 0 && CR_REGNO_P (regno))
19984 return NO_REGS;
19986 /* Otherwise, we need GENERAL_REGS. */
19987 return GENERAL_REGS;
19990 /* Debug version of rs6000_secondary_reload_class. */
19991 static enum reg_class
19992 rs6000_debug_secondary_reload_class (enum reg_class rclass,
19993 machine_mode mode, rtx in)
19995 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
19996 fprintf (stderr,
19997 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
19998 "mode = %s, input rtx:\n",
19999 reg_class_names[ret], reg_class_names[rclass],
20000 GET_MODE_NAME (mode));
20001 debug_rtx (in);
20003 return ret;
20006 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20008 static bool
20009 rs6000_can_change_mode_class (machine_mode from,
20010 machine_mode to,
20011 reg_class_t rclass)
20013 unsigned from_size = GET_MODE_SIZE (from);
20014 unsigned to_size = GET_MODE_SIZE (to);
20016 if (from_size != to_size)
20018 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20020 if (reg_classes_intersect_p (xclass, rclass))
20022 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20023 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20024 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20025 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20027 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20028 single register under VSX because the scalar part of the register
20029 is in the upper 64-bits, and not the lower 64-bits. Types like
20030 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20031 IEEE floating point can't overlap, and neither can small
20032 values. */
20034 if (to_float128_vector_p && from_float128_vector_p)
20035 return true;
20037 else if (to_float128_vector_p || from_float128_vector_p)
20038 return false;
20040 /* TDmode in floating-mode registers must always go into a register
20041 pair with the most significant word in the even-numbered register
20042 to match ISA requirements. In little-endian mode, this does not
20043 match subreg numbering, so we cannot allow subregs. */
20044 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20045 return false;
20047 if (from_size < 8 || to_size < 8)
20048 return false;
20050 if (from_size == 8 && (8 * to_nregs) != to_size)
20051 return false;
20053 if (to_size == 8 && (8 * from_nregs) != from_size)
20054 return false;
20056 return true;
20058 else
20059 return true;
20062 /* Since the VSX register set includes traditional floating point registers
20063 and altivec registers, just check for the size being different instead of
20064 trying to check whether the modes are vector modes. Otherwise it won't
20065 allow say DF and DI to change classes. For types like TFmode and TDmode
20066 that take 2 64-bit registers, rather than a single 128-bit register, don't
20067 allow subregs of those types to other 128 bit types. */
20068 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20070 unsigned num_regs = (from_size + 15) / 16;
20071 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20072 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20073 return false;
20075 return (from_size == 8 || from_size == 16);
20078 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20079 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20080 return false;
20082 return true;
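/* A case the checks above reject: (subreg:DI (reg:KF)).  KFmode lives in
   a full 128-bit VSX register with the scalar part in the upper 64 bits,
   so reinterpreting the register as a 64-bit value through a subreg
   would access the wrong half.  */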
20085 /* Debug version of rs6000_can_change_mode_class. */
20086 static bool
20087 rs6000_debug_can_change_mode_class (machine_mode from,
20088 machine_mode to,
20089 reg_class_t rclass)
20091 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20093 fprintf (stderr,
20094 "rs6000_can_change_mode_class, return %s, from = %s, "
20095 "to = %s, rclass = %s\n",
20096 ret ? "true" : "false",
20097 GET_MODE_NAME (from), GET_MODE_NAME (to),
20098 reg_class_names[rclass]);
20100 return ret;
20103 /* Return a string to do a move operation of 128 bits of data. */
20105 const char *
20106 rs6000_output_move_128bit (rtx operands[])
20108 rtx dest = operands[0];
20109 rtx src = operands[1];
20110 machine_mode mode = GET_MODE (dest);
20111 int dest_regno;
20112 int src_regno;
20113 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20114 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20116 if (REG_P (dest))
20118 dest_regno = REGNO (dest);
20119 dest_gpr_p = INT_REGNO_P (dest_regno);
20120 dest_fp_p = FP_REGNO_P (dest_regno);
20121 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20122 dest_vsx_p = dest_fp_p | dest_vmx_p;
20124 else
20126 dest_regno = -1;
20127 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20130 if (REG_P (src))
20132 src_regno = REGNO (src);
20133 src_gpr_p = INT_REGNO_P (src_regno);
20134 src_fp_p = FP_REGNO_P (src_regno);
20135 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20136 src_vsx_p = src_fp_p | src_vmx_p;
20138 else
20140 src_regno = -1;
20141 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20144 /* Register moves. */
20145 if (dest_regno >= 0 && src_regno >= 0)
20147 if (dest_gpr_p)
20149 if (src_gpr_p)
20150 return "#";
20152 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20153 return (WORDS_BIG_ENDIAN
20154 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20155 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20157 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20158 return "#";
20161 else if (TARGET_VSX && dest_vsx_p)
20163 if (src_vsx_p)
20164 return "xxlor %x0,%x1,%x1";
20166 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20167 return (WORDS_BIG_ENDIAN
20168 ? "mtvsrdd %x0,%1,%L1"
20169 : "mtvsrdd %x0,%L1,%1");
20171 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20172 return "#";
20175 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20176 return "vor %0,%1,%1";
20178 else if (dest_fp_p && src_fp_p)
20179 return "#";
20182 /* Loads. */
20183 else if (dest_regno >= 0 && MEM_P (src))
20185 if (dest_gpr_p)
20187 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20188 return "lq %0,%1";
20189 else
20190 return "#";
20193 else if (TARGET_ALTIVEC && dest_vmx_p
20194 && altivec_indexed_or_indirect_operand (src, mode))
20195 return "lvx %0,%y1";
20197 else if (TARGET_VSX && dest_vsx_p)
20199 if (mode_supports_dq_form (mode)
20200 && quad_address_p (XEXP (src, 0), mode, true))
20201 return "lxv %x0,%1";
20203 else if (TARGET_P9_VECTOR)
20204 return "lxvx %x0,%y1";
20206 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20207 return "lxvw4x %x0,%y1";
20209 else
20210 return "lxvd2x %x0,%y1";
20213 else if (TARGET_ALTIVEC && dest_vmx_p)
20214 return "lvx %0,%y1";
20216 else if (dest_fp_p)
20217 return "#";
20220 /* Stores. */
20221 else if (src_regno >= 0 && MEM_P (dest))
20223 if (src_gpr_p)
20225 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20226 return "stq %1,%0";
20227 else
20228 return "#";
20231 else if (TARGET_ALTIVEC && src_vmx_p
20232 && altivec_indexed_or_indirect_operand (dest, mode))
20233 return "stvx %1,%y0";
20235 else if (TARGET_VSX && src_vsx_p)
20237 if (mode_supports_dq_form (mode)
20238 && quad_address_p (XEXP (dest, 0), mode, true))
20239 return "stxv %x1,%0";
20241 else if (TARGET_P9_VECTOR)
20242 return "stxvx %x1,%y0";
20244 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20245 return "stxvw4x %x1,%y0";
20247 else
20248 return "stxvd2x %x1,%y0";
20251 else if (TARGET_ALTIVEC && src_vmx_p)
20252 return "stvx %1,%y0";
20254 else if (src_fp_p)
20255 return "#";
20258 /* Constants. */
20259 else if (dest_regno >= 0
20260 && (GET_CODE (src) == CONST_INT
20261 || GET_CODE (src) == CONST_WIDE_INT
20262 || GET_CODE (src) == CONST_DOUBLE
20263 || GET_CODE (src) == CONST_VECTOR))
20265 if (dest_gpr_p)
20266 return "#";
20268 else if ((dest_vmx_p && TARGET_ALTIVEC)
20269 || (dest_vsx_p && TARGET_VSX))
20270 return output_vec_const_move (operands);
20273 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
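/* As an illustration, moving a V2DI value from a GPR pair into a VSX
   register with the ISA 3.0 direct moves emits "mtvsrdd", whereas the
   same move on an older cpu returns "#" here and is split after reload
   (see rs6000_split_128bit_ok_p below).  */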
20276 /* Validate a 128-bit move. */
20277 bool
20278 rs6000_move_128bit_ok_p (rtx operands[])
20280 machine_mode mode = GET_MODE (operands[0]);
20281 return (gpc_reg_operand (operands[0], mode)
20282 || gpc_reg_operand (operands[1], mode));
20285 /* Return true if a 128-bit move needs to be split. */
20286 bool
20287 rs6000_split_128bit_ok_p (rtx operands[])
20289 if (!reload_completed)
20290 return false;
20292 if (!gpr_or_gpr_p (operands[0], operands[1]))
20293 return false;
20295 if (quad_load_store_p (operands[0], operands[1]))
20296 return false;
20298 return true;
20302 /* Given a comparison operation, return the bit number in CCR to test. We
20303 know this is a valid comparison.
20305 SCC_P is 1 if this is for an scc. That means that %D will have been
20306 used instead of %C, so the bits will be in different places.
20308 Return -1 if OP isn't a valid comparison for some reason. */
20310 int
20311 ccr_bit (rtx op, int scc_p)
20313 enum rtx_code code = GET_CODE (op);
20314 machine_mode cc_mode;
20315 int cc_regnum;
20316 int base_bit;
20317 rtx reg;
20319 if (!COMPARISON_P (op))
20320 return -1;
20322 reg = XEXP (op, 0);
20324 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20326 cc_mode = GET_MODE (reg);
20327 cc_regnum = REGNO (reg);
20328 base_bit = 4 * (cc_regnum - CR0_REGNO);
20330 validate_condition_mode (code, cc_mode);
20332 /* When generating a sCOND operation, only positive conditions are
20333 allowed. */
20334 gcc_assert (!scc_p
20335 || code == EQ || code == GT || code == LT || code == UNORDERED
20336 || code == GTU || code == LTU);
20338 switch (code)
20340 case NE:
20341 return scc_p ? base_bit + 3 : base_bit + 2;
20342 case EQ:
20343 return base_bit + 2;
20344 case GT: case GTU: case UNLE:
20345 return base_bit + 1;
20346 case LT: case LTU: case UNGE:
20347 return base_bit;
20348 case ORDERED: case UNORDERED:
20349 return base_bit + 3;
20351 case GE: case GEU:
20352 /* If scc, we will have done a cror to put the bit in the
20353 unordered position. So test that bit. For integer, this is ! LT
20354 unless this is an scc insn. */
20355 return scc_p ? base_bit + 3 : base_bit;
20357 case LE: case LEU:
20358 return scc_p ? base_bit + 3 : base_bit + 1;
20360 default:
20361 gcc_unreachable ();
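/* Example: for (gt (reg:CC cr1) (const_int 0)) with scc_p clear,
   base_bit is 4 and the function returns 5, the GT bit of CR1.  */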
20365 /* Return the GOT register. */
20367 rtx
20368 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20370 /* The second flow pass currently (June 1999) can't update
20371 regs_ever_live without disturbing other parts of the compiler, so
20372 update it here to make the prolog/epilogue code happy. */
20373 if (!can_create_pseudo_p ()
20374 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20375 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20377 crtl->uses_pic_offset_table = 1;
20379 return pic_offset_table_rtx;
20382 static rs6000_stack_t stack_info;
20384 /* Function to init struct machine_function.
20385 This will be called, via a pointer variable,
20386 from push_function_context. */
20388 static struct machine_function *
20389 rs6000_init_machine_status (void)
20391 stack_info.reload_completed = 0;
20392 return ggc_cleared_alloc<machine_function> ();
20395 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20397 /* Write out a function code label. */
20399 void
20400 rs6000_output_function_entry (FILE *file, const char *fname)
20402 if (fname[0] != '.')
20404 switch (DEFAULT_ABI)
20406 default:
20407 gcc_unreachable ();
20409 case ABI_AIX:
20410 if (DOT_SYMBOLS)
20411 putc ('.', file);
20412 else
20413 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20414 break;
20416 case ABI_ELFv2:
20417 case ABI_V4:
20418 case ABI_DARWIN:
20419 break;
20423 RS6000_OUTPUT_BASENAME (file, fname);
20426 /* Print an operand. Recognize special options, documented below. */
20428 #if TARGET_ELF
20429 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20430 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20431 #else
20432 #define SMALL_DATA_RELOC "sda21"
20433 #define SMALL_DATA_REG 0
20434 #endif
20436 void
20437 print_operand (FILE *file, rtx x, int code)
20439 int i;
20440 unsigned HOST_WIDE_INT uval;
20442 switch (code)
20444 /* %a is output_address. */
20446 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20447 output_operand. */
20449 case 'D':
20450 /* Like 'J' but get to the GT bit only. */
20451 gcc_assert (REG_P (x));
20453 /* Bit 1 is GT bit. */
20454 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20456 /* Add one for shift count in rlinm for scc. */
20457 fprintf (file, "%d", i + 1);
20458 return;
20460 case 'e':
20461 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20462 if (! INT_P (x))
20464 output_operand_lossage ("invalid %%e value");
20465 return;
20468 uval = INTVAL (x);
20469 if ((uval & 0xffff) == 0 && uval != 0)
20470 putc ('s', file);
20471 return;
20473 case 'E':
20474 /* X is a CR register. Print the number of the EQ bit of the CR */
20475 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20476 output_operand_lossage ("invalid %%E value");
20477 else
20478 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20479 return;
20481 case 'f':
20482 /* X is a CR register. Print the shift count needed to move it
20483 to the high-order four bits. */
20484 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20485 output_operand_lossage ("invalid %%f value");
20486 else
20487 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20488 return;
20490 case 'F':
20491 /* Similar, but print the count for the rotate in the opposite
20492 direction. */
20493 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20494 output_operand_lossage ("invalid %%F value");
20495 else
20496 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20497 return;
20499 case 'G':
20500 /* X is a constant integer. If it is negative, print "m",
20501 otherwise print "z". This is to make an aze or ame insn. */
20502 if (GET_CODE (x) != CONST_INT)
20503 output_operand_lossage ("invalid %%G value");
20504 else if (INTVAL (x) >= 0)
20505 putc ('z', file);
20506 else
20507 putc ('m', file);
20508 return;
20510 case 'h':
20511 /* If constant, output low-order five bits. Otherwise, write
20512 normally. */
20513 if (INT_P (x))
20514 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20515 else
20516 print_operand (file, x, 0);
20517 return;
20519 case 'H':
20520 /* If constant, output low-order six bits. Otherwise, write
20521 normally. */
20522 if (INT_P (x))
20523 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20524 else
20525 print_operand (file, x, 0);
20526 return;
20528 case 'I':
20529 /* Print `i' if this is a constant, else nothing. */
20530 if (INT_P (x))
20531 putc ('i', file);
20532 return;
20534 case 'j':
20535 /* Write the bit number in CCR for jump. */
20536 i = ccr_bit (x, 0);
20537 if (i == -1)
20538 output_operand_lossage ("invalid %%j code");
20539 else
20540 fprintf (file, "%d", i);
20541 return;
20543 case 'J':
20544 /* Similar, but add one for shift count in rlinm for scc and pass
20545 scc flag to `ccr_bit'. */
20546 i = ccr_bit (x, 1);
20547 if (i == -1)
20548 output_operand_lossage ("invalid %%J code");
20549 else
20550 /* If we want bit 31, write a shift count of zero, not 32. */
20551 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20552 return;
20554 case 'k':
20555 /* X must be a constant. Write the 1's complement of the
20556 constant. */
20557 if (! INT_P (x))
20558 output_operand_lossage ("invalid %%k value");
20559 else
20560 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20561 return;
20563 case 'K':
20564 /* X must be a symbolic constant on ELF. Write an
20565 expression suitable for an 'addi' that adds in the low 16
20566 bits of the MEM. */
20567 if (GET_CODE (x) == CONST)
20569 if (GET_CODE (XEXP (x, 0)) != PLUS
20570 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20571 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20572 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20573 output_operand_lossage ("invalid %%K value");
20575 print_operand_address (file, x);
20576 fputs ("@l", file);
20577 return;
20579 /* %l is output_asm_label. */
20581 case 'L':
20582 /* Write second word of DImode or DFmode reference. Works on register
20583 or non-indexed memory only. */
20584 if (REG_P (x))
20585 fputs (reg_names[REGNO (x) + 1], file);
20586 else if (MEM_P (x))
20588 machine_mode mode = GET_MODE (x);
20589 /* Handle possible auto-increment. Since it is pre-increment and
20590 we have already done it, we can just use an offset of word. */
20591 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20592 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20593 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20594 UNITS_PER_WORD));
20595 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20596 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20597 UNITS_PER_WORD));
20598 else
20599 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20600 UNITS_PER_WORD),
20601 0));
20603 if (small_data_operand (x, GET_MODE (x)))
20604 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20605 reg_names[SMALL_DATA_REG]);
20607 return;
20609 case 'N': /* Unused */
20610 /* Write the number of elements in the vector times 4. */
20611 if (GET_CODE (x) != PARALLEL)
20612 output_operand_lossage ("invalid %%N value");
20613 else
20614 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20615 return;
20617 case 'O': /* Unused */
20618 /* Similar, but subtract 1 first. */
20619 if (GET_CODE (x) != PARALLEL)
20620 output_operand_lossage ("invalid %%O value");
20621 else
20622 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20623 return;
20625 case 'p':
20626 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20627 if (! INT_P (x)
20628 || INTVAL (x) < 0
20629 || (i = exact_log2 (INTVAL (x))) < 0)
20630 output_operand_lossage ("invalid %%p value");
20631 else
20632 fprintf (file, "%d", i);
20633 return;
20635 case 'P':
20636 /* The operand must be an indirect memory reference. The result
20637 is the register name. */
20638 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20639 || REGNO (XEXP (x, 0)) >= 32)
20640 output_operand_lossage ("invalid %%P value");
20641 else
20642 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20643 return;
20645 case 'q':
20646 /* This outputs the logical code corresponding to a boolean
20647 expression. The expression may have one or both operands
20648 negated (if one, only the first one). For condition register
20649 logical operations, it will also treat the negated
20650 CR codes as NOTs, but not handle NOTs of them. */
20652 const char *const *t = 0;
20653 const char *s;
20654 enum rtx_code code = GET_CODE (x);
20655 static const char * const tbl[3][3] = {
20656 { "and", "andc", "nor" },
20657 { "or", "orc", "nand" },
20658 { "xor", "eqv", "xor" } };
20660 if (code == AND)
20661 t = tbl[0];
20662 else if (code == IOR)
20663 t = tbl[1];
20664 else if (code == XOR)
20665 t = tbl[2];
20666 else
20667 output_operand_lossage ("invalid %%q value");
20669 if (GET_CODE (XEXP (x, 0)) != NOT)
20670 s = t[0];
20671 else
20673 if (GET_CODE (XEXP (x, 1)) == NOT)
20674 s = t[2];
20675 else
20676 s = t[1];
20679 fputs (s, file);
20681 return;
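/* Examples: (and (not (reg)) (reg)) prints "andc", while
   (ior (not (reg)) (not (reg))) prints "nand".  */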
20683 case 'Q':
20684 if (! TARGET_MFCRF)
20685 return;
20686 fputc (',', file);
20687 /* FALLTHRU */
20689 case 'R':
20690 /* X is a CR register. Print the mask for `mtcrf'. */
20691 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20692 output_operand_lossage ("invalid %%R value");
20693 else
20694 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20695 return;
20697 case 's':
20698 /* Low 5 bits of 32 - value */
20699 if (! INT_P (x))
20700 output_operand_lossage ("invalid %%s value");
20701 else
20702 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20703 return;
20705 case 't':
20706 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20707 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20709 /* Bit 3 is OV bit. */
20710 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20712 /* If we want bit 31, write a shift count of zero, not 32. */
20713 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20714 return;
20716 case 'T':
20717 /* Print the symbolic name of a branch target register. */
20718 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20719 && REGNO (x) != CTR_REGNO))
20720 output_operand_lossage ("invalid %%T value");
20721 else if (REGNO (x) == LR_REGNO)
20722 fputs ("lr", file);
20723 else
20724 fputs ("ctr", file);
20725 return;
20727 case 'u':
20728 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20729 for use in unsigned operand. */
20730 if (! INT_P (x))
20732 output_operand_lossage ("invalid %%u value");
20733 return;
20736 uval = INTVAL (x);
20737 if ((uval & 0xffff) == 0)
20738 uval >>= 16;
20740 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20741 return;
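/* For instance, %u on 0x12340000 prints 0x1234 (the high half is the
   interesting part), while %u on 0x1234 prints 0x1234 directly.  */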
20743 case 'v':
20744 /* High-order 16 bits of constant for use in signed operand. */
20745 if (! INT_P (x))
20746 output_operand_lossage ("invalid %%v value");
20747 else
20748 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20749 (INTVAL (x) >> 16) & 0xffff);
20750 return;
20752 case 'U':
20753 /* Print `u' if this has an auto-increment or auto-decrement. */
20754 if (MEM_P (x)
20755 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20756 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20757 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20758 putc ('u', file);
20759 return;
20761 case 'V':
20762 /* Print the trap code for this operand. */
20763 switch (GET_CODE (x))
20765 case EQ:
20766 fputs ("eq", file); /* 4 */
20767 break;
20768 case NE:
20769 fputs ("ne", file); /* 24 */
20770 break;
20771 case LT:
20772 fputs ("lt", file); /* 16 */
20773 break;
20774 case LE:
20775 fputs ("le", file); /* 20 */
20776 break;
20777 case GT:
20778 fputs ("gt", file); /* 8 */
20779 break;
20780 case GE:
20781 fputs ("ge", file); /* 12 */
20782 break;
20783 case LTU:
20784 fputs ("llt", file); /* 2 */
20785 break;
20786 case LEU:
20787 fputs ("lle", file); /* 6 */
20788 break;
20789 case GTU:
20790 fputs ("lgt", file); /* 1 */
20791 break;
20792 case GEU:
20793 fputs ("lge", file); /* 5 */
20794 break;
20795 default:
20796 gcc_unreachable ();
20798 break;
20800 case 'w':
20801 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20802 normally. */
20803 if (INT_P (x))
20804 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20805 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20806 else
20807 print_operand (file, x, 0);
20808 return;
20810 case 'x':
20811 /* X is a FPR or Altivec register used in a VSX context. */
20812 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
20813 output_operand_lossage ("invalid %%x value");
20814 else
20816 int reg = REGNO (x);
20817 int vsx_reg = (FP_REGNO_P (reg)
20818 ? reg - 32
20819 : reg - FIRST_ALTIVEC_REGNO + 32);
20821 #ifdef TARGET_REGNAMES
20822 if (TARGET_REGNAMES)
20823 fprintf (file, "%%vs%d", vsx_reg);
20824 else
20825 #endif
20826 fprintf (file, "%d", vsx_reg);
20828 return;
20830 case 'X':
20831 if (MEM_P (x)
20832 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20833 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20834 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20835 putc ('x', file);
20836 return;
20838 case 'Y':
20839 /* Like 'L', for third word of TImode/PTImode */
20840 if (REG_P (x))
20841 fputs (reg_names[REGNO (x) + 2], file);
20842 else if (MEM_P (x))
20844 machine_mode mode = GET_MODE (x);
20845 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20846 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20847 output_address (mode, plus_constant (Pmode,
20848 XEXP (XEXP (x, 0), 0), 8));
20849 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20850 output_address (mode, plus_constant (Pmode,
20851 XEXP (XEXP (x, 0), 0), 8));
20852 else
20853 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20854 if (small_data_operand (x, GET_MODE (x)))
20855 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20856 reg_names[SMALL_DATA_REG]);
20858 return;
20860 case 'z':
20861 /* X is a SYMBOL_REF. Write out the name preceded by a
20862 period and without any trailing data in brackets. Used for function
20863 names. If we are configured for System V (or the embedded ABI) on
20864 the PowerPC, do not emit the period, since those systems do not use
20865 TOCs and the like. */
20866 gcc_assert (GET_CODE (x) == SYMBOL_REF);
20868 /* For macho, check to see if we need a stub. */
20869 if (TARGET_MACHO)
20871 const char *name = XSTR (x, 0);
20872 #if TARGET_MACHO
20873 if (darwin_emit_branch_islands
20874 && MACHOPIC_INDIRECT
20875 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20876 name = machopic_indirection_name (x, /*stub_p=*/true);
20877 #endif
20878 assemble_name (file, name);
20880 else if (!DOT_SYMBOLS)
20881 assemble_name (file, XSTR (x, 0));
20882 else
20883 rs6000_output_function_entry (file, XSTR (x, 0));
20884 return;
20886 case 'Z':
20887 /* Like 'L', for last word of TImode/PTImode. */
20888 if (REG_P (x))
20889 fputs (reg_names[REGNO (x) + 3], file);
20890 else if (MEM_P (x))
20892 machine_mode mode = GET_MODE (x);
20893 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20894 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20895 output_address (mode, plus_constant (Pmode,
20896 XEXP (XEXP (x, 0), 0), 12));
20897 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20898 output_address (mode, plus_constant (Pmode,
20899 XEXP (XEXP (x, 0), 0), 12));
20900 else
20901 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20902 if (small_data_operand (x, GET_MODE (x)))
20903 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20904 reg_names[SMALL_DATA_REG]);
20906 return;
20908 /* Print AltiVec memory operand. */
20909 case 'y':
20911 rtx tmp;
20913 gcc_assert (MEM_P (x));
20915 tmp = XEXP (x, 0);
20917 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20918 && GET_CODE (tmp) == AND
20919 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
20920 && INTVAL (XEXP (tmp, 1)) == -16)
20921 tmp = XEXP (tmp, 0);
20922 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20923 && GET_CODE (tmp) == PRE_MODIFY)
20924 tmp = XEXP (tmp, 1);
20925 if (REG_P (tmp))
20926 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20927 else
20929 if (GET_CODE (tmp) != PLUS
20930 || !REG_P (XEXP (tmp, 0))
20931 || !REG_P (XEXP (tmp, 1)))
20933 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
20934 break;
20937 if (REGNO (XEXP (tmp, 0)) == 0)
20938 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
20939 reg_names[ REGNO (XEXP (tmp, 0)) ]);
20940 else
20941 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
20942 reg_names[ REGNO (XEXP (tmp, 1)) ]);
20944 break;
20947 case 0:
20948 if (REG_P (x))
20949 fprintf (file, "%s", reg_names[REGNO (x)]);
20950 else if (MEM_P (x))
20952 /* We need to handle PRE_INC and PRE_DEC here, since we need to
20953 know the width from the mode. */
20954 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
20955 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
20956 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20957 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
20958 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
20959 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
20960 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20961 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
20962 else
20963 output_address (GET_MODE (x), XEXP (x, 0));
20965 else
20967 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
20968 /* This hack along with a corresponding hack in
20969 rs6000_output_addr_const_extra arranges to output addends
20970 where the assembler expects to find them. eg.
20971 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
20972 without this hack would be output as "x@toc+4". We
20973 want "x+4@toc". */
20974 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
20975 else
20976 output_addr_const (file, x);
20978 return;
20980 case '&':
20981 if (const char *name = get_some_local_dynamic_name ())
20982 assemble_name (file, name);
20983 else
20984 output_operand_lossage ("'%%&' used without any "
20985 "local dynamic TLS references");
20986 return;
20988 default:
20989 output_operand_lossage ("invalid %%xn code");
20993 /* Print the address of an operand. */
20995 void
20996 print_operand_address (FILE *file, rtx x)
20998 if (REG_P (x))
20999 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21000 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21001 || GET_CODE (x) == LABEL_REF)
21003 output_addr_const (file, x);
21004 if (small_data_operand (x, GET_MODE (x)))
21005 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21006 reg_names[SMALL_DATA_REG]);
21007 else
21008 gcc_assert (!TARGET_TOC);
21010 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21011 && REG_P (XEXP (x, 1)))
21013 if (REGNO (XEXP (x, 0)) == 0)
21014 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21015 reg_names[ REGNO (XEXP (x, 0)) ]);
21016 else
21017 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21018 reg_names[ REGNO (XEXP (x, 1)) ]);
21020 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21021 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21022 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21023 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21024 #if TARGET_MACHO
21025 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21026 && CONSTANT_P (XEXP (x, 1)))
21028 fprintf (file, "lo16(");
21029 output_addr_const (file, XEXP (x, 1));
21030 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21032 #endif
21033 #if TARGET_ELF
21034 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21035 && CONSTANT_P (XEXP (x, 1)))
21037 output_addr_const (file, XEXP (x, 1));
21038 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21040 #endif
21041 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21043 /* This hack along with a corresponding hack in
21044 rs6000_output_addr_const_extra arranges to output addends
21045 where the assembler expects to find them. eg.
21046 (lo_sum (reg 9)
21047 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21048 without this hack would be output as "x@toc+8@l(9)". We
21049 want "x+8@toc@l(9)". */
21050 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21051 if (GET_CODE (x) == LO_SUM)
21052 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21053 else
21054 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21056 else
21057 gcc_unreachable ();
21060 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21062 static bool
21063 rs6000_output_addr_const_extra (FILE *file, rtx x)
21065 if (GET_CODE (x) == UNSPEC)
21066 switch (XINT (x, 1))
21068 case UNSPEC_TOCREL:
21069 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21070 && REG_P (XVECEXP (x, 0, 1))
21071 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21072 output_addr_const (file, XVECEXP (x, 0, 0));
21073 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21075 if (INTVAL (tocrel_offset_oac) >= 0)
21076 fprintf (file, "+");
21077 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21079 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21081 putc ('-', file);
21082 assemble_name (file, toc_label_name);
21083 need_toc_init = 1;
21085 else if (TARGET_ELF)
21086 fputs ("@toc", file);
21087 return true;
21089 #if TARGET_MACHO
21090 case UNSPEC_MACHOPIC_OFFSET:
21091 output_addr_const (file, XVECEXP (x, 0, 0));
21092 putc ('-', file);
21093 machopic_output_function_base_name (file);
21094 return true;
21095 #endif
21097 return false;
21100 /* Target hook for assembling integer objects. The PowerPC version has
21101 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21102 is defined. It also needs to handle DI-mode objects on 64-bit
21103 targets. */
21105 static bool
21106 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21108 #ifdef RELOCATABLE_NEEDS_FIXUP
21109 /* Special handling for SI values. */
21110 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21112 static int recurse = 0;
21114 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21115 the .fixup section. Since the TOC section is already relocated, we
21116 don't need to mark it here. We used to skip the text section, but it
21117 should never be valid for relocated addresses to be placed in the text
21118 section. */
21119 if (DEFAULT_ABI == ABI_V4
21120 && (TARGET_RELOCATABLE || flag_pic > 1)
21121 && in_section != toc_section
21122 && !recurse
21123 && !CONST_SCALAR_INT_P (x)
21124 && CONSTANT_P (x))
21126 char buf[256];
21128 recurse = 1;
21129 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21130 fixuplabelno++;
21131 ASM_OUTPUT_LABEL (asm_out_file, buf);
21132 fprintf (asm_out_file, "\t.long\t(");
21133 output_addr_const (asm_out_file, x);
21134 fprintf (asm_out_file, ")@fixup\n");
21135 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21136 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21137 fprintf (asm_out_file, "\t.long\t");
21138 assemble_name (asm_out_file, buf);
21139 fprintf (asm_out_file, "\n\t.previous\n");
21140 recurse = 0;
21141 return true;
21143 /* Remove initial .'s to turn a -mcall-aixdesc function
21144 address into the address of the descriptor, not the function
21145 itself. */
21146 else if (GET_CODE (x) == SYMBOL_REF
21147 && XSTR (x, 0)[0] == '.'
21148 && DEFAULT_ABI == ABI_AIX)
21150 const char *name = XSTR (x, 0);
21151 while (*name == '.')
21152 name++;
21154 fprintf (asm_out_file, "\t.long\t%s\n", name);
21155 return true;
21158 #endif /* RELOCATABLE_NEEDS_FIXUP */
21159 return default_assemble_integer (x, size, aligned_p);
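/* The fixup path above produces assembly along these lines (the label
   number is illustrative):

	.LCP1:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP1
		.previous  */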
21162 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21163 /* Emit an assembler directive to set symbol visibility for DECL to
21164 VISIBILITY_TYPE. */
21166 static void
21167 rs6000_assemble_visibility (tree decl, int vis)
21169 if (TARGET_XCOFF)
21170 return;
21172 /* Functions need to have their entry point symbol visibility set as
21173 well as their descriptor symbol visibility. */
21174 if (DEFAULT_ABI == ABI_AIX
21175 && DOT_SYMBOLS
21176 && TREE_CODE (decl) == FUNCTION_DECL)
21178 static const char * const visibility_types[] = {
21179 NULL, "protected", "hidden", "internal"
21182 const char *name, *type;
21184 name = ((* targetm.strip_name_encoding)
21185 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21186 type = visibility_types[vis];
21188 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21189 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21191 else
21192 default_assemble_visibility (decl, vis);
21194 #endif
21196 enum rtx_code
21197 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21199 /* Reversal of FP compares takes care -- an ordered compare
21200 becomes an unordered compare and vice versa. */
21201 if (mode == CCFPmode
21202 && (!flag_finite_math_only
21203 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21204 || code == UNEQ || code == LTGT))
21205 return reverse_condition_maybe_unordered (code);
21206 else
21207 return reverse_condition (code);
21210 /* Generate a compare for CODE. Return a brand-new rtx that
21211 represents the result of the compare. */
21213 static rtx
21214 rs6000_generate_compare (rtx cmp, machine_mode mode)
21216 machine_mode comp_mode;
21217 rtx compare_result;
21218 enum rtx_code code = GET_CODE (cmp);
21219 rtx op0 = XEXP (cmp, 0);
21220 rtx op1 = XEXP (cmp, 1);
21222 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21223 comp_mode = CCmode;
21224 else if (FLOAT_MODE_P (mode))
21225 comp_mode = CCFPmode;
21226 else if (code == GTU || code == LTU
21227 || code == GEU || code == LEU)
21228 comp_mode = CCUNSmode;
21229 else if ((code == EQ || code == NE)
21230 && unsigned_reg_p (op0)
21231 && (unsigned_reg_p (op1)
21232 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21233 /* These are unsigned values; perhaps there will be a later
21234 ordering compare that can be shared with this one. */
21235 comp_mode = CCUNSmode;
21236 else
21237 comp_mode = CCmode;
21239 /* If we have an unsigned compare, make sure we don't have a signed value as
21240 an immediate. */
21241 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21242 && INTVAL (op1) < 0)
21244 op0 = copy_rtx_if_shared (op0);
21245 op1 = force_reg (GET_MODE (op0), op1);
21246 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21249 /* First, the compare. */
21250 compare_result = gen_reg_rtx (comp_mode);
21252 /* IEEE 128-bit support in VSX registers when we do not have hardware
21253 support. */
21254 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21256 rtx libfunc = NULL_RTX;
21257 bool check_nan = false;
21258 rtx dest;
21260 switch (code)
21262 case EQ:
21263 case NE:
21264 libfunc = optab_libfunc (eq_optab, mode);
21265 break;
21267 case GT:
21268 case GE:
21269 libfunc = optab_libfunc (ge_optab, mode);
21270 break;
21272 case LT:
21273 case LE:
21274 libfunc = optab_libfunc (le_optab, mode);
21275 break;
21277 case UNORDERED:
21278 case ORDERED:
21279 libfunc = optab_libfunc (unord_optab, mode);
21280 code = (code == UNORDERED) ? NE : EQ;
21281 break;
21283 case UNGE:
21284 case UNGT:
21285 check_nan = true;
21286 libfunc = optab_libfunc (ge_optab, mode);
21287 code = (code == UNGE) ? GE : GT;
21288 break;
21290 case UNLE:
21291 case UNLT:
21292 check_nan = true;
21293 libfunc = optab_libfunc (le_optab, mode);
21294 code = (code == UNLE) ? LE : LT;
21295 break;
21297 case UNEQ:
21298 case LTGT:
21299 check_nan = true;
21300 libfunc = optab_libfunc (eq_optab, mode);
21301 code = (code == UNEQ) ? EQ : NE;
21302 break;
21304 default:
21305 gcc_unreachable ();
21308 gcc_assert (libfunc);
21310 if (!check_nan)
21311 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21312 SImode, op0, mode, op1, mode);
21314 /* The library signals an exception for signalling NaNs, so we need to
21315 handle isgreater, etc. by first checking isordered. */
21316 else
21318 rtx ne_rtx, normal_dest, unord_dest;
21319 rtx unord_func = optab_libfunc (unord_optab, mode);
21320 rtx join_label = gen_label_rtx ();
21321 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21322 rtx unord_cmp = gen_reg_rtx (comp_mode);
21325 /* Test for either value being a NaN. */
21326 gcc_assert (unord_func);
21327 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21328 SImode, op0, mode, op1, mode);
21330 /* Set value (1) if either value is a NaN, and jump to the join
21331 label. */
21332 dest = gen_reg_rtx (SImode);
21333 emit_move_insn (dest, const1_rtx);
21334 emit_insn (gen_rtx_SET (unord_cmp,
21335 gen_rtx_COMPARE (comp_mode, unord_dest,
21336 const0_rtx)));
21338 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21339 emit_jump_insn (gen_rtx_SET (pc_rtx,
21340 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21341 join_ref,
21342 pc_rtx)));
21344 /* Do the normal comparison, knowing that the values are not
21345 NaNs. */
21346 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21347 SImode, op0, mode, op1, mode);
21349 emit_insn (gen_cstoresi4 (dest,
21350 gen_rtx_fmt_ee (code, SImode, normal_dest,
21351 const0_rtx),
21352 normal_dest, const0_rtx));
21354 /* Join NaN and non-NaN paths. Compare dest against 0. */
21355 emit_label (join_label);
21356 code = NE;
21359 emit_insn (gen_rtx_SET (compare_result,
21360 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21363 else
21365 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21366 CLOBBERs to match cmptf_internal2 pattern. */
21367 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21368 && FLOAT128_IBM_P (GET_MODE (op0))
21369 && TARGET_HARD_FLOAT)
21370 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21371 gen_rtvec (10,
21372 gen_rtx_SET (compare_result,
21373 gen_rtx_COMPARE (comp_mode, op0, op1)),
21374 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21375 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21376 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21377 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21378 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21379 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21380 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21381 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21382 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21383 else if (GET_CODE (op1) == UNSPEC
21384 && XINT (op1, 1) == UNSPEC_SP_TEST)
21386 rtx op1b = XVECEXP (op1, 0, 0);
21387 comp_mode = CCEQmode;
21388 compare_result = gen_reg_rtx (CCEQmode);
21389 if (TARGET_64BIT)
21390 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21391 else
21392 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21394 else
21395 emit_insn (gen_rtx_SET (compare_result,
21396 gen_rtx_COMPARE (comp_mode, op0, op1)));
21399 /* Some kinds of FP comparisons need an OR operation;
21400 under flag_finite_math_only we don't bother. */
21401 if (FLOAT_MODE_P (mode)
21402 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21403 && !flag_finite_math_only
21404 && (code == LE || code == GE
21405 || code == UNEQ || code == LTGT
21406 || code == UNGT || code == UNLT))
21408 enum rtx_code or1, or2;
21409 rtx or1_rtx, or2_rtx, compare2_rtx;
21410 rtx or_result = gen_reg_rtx (CCEQmode);
21412 switch (code)
21414 case LE: or1 = LT; or2 = EQ; break;
21415 case GE: or1 = GT; or2 = EQ; break;
21416 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21417 case LTGT: or1 = LT; or2 = GT; break;
21418 case UNGT: or1 = UNORDERED; or2 = GT; break;
21419 case UNLT: or1 = UNORDERED; or2 = LT; break;
21420 default: gcc_unreachable ();
21422 validate_condition_mode (or1, comp_mode);
21423 validate_condition_mode (or2, comp_mode);
21424 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21425 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21426 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21427 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21428 const_true_rtx);
21429 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21431 compare_result = or_result;
21432 code = EQ;
21435 validate_condition_mode (code, GET_MODE (compare_result));
21437 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
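/* For illustration: a floating-point LE is decomposed above into
   (LT || EQ) on the CR bits of the original compare.  The RTL this
   emits looks roughly like the following sketch (register numbers
   invented for the example):

     (set (reg:CCEQ 124)
          (compare:CCEQ (ior:SI (lt:SI (reg:CCFP 120) (const_int 0))
                                (eq:SI (reg:CCFP 120) (const_int 0)))
                        (const_int 1)))

   after which the caller tests the CCEQ register with EQ.  */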
21441 /* Return the diagnostic message string if the binary operation OP is
21442 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21444 static const char*
21445 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21446 const_tree type1,
21447 const_tree type2)
21449 machine_mode mode1 = TYPE_MODE (type1);
21450 machine_mode mode2 = TYPE_MODE (type2);
21452 /* For complex modes, use the inner type. */
21453 if (COMPLEX_MODE_P (mode1))
21454 mode1 = GET_MODE_INNER (mode1);
21456 if (COMPLEX_MODE_P (mode2))
21457 mode2 = GET_MODE_INNER (mode2);
21459 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21460 double to intermix unless -mfloat128-convert. */
21461 if (mode1 == mode2)
21462 return NULL;
21464 if (!TARGET_FLOAT128_CVT)
21466 if ((mode1 == KFmode && mode2 == IFmode)
21467 || (mode1 == IFmode && mode2 == KFmode))
21468 return N_("__float128 and __ibm128 cannot be used in the same "
21469 "expression");
21471 if (TARGET_IEEEQUAD
21472 && ((mode1 == IFmode && mode2 == TFmode)
21473 || (mode1 == TFmode && mode2 == IFmode)))
21474 return N_("__ibm128 and long double cannot be used in the same "
21475 "expression");
21477 if (!TARGET_IEEEQUAD
21478 && ((mode1 == KFmode && mode2 == TFmode)
21479 || (mode1 == TFmode && mode2 == KFmode)))
21480 return N_("__float128 and long double cannot be used in the same "
21481 "expression");
21484 return NULL;
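/* Usage illustration (hypothetical user code, not part of this file):
   with the default -mno-float128-convert, the checks above make the
   front end reject a mixed-format expression such as:

     __float128 f;
     __ibm128 g;
     __float128 h = f + g;   <-- "__float128 and __ibm128 cannot be
                                  used in the same expression"
 */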
21488 /* Expand floating point conversion to/from __float128 and __ibm128. */
21490 void
21491 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21493 machine_mode dest_mode = GET_MODE (dest);
21494 machine_mode src_mode = GET_MODE (src);
21495 convert_optab cvt = unknown_optab;
21496 bool do_move = false;
21497 rtx libfunc = NULL_RTX;
21498 rtx dest2;
21499 typedef rtx (*rtx_2func_t) (rtx, rtx);
21500 rtx_2func_t hw_convert = (rtx_2func_t)0;
21501 size_t kf_or_tf;
21503 struct hw_conv_t {
21504 rtx_2func_t from_df;
21505 rtx_2func_t from_sf;
21506 rtx_2func_t from_si_sign;
21507 rtx_2func_t from_si_uns;
21508 rtx_2func_t from_di_sign;
21509 rtx_2func_t from_di_uns;
21510 rtx_2func_t to_df;
21511 rtx_2func_t to_sf;
21512 rtx_2func_t to_si_sign;
21513 rtx_2func_t to_si_uns;
21514 rtx_2func_t to_di_sign;
21515 rtx_2func_t to_di_uns;
21516 } hw_conversions[2] = {
21517 /* conversions to/from KFmode */
21519 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21520 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21521 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21522 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21523 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21524 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21525 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21526 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21527 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21528 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21529 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21530 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21533 /* conversions to/from TFmode */
21535 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21536 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21537 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21538 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21539 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21540 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21541 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21542 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21543 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21544 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21545 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21546 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21550 if (dest_mode == src_mode)
21551 gcc_unreachable ();
21553 /* Eliminate memory operations. */
21554 if (MEM_P (src))
21555 src = force_reg (src_mode, src);
21557 if (MEM_P (dest))
21559 rtx tmp = gen_reg_rtx (dest_mode);
21560 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21561 rs6000_emit_move (dest, tmp, dest_mode);
21562 return;
21565 /* Convert to IEEE 128-bit floating point. */
21566 if (FLOAT128_IEEE_P (dest_mode))
21568 if (dest_mode == KFmode)
21569 kf_or_tf = 0;
21570 else if (dest_mode == TFmode)
21571 kf_or_tf = 1;
21572 else
21573 gcc_unreachable ();
21575 switch (src_mode)
21577 case E_DFmode:
21578 cvt = sext_optab;
21579 hw_convert = hw_conversions[kf_or_tf].from_df;
21580 break;
21582 case E_SFmode:
21583 cvt = sext_optab;
21584 hw_convert = hw_conversions[kf_or_tf].from_sf;
21585 break;
21587 case E_KFmode:
21588 case E_IFmode:
21589 case E_TFmode:
21590 if (FLOAT128_IBM_P (src_mode))
21591 cvt = sext_optab;
21592 else
21593 do_move = true;
21594 break;
21596 case E_SImode:
21597 if (unsigned_p)
21599 cvt = ufloat_optab;
21600 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21602 else
21604 cvt = sfloat_optab;
21605 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21607 break;
21609 case E_DImode:
21610 if (unsigned_p)
21612 cvt = ufloat_optab;
21613 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21615 else
21617 cvt = sfloat_optab;
21618 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21620 break;
21622 default:
21623 gcc_unreachable ();
21627 /* Convert from IEEE 128-bit floating point. */
21628 else if (FLOAT128_IEEE_P (src_mode))
21630 if (src_mode == KFmode)
21631 kf_or_tf = 0;
21632 else if (src_mode == TFmode)
21633 kf_or_tf = 1;
21634 else
21635 gcc_unreachable ();
21637 switch (dest_mode)
21639 case E_DFmode:
21640 cvt = trunc_optab;
21641 hw_convert = hw_conversions[kf_or_tf].to_df;
21642 break;
21644 case E_SFmode:
21645 cvt = trunc_optab;
21646 hw_convert = hw_conversions[kf_or_tf].to_sf;
21647 break;
21649 case E_KFmode:
21650 case E_IFmode:
21651 case E_TFmode:
21652 if (FLOAT128_IBM_P (dest_mode))
21653 cvt = trunc_optab;
21654 else
21655 do_move = true;
21656 break;
21658 case E_SImode:
21659 if (unsigned_p)
21661 cvt = ufix_optab;
21662 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
21664 else
21666 cvt = sfix_optab;
21667 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
21669 break;
21671 case E_DImode:
21672 if (unsigned_p)
21674 cvt = ufix_optab;
21675 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
21677 else
21679 cvt = sfix_optab;
21680 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
21682 break;
21684 default:
21685 gcc_unreachable ();
21689 /* Both IBM format. */
21690 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
21691 do_move = true;
21693 else
21694 gcc_unreachable ();
21696 /* Handle conversion between TFmode/KFmode/IFmode. */
21697 if (do_move)
21698 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
21700 /* Handle conversion if we have hardware support. */
21701 else if (TARGET_FLOAT128_HW && hw_convert)
21702 emit_insn ((hw_convert) (dest, src));
21704 /* Call an external function to do the conversion. */
21705 else if (cvt != unknown_optab)
21707 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
21708 gcc_assert (libfunc != NULL_RTX);
21710 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
21711 src, src_mode);
21713 gcc_assert (dest2 != NULL_RTX);
21714 if (!rtx_equal_p (dest, dest2))
21715 emit_move_insn (dest, dest2);
21718 else
21719 gcc_unreachable ();
21721 return;
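/* To summarize the dispatch above: same-format conversions become plain
   moves, ISA 3.0 hardware (TARGET_FLOAT128_HW) uses a single insn from
   the hw_conversions table, and everything else goes through the optab
   libcall.  A sketch of the expected lowering (illustrative, not
   something this file asserts):

     __float128 f (double d) { return d; }

   becomes either the DFmode -> KFmode hardware extend (a power9
   xscvdpqp-style instruction) or a call to the libgcc routine
   __extenddfkf2.  */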
21725 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
21726 can be used as that dest register. Return the dest register. */
21728 rtx
21729 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
21731 if (op2 == const0_rtx)
21732 return op1;
21734 if (GET_CODE (scratch) == SCRATCH)
21735 scratch = gen_reg_rtx (mode);
21737 if (logical_operand (op2, mode))
21738 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
21739 else
21740 emit_insn (gen_rtx_SET (scratch,
21741 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
21743 return scratch;
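/* The helper above uses the identity that OP1 == OP2 exactly when
   OP1 ^ OP2 (equivalently OP1 - OP2) is zero, so the caller can compare
   the returned register against zero.  Sketch of the value computed
   (exposition only):

     scratch = logical_operand (op2) ? (op1 ^ op2) : (op1 - op2);
 */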
21746 void
21747 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
21749 rtx condition_rtx;
21750 machine_mode op_mode;
21751 enum rtx_code cond_code;
21752 rtx result = operands[0];
21754 condition_rtx = rs6000_generate_compare (operands[1], mode);
21755 cond_code = GET_CODE (condition_rtx);
21757 if (cond_code == NE
21758 || cond_code == GE || cond_code == LE
21759 || cond_code == GEU || cond_code == LEU
21760 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
21762 rtx not_result = gen_reg_rtx (CCEQmode);
21763 rtx not_op, rev_cond_rtx;
21764 machine_mode cc_mode;
21766 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
21768 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
21769 SImode, XEXP (condition_rtx, 0), const0_rtx);
21770 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
21771 emit_insn (gen_rtx_SET (not_result, not_op));
21772 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
21775 op_mode = GET_MODE (XEXP (operands[1], 0));
21776 if (op_mode == VOIDmode)
21777 op_mode = GET_MODE (XEXP (operands[1], 1));
21779 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
21781 PUT_MODE (condition_rtx, DImode);
21782 convert_move (result, condition_rtx, 0);
21784 else
21786 PUT_MODE (condition_rtx, SImode);
21787 emit_insn (gen_rtx_SET (result, condition_rtx));
21791 /* Emit a branch of kind CODE to location LOC. */
21793 void
21794 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
21796 rtx condition_rtx, loc_ref;
21798 condition_rtx = rs6000_generate_compare (operands[0], mode);
21799 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
21800 emit_jump_insn (gen_rtx_SET (pc_rtx,
21801 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
21802 loc_ref, pc_rtx)));
21805 /* Return the string to output a conditional branch to LABEL, which is
21806 the operand template of the label, or NULL if the branch is really a
21807 conditional return.
21809 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
21810 condition code register and its mode specifies what kind of
21811 comparison we made.
21813 REVERSED is nonzero if we should reverse the sense of the comparison.
21815 INSN is the insn. */
21817 char *
21818 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
21820 static char string[64];
21821 enum rtx_code code = GET_CODE (op);
21822 rtx cc_reg = XEXP (op, 0);
21823 machine_mode mode = GET_MODE (cc_reg);
21824 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
21825 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
21826 int really_reversed = reversed ^ need_longbranch;
21827 char *s = string;
21828 const char *ccode;
21829 const char *pred;
21830 rtx note;
21832 validate_condition_mode (code, mode);
21834 /* Work out which way this really branches. We could use
21835 reverse_condition_maybe_unordered here always but this
21836 makes the resulting assembler clearer. */
21837 if (really_reversed)
21839 /* Reversal of FP compares takes care -- an ordered compare
21840 becomes an unordered compare and vice versa. */
21841 if (mode == CCFPmode)
21842 code = reverse_condition_maybe_unordered (code);
21843 else
21844 code = reverse_condition (code);
21847 switch (code)
21849 /* Not all of these are actually distinct opcodes, but
21850 we distinguish them for clarity of the resulting assembler. */
21851 case NE: case LTGT:
21852 ccode = "ne"; break;
21853 case EQ: case UNEQ:
21854 ccode = "eq"; break;
21855 case GE: case GEU:
21856 ccode = "ge"; break;
21857 case GT: case GTU: case UNGT:
21858 ccode = "gt"; break;
21859 case LE: case LEU:
21860 ccode = "le"; break;
21861 case LT: case LTU: case UNLT:
21862 ccode = "lt"; break;
21863 case UNORDERED: ccode = "un"; break;
21864 case ORDERED: ccode = "nu"; break;
21865 case UNGE: ccode = "nl"; break;
21866 case UNLE: ccode = "ng"; break;
21867 default:
21868 gcc_unreachable ();
21871 /* Maybe we have a guess as to how likely the branch is. */
21872 pred = "";
21873 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
21874 if (note != NULL_RTX)
21876 /* PROB is the difference from 50%. */
21877 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
21878 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
21880 /* Only hint for highly probable/improbable branches on newer cpus when
21881 we have real profile data, as static prediction overrides processor
21882 dynamic prediction. For older cpus we may as well always hint, but
21883 assume not taken for branches that are very close to 50% as a
21884 mispredicted taken branch is more expensive than a
21885 mispredicted not-taken branch. */
21886 if (rs6000_always_hint
21887 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
21888 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
21889 && br_prob_note_reliable_p (note)))
21891 if (abs (prob) > REG_BR_PROB_BASE / 20
21892 && ((prob > 0) ^ need_longbranch))
21893 pred = "+";
21894 else
21895 pred = "-";
21899 if (label == NULL)
21900 s += sprintf (s, "b%slr%s ", ccode, pred);
21901 else
21902 s += sprintf (s, "b%s%s ", ccode, pred);
21904 /* We need to escape any '%' characters in the reg_names string.
21905 Assume they'd only be the first character.... */
21906 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
21907 *s++ = '%';
21908 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
21910 if (label != NULL)
21912 /* If the branch distance was too far, we may have to use an
21913 unconditional branch to go the distance. */
21914 if (need_longbranch)
21915 s += sprintf (s, ",$+8\n\tb %s", label);
21916 else
21917 s += sprintf (s, ",%s", label);
21920 return string;
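/* Illustrative return values (labels, CR fields and hints vary with the
   insn; a sketch, not an exhaustive list):

     "beq 0,.L23"              plain short branch on CR0
     "bne- 7,.L23"             hinted not-taken branch on CR7
     "bne 0,$+8\n\tb .L23"     out-of-range EQ branch, reversed and
                               followed by an unconditional branch
     "beqlr 0"                 conditional return (LABEL == NULL)
 */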
21923 /* Return insn for VSX or Altivec comparisons. */
21925 static rtx
21926 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
21928 rtx mask;
21929 machine_mode mode = GET_MODE (op0);
21931 switch (code)
21933 default:
21934 break;
21936 case GE:
21937 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
21938 return NULL_RTX;
21939 /* FALLTHRU */
21941 case EQ:
21942 case GT:
21943 case GTU:
21944 case ORDERED:
21945 case UNORDERED:
21946 case UNEQ:
21947 case LTGT:
21948 mask = gen_reg_rtx (mode);
21949 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
21950 return mask;
21953 return NULL_RTX;
21956 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
21957 DMODE is expected destination mode. This is a recursive function. */
21959 static rtx
21960 rs6000_emit_vector_compare (enum rtx_code rcode,
21961 rtx op0, rtx op1,
21962 machine_mode dmode)
21964 rtx mask;
21965 bool swap_operands = false;
21966 bool try_again = false;
21968 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
21969 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
21971 /* See if the comparison works as is. */
21972 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
21973 if (mask)
21974 return mask;
21976 switch (rcode)
21978 case LT:
21979 rcode = GT;
21980 swap_operands = true;
21981 try_again = true;
21982 break;
21983 case LTU:
21984 rcode = GTU;
21985 swap_operands = true;
21986 try_again = true;
21987 break;
21988 case NE:
21989 case UNLE:
21990 case UNLT:
21991 case UNGE:
21992 case UNGT:
21993 /* Invert condition and try again.
21994 e.g., A != B becomes ~(A==B). */
21996 enum rtx_code rev_code;
21997 enum insn_code nor_code;
21998 rtx mask2;
22000 rev_code = reverse_condition_maybe_unordered (rcode);
22001 if (rev_code == UNKNOWN)
22002 return NULL_RTX;
22004 nor_code = optab_handler (one_cmpl_optab, dmode);
22005 if (nor_code == CODE_FOR_nothing)
22006 return NULL_RTX;
22008 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22009 if (!mask2)
22010 return NULL_RTX;
22012 mask = gen_reg_rtx (dmode);
22013 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22014 return mask;
22016 break;
22017 case GE:
22018 case GEU:
22019 case LE:
22020 case LEU:
22021 /* Try GT/GTU/LT/LTU OR EQ */
22023 rtx c_rtx, eq_rtx;
22024 enum insn_code ior_code;
22025 enum rtx_code new_code;
22027 switch (rcode)
22029 case GE:
22030 new_code = GT;
22031 break;
22033 case GEU:
22034 new_code = GTU;
22035 break;
22037 case LE:
22038 new_code = LT;
22039 break;
22041 case LEU:
22042 new_code = LTU;
22043 break;
22045 default:
22046 gcc_unreachable ();
22049 ior_code = optab_handler (ior_optab, dmode);
22050 if (ior_code == CODE_FOR_nothing)
22051 return NULL_RTX;
22053 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22054 if (!c_rtx)
22055 return NULL_RTX;
22057 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22058 if (!eq_rtx)
22059 return NULL_RTX;
22061 mask = gen_reg_rtx (dmode);
22062 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22063 return mask;
22065 break;
22066 default:
22067 return NULL_RTX;
22070 if (try_again)
22072 if (swap_operands)
22073 std::swap (op0, op1);
22075 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22076 if (mask)
22077 return mask;
22080 /* You only get two chances. */
22081 return NULL_RTX;
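/* Examples of the synthesis above, sketched in terms of the AltiVec
   intrinsics (which is not what this file literally calls):

     a <= b   ->  vec_or (vec_cmplt (a, b), vec_cmpeq (a, b))
     a != b   ->  ~vec_cmpeq (a, b)        (via one_cmpl_optab)
     a <  b   ->  vec_cmpgt (b, a)         (operands swapped)
 */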
22084 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22085 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22086 operands for the relation operation COND. */
22088 int
22089 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22090 rtx cond, rtx cc_op0, rtx cc_op1)
22092 machine_mode dest_mode = GET_MODE (dest);
22093 machine_mode mask_mode = GET_MODE (cc_op0);
22094 enum rtx_code rcode = GET_CODE (cond);
22095 machine_mode cc_mode = CCmode;
22096 rtx mask;
22097 rtx cond2;
22098 bool invert_move = false;
22100 if (VECTOR_UNIT_NONE_P (dest_mode))
22101 return 0;
22103 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22104 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22106 switch (rcode)
22108 /* Swap operands if we can, and fall back to doing the operation as
22109 specified, and doing a NOR to invert the test. */
22110 case NE:
22111 case UNLE:
22112 case UNLT:
22113 case UNGE:
22114 case UNGT:
22115 /* Invert condition and try again.
22116 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22117 invert_move = true;
22118 rcode = reverse_condition_maybe_unordered (rcode);
22119 if (rcode == UNKNOWN)
22120 return 0;
22121 break;
22123 case GE:
22124 case LE:
22125 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22127 /* Invert condition to avoid compound test. */
22128 invert_move = true;
22129 rcode = reverse_condition (rcode);
22131 break;
22133 case GTU:
22134 case GEU:
22135 case LTU:
22136 case LEU:
22137 /* Mark unsigned tests with CCUNSmode. */
22138 cc_mode = CCUNSmode;
22140 /* Invert condition to avoid compound test if necessary. */
22141 if (rcode == GEU || rcode == LEU)
22143 invert_move = true;
22144 rcode = reverse_condition (rcode);
22146 break;
22148 default:
22149 break;
22152 /* Get the vector mask for the given relational operations. */
22153 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22155 if (!mask)
22156 return 0;
22158 if (invert_move)
22159 std::swap (op_true, op_false);
22161 /* Optimize the constant-arm cases, using the fact that the comparison mask is -1/0 per element. */
22162 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22163 && (GET_CODE (op_true) == CONST_VECTOR
22164 || GET_CODE (op_false) == CONST_VECTOR))
22166 rtx constant_0 = CONST0_RTX (dest_mode);
22167 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22169 if (op_true == constant_m1 && op_false == constant_0)
22171 emit_move_insn (dest, mask);
22172 return 1;
22175 else if (op_true == constant_0 && op_false == constant_m1)
22177 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22178 return 1;
22181 /* If we can't use the vector comparison directly, perhaps we can use
22182 the mask for the true or false fields, instead of loading up a
22183 constant. */
22184 if (op_true == constant_m1)
22185 op_true = mask;
22187 if (op_false == constant_0)
22188 op_false = mask;
22191 if (!REG_P (op_true) && !SUBREG_P (op_true))
22192 op_true = force_reg (dest_mode, op_true);
22194 if (!REG_P (op_false) && !SUBREG_P (op_false))
22195 op_false = force_reg (dest_mode, op_false);
22197 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22198 CONST0_RTX (dest_mode));
22199 emit_insn (gen_rtx_SET (dest,
22200 gen_rtx_IF_THEN_ELSE (dest_mode,
22201 cond2,
22202 op_true,
22203 op_false)));
22204 return 1;
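/* Element-wise, the select emitted above computes

     dest = (mask & op_true) | (~mask & op_false)

   because each mask element is all-ones or all-zeros; when the arms are
   literally the constants -1 and 0, the mask (or its complement) is the
   result, which is the special case handled first above.  */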
22207 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22208 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22209 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22210 0 if the hardware has no such operation. */
22212 static int
22213 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22215 enum rtx_code code = GET_CODE (op);
22216 rtx op0 = XEXP (op, 0);
22217 rtx op1 = XEXP (op, 1);
22218 machine_mode compare_mode = GET_MODE (op0);
22219 machine_mode result_mode = GET_MODE (dest);
22220 bool max_p = false;
22222 if (result_mode != compare_mode)
22223 return 0;
22225 if (code == GE || code == GT)
22226 max_p = true;
22227 else if (code == LE || code == LT)
22228 max_p = false;
22229 else
22230 return 0;
22232 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22235 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22236 max_p = !max_p;
22238 else
22239 return 0;
22241 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22242 return 1;
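/* Illustrative mapping for the subcase above (a sketch):

     a >= b ? a : b   ->  xsmaxcdp       (code GE, max_p)
     a >= b ? b : a   ->  xsmincdp       (reversed arms flip max_p)

   Any pattern that is not a pure min/max of the compared operands
   returns 0 here and is handled by the general cmove code instead.  */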
22245 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22246 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22247 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22248 zero/false. Return 0 if the hardware has no such operation. */
22250 static int
22251 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22253 enum rtx_code code = GET_CODE (op);
22254 rtx op0 = XEXP (op, 0);
22255 rtx op1 = XEXP (op, 1);
22256 machine_mode result_mode = GET_MODE (dest);
22257 rtx compare_rtx;
22258 rtx cmove_rtx;
22259 rtx clobber_rtx;
22261 if (!can_create_pseudo_p ())
22262 return 0;
22264 switch (code)
22266 case EQ:
22267 case GE:
22268 case GT:
22269 break;
22271 case NE:
22272 case LT:
22273 case LE:
22274 code = swap_condition (code);
22275 std::swap (op0, op1);
22276 break;
22278 default:
22279 return 0;
22282 /* Generate: [(parallel [(set (dest)
22283 (if_then_else (op (cmp1) (cmp2))
22284 (true)
22285 (false)))
22286 (clobber (scratch))])]. */
22288 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22289 cmove_rtx = gen_rtx_SET (dest,
22290 gen_rtx_IF_THEN_ELSE (result_mode,
22291 compare_rtx,
22292 true_cond,
22293 false_cond));
22295 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22296 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22297 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22299 return 1;
22302 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22303 operands of the last comparison is nonzero/true, FALSE_COND if it
22304 is zero/false. Return 0 if the hardware has no such operation. */
22306 int
22307 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22309 enum rtx_code code = GET_CODE (op);
22310 rtx op0 = XEXP (op, 0);
22311 rtx op1 = XEXP (op, 1);
22312 machine_mode compare_mode = GET_MODE (op0);
22313 machine_mode result_mode = GET_MODE (dest);
22314 rtx temp;
22315 bool is_against_zero;
22317 /* These modes should always match. */
22318 if (GET_MODE (op1) != compare_mode
22319 /* In the isel case however, we can use a compare immediate, so
22320 op1 may be a small constant. */
22321 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22322 return 0;
22323 if (GET_MODE (true_cond) != result_mode)
22324 return 0;
22325 if (GET_MODE (false_cond) != result_mode)
22326 return 0;
22328 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22329 if (TARGET_P9_MINMAX
22330 && (compare_mode == SFmode || compare_mode == DFmode)
22331 && (result_mode == SFmode || result_mode == DFmode))
22333 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22334 return 1;
22336 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22337 return 1;
22340 /* Don't allow using floating point comparisons for integer results for
22341 now. */
22342 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22343 return 0;
22345 /* First, work out if the hardware can do this at all, or
22346 if it's too slow.... */
22347 if (!FLOAT_MODE_P (compare_mode))
22349 if (TARGET_ISEL)
22350 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22351 return 0;
22354 is_against_zero = op1 == CONST0_RTX (compare_mode);
22356 /* A floating-point subtract might overflow, underflow, or produce
22357 an inexact result, thus changing the floating-point flags, so it
22358 can't be generated if we care about that. It's safe if one side
22359 of the construct is zero, since then no subtract will be
22360 generated. */
22361 if (SCALAR_FLOAT_MODE_P (compare_mode)
22362 && flag_trapping_math && ! is_against_zero)
22363 return 0;
22365 /* Eliminate half of the comparisons by switching operands, this
22366 makes the remaining code simpler. */
22367 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22368 || code == LTGT || code == LT || code == UNLE)
22370 code = reverse_condition_maybe_unordered (code);
22371 temp = true_cond;
22372 true_cond = false_cond;
22373 false_cond = temp;
22376 /* UNEQ and LTGT take four instructions for a comparison with zero,
22377 so it'll probably be faster to use a branch here too. */
22378 if (code == UNEQ && HONOR_NANS (compare_mode))
22379 return 0;
22381 /* We're going to try to implement comparisons by performing
22382 a subtract, then comparing against zero. Unfortunately,
22383 Inf - Inf is NaN which is not zero, and so if we don't
22384 know that the operand is finite and the comparison
22385 would treat EQ different to UNORDERED, we can't do it. */
22386 if (HONOR_INFINITIES (compare_mode)
22387 && code != GT && code != UNGE
22388 && (GET_CODE (op1) != CONST_DOUBLE
22389 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22390 /* Constructs of the form (a OP b ? a : b) are safe. */
22391 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22392 || (! rtx_equal_p (op0, true_cond)
22393 && ! rtx_equal_p (op1, true_cond))))
22394 return 0;
22396 /* At this point we know we can use fsel. */
22398 /* Reduce the comparison to a comparison against zero. */
22399 if (! is_against_zero)
22401 temp = gen_reg_rtx (compare_mode);
22402 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22403 op0 = temp;
22404 op1 = CONST0_RTX (compare_mode);
22407 /* If we don't care about NaNs we can reduce some of the comparisons
22408 down to faster ones. */
22409 if (! HONOR_NANS (compare_mode))
22410 switch (code)
22412 case GT:
22413 code = LE;
22414 temp = true_cond;
22415 true_cond = false_cond;
22416 false_cond = temp;
22417 break;
22418 case UNGE:
22419 code = GE;
22420 break;
22421 case UNEQ:
22422 code = EQ;
22423 break;
22424 default:
22425 break;
22428 /* Now, reduce everything down to a GE. */
22429 switch (code)
22431 case GE:
22432 break;
22434 case LE:
22435 temp = gen_reg_rtx (compare_mode);
22436 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22437 op0 = temp;
22438 break;
22440 case ORDERED:
22441 temp = gen_reg_rtx (compare_mode);
22442 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22443 op0 = temp;
22444 break;
22446 case EQ:
22447 temp = gen_reg_rtx (compare_mode);
22448 emit_insn (gen_rtx_SET (temp,
22449 gen_rtx_NEG (compare_mode,
22450 gen_rtx_ABS (compare_mode, op0))));
22451 op0 = temp;
22452 break;
22454 case UNGE:
22455 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22456 temp = gen_reg_rtx (result_mode);
22457 emit_insn (gen_rtx_SET (temp,
22458 gen_rtx_IF_THEN_ELSE (result_mode,
22459 gen_rtx_GE (VOIDmode,
22460 op0, op1),
22461 true_cond, false_cond)));
22462 false_cond = true_cond;
22463 true_cond = temp;
22465 temp = gen_reg_rtx (compare_mode);
22466 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22467 op0 = temp;
22468 break;
22470 case GT:
22471 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22472 temp = gen_reg_rtx (result_mode);
22473 emit_insn (gen_rtx_SET (temp,
22474 gen_rtx_IF_THEN_ELSE (result_mode,
22475 gen_rtx_GE (VOIDmode,
22476 op0, op1),
22477 true_cond, false_cond)));
22478 true_cond = false_cond;
22479 false_cond = temp;
22481 temp = gen_reg_rtx (compare_mode);
22482 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22483 op0 = temp;
22484 break;
22486 default:
22487 gcc_unreachable ();
22490 emit_insn (gen_rtx_SET (dest,
22491 gen_rtx_IF_THEN_ELSE (result_mode,
22492 gen_rtx_GE (VOIDmode,
22493 op0, op1),
22494 true_cond, false_cond)));
22495 return 1;
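/* Sketch of the fsel lowering above: every surviving comparison is first
   rewritten as "x >= 0" and then lowered as

     dest = (x >= 0) ? true_cond : false_cond   ->  fsel

   e.g. LE negates the difference (x = b - a), and EQ uses
   x = -fabs(a - b) so that only equality makes x nonnegative.  */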
22498 /* Same as above, but for ints (isel). */
22500 int
22501 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22503 rtx condition_rtx, cr;
22504 machine_mode mode = GET_MODE (dest);
22505 enum rtx_code cond_code;
22506 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22507 bool signedp;
22509 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22510 return 0;
22512 /* We still have to do the compare, because isel doesn't do a
22513 compare, it just looks at the CRx bits set by a previous compare
22514 instruction. */
22515 condition_rtx = rs6000_generate_compare (op, mode);
22516 cond_code = GET_CODE (condition_rtx);
22517 cr = XEXP (condition_rtx, 0);
22518 signedp = GET_MODE (cr) == CCmode;
22520 isel_func = (mode == SImode
22521 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22522 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22524 switch (cond_code)
22526 case LT: case GT: case LTU: case GTU: case EQ:
22527 /* isel handles these directly. */
22528 break;
22530 default:
22531 /* We need to swap the sense of the comparison. */
22533 std::swap (false_cond, true_cond);
22534 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22536 break;
22539 false_cond = force_reg (mode, false_cond);
22540 if (true_cond != const0_rtx)
22541 true_cond = force_reg (mode, true_cond);
22543 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22545 return 1;
22548 void
22549 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22551 machine_mode mode = GET_MODE (op0);
22552 enum rtx_code c;
22553 rtx target;
22555 /* VSX/altivec have direct min/max insns. */
22556 if ((code == SMAX || code == SMIN)
22557 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22558 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22560 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22561 return;
22564 if (code == SMAX || code == SMIN)
22565 c = GE;
22566 else
22567 c = GEU;
22569 if (code == SMAX || code == UMAX)
22570 target = emit_conditional_move (dest, c, op0, op1, mode,
22571 op0, op1, mode, 0);
22572 else
22573 target = emit_conditional_move (dest, c, op0, op1, mode,
22574 op1, op0, mode, 0);
22575 gcc_assert (target);
22576 if (target != dest)
22577 emit_move_insn (dest, target);
22580 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22581 COND is true. Mark the jump as unlikely to be taken. */
22583 static void
22584 emit_unlikely_jump (rtx cond, rtx label)
22586 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22587 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22588 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22591 /* A subroutine of the atomic operation splitters. Emit a load-locked
22592 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22593 the zero_extend operation. */
22595 static void
22596 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22598 rtx (*fn) (rtx, rtx) = NULL;
22600 switch (mode)
22602 case E_QImode:
22603 fn = gen_load_lockedqi;
22604 break;
22605 case E_HImode:
22606 fn = gen_load_lockedhi;
22607 break;
22608 case E_SImode:
22609 if (GET_MODE (mem) == QImode)
22610 fn = gen_load_lockedqi_si;
22611 else if (GET_MODE (mem) == HImode)
22612 fn = gen_load_lockedhi_si;
22613 else
22614 fn = gen_load_lockedsi;
22615 break;
22616 case E_DImode:
22617 fn = gen_load_lockeddi;
22618 break;
22619 case E_TImode:
22620 fn = gen_load_lockedti;
22621 break;
22622 default:
22623 gcc_unreachable ();
22625 emit_insn (fn (reg, mem));
22628 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22629 instruction in MODE. */
22631 static void
22632 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22634 rtx (*fn) (rtx, rtx, rtx) = NULL;
22636 switch (mode)
22638 case E_QImode:
22639 fn = gen_store_conditionalqi;
22640 break;
22641 case E_HImode:
22642 fn = gen_store_conditionalhi;
22643 break;
22644 case E_SImode:
22645 fn = gen_store_conditionalsi;
22646 break;
22647 case E_DImode:
22648 fn = gen_store_conditionaldi;
22649 break;
22650 case E_TImode:
22651 fn = gen_store_conditionalti;
22652 break;
22653 default:
22654 gcc_unreachable ();
22657 /* Emit sync before stwcx. to address PPC405 Erratum. */
22658 if (PPC405_ERRATUM77)
22659 emit_insn (gen_hwsync ());
22661 emit_insn (fn (res, mem, val));
22664 /* Expand barriers before and after a load_locked/store_cond sequence. */
22666 static rtx
22667 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
22669 rtx addr = XEXP (mem, 0);
22671 if (!legitimate_indirect_address_p (addr, reload_completed)
22672 && !legitimate_indexed_address_p (addr, reload_completed))
22674 addr = force_reg (Pmode, addr);
22675 mem = replace_equiv_address_nv (mem, addr);
22678 switch (model)
22680 case MEMMODEL_RELAXED:
22681 case MEMMODEL_CONSUME:
22682 case MEMMODEL_ACQUIRE:
22683 break;
22684 case MEMMODEL_RELEASE:
22685 case MEMMODEL_ACQ_REL:
22686 emit_insn (gen_lwsync ());
22687 break;
22688 case MEMMODEL_SEQ_CST:
22689 emit_insn (gen_hwsync ());
22690 break;
22691 default:
22692 gcc_unreachable ();
22694 return mem;
22697 static void
22698 rs6000_post_atomic_barrier (enum memmodel model)
22700 switch (model)
22702 case MEMMODEL_RELAXED:
22703 case MEMMODEL_CONSUME:
22704 case MEMMODEL_RELEASE:
22705 break;
22706 case MEMMODEL_ACQUIRE:
22707 case MEMMODEL_ACQ_REL:
22708 case MEMMODEL_SEQ_CST:
22709 emit_insn (gen_isync ());
22710 break;
22711 default:
22712 gcc_unreachable ();
22716 /* A subroutine of the various atomic expanders. For sub-word operations,
22717 we must adjust things to operate on SImode. Given the original MEM,
22718 return a new aligned memory. Also build and return the quantities by
22719 which to shift and mask. */
22721 static rtx
22722 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
22724 rtx addr, align, shift, mask, mem;
22725 HOST_WIDE_INT shift_mask;
22726 machine_mode mode = GET_MODE (orig_mem);
22728 /* For smaller modes, we have to implement this via SImode. */
22729 shift_mask = (mode == QImode ? 0x18 : 0x10);
22731 addr = XEXP (orig_mem, 0);
22732 addr = force_reg (GET_MODE (addr), addr);
22734 /* Aligned memory containing subword. Generate a new memory. We
22735 do not want any of the existing MEM_ATTR data, as we're now
22736 accessing memory outside the original object. */
22737 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
22738 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22739 mem = gen_rtx_MEM (SImode, align);
22740 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
22741 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
22742 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
22744 /* Shift amount for subword relative to aligned word. */
22745 shift = gen_reg_rtx (SImode);
22746 addr = gen_lowpart (SImode, addr);
22747 rtx tmp = gen_reg_rtx (SImode);
22748 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
22749 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
22750 if (BYTES_BIG_ENDIAN)
22751 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
22752 shift, 1, OPTAB_LIB_WIDEN);
22753 *pshift = shift;
22755 /* Mask for insertion. */
22756 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
22757 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
22758 *pmask = mask;
22760 return mem;
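/* Worked example for a QImode atomic at byte address A (a sketch of the
   arithmetic performed above):

     aligned address = A & -4
     shift           = ((A & 3) * 8) ^ (BYTES_BIG_ENDIAN ? 0x18 : 0)
     mask            = 0xff << shift

   which positions the byte inside the SImode word used by the
   lwarx/stwcx. loop.  */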
22763 /* A subroutine of the various atomic expanders. For sub-word operands,
22764 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
22766 static rtx
22767 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
22769 rtx x;
22771 x = gen_reg_rtx (SImode);
22772 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
22773 gen_rtx_NOT (SImode, mask),
22774 oldval)));
22776 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
22778 return x;
22781 /* A subroutine of the various atomic expanders. For sub-word operands,
22782 extract WIDE to NARROW via SHIFT. */
22784 static void
22785 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
22787 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
22788 wide, 1, OPTAB_LIB_WIDEN);
22789 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
22792 /* Expand an atomic compare and swap operation. */
22794 void
22795 rs6000_expand_atomic_compare_and_swap (rtx operands[])
22797 rtx boolval, retval, mem, oldval, newval, cond;
22798 rtx label1, label2, x, mask, shift;
22799 machine_mode mode, orig_mode;
22800 enum memmodel mod_s, mod_f;
22801 bool is_weak;
22803 boolval = operands[0];
22804 retval = operands[1];
22805 mem = operands[2];
22806 oldval = operands[3];
22807 newval = operands[4];
22808 is_weak = (INTVAL (operands[5]) != 0);
22809 mod_s = memmodel_base (INTVAL (operands[6]));
22810 mod_f = memmodel_base (INTVAL (operands[7]));
22811 orig_mode = mode = GET_MODE (mem);
22813 mask = shift = NULL_RTX;
22814 if (mode == QImode || mode == HImode)
22816 /* Before power8, we didn't have access to lbarx/lharx, so generate a
22817 lwarx and use shift/mask operations. With power8, we need to do the
22818 comparison in SImode, but the store is still done in QI/HImode. */
22819 oldval = convert_modes (SImode, mode, oldval, 1);
22821 if (!TARGET_SYNC_HI_QI)
22823 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22825 /* Shift and mask OLDVAL into position within the word. */
22826 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
22827 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22829 /* Shift and mask NEWVAL into position within the word. */
22830 newval = convert_modes (SImode, mode, newval, 1);
22831 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
22832 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22835 /* Prepare to adjust the return value. */
22836 retval = gen_reg_rtx (SImode);
22837 mode = SImode;
22839 else if (reg_overlap_mentioned_p (retval, oldval))
22840 oldval = copy_to_reg (oldval);
22842 if (mode != TImode && !reg_or_short_operand (oldval, mode))
22843 oldval = copy_to_mode_reg (mode, oldval);
22845 if (reg_overlap_mentioned_p (retval, newval))
22846 newval = copy_to_reg (newval);
22848 mem = rs6000_pre_atomic_barrier (mem, mod_s);
22850 label1 = NULL_RTX;
22851 if (!is_weak)
22853 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22854 emit_label (XEXP (label1, 0));
22856 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22858 emit_load_locked (mode, retval, mem);
22860 x = retval;
22861 if (mask)
22862 x = expand_simple_binop (SImode, AND, retval, mask,
22863 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22865 cond = gen_reg_rtx (CCmode);
22866 /* If we have TImode, synthesize a comparison. */
22867 if (mode != TImode)
22868 x = gen_rtx_COMPARE (CCmode, x, oldval);
22869 else
22871 rtx xor1_result = gen_reg_rtx (DImode);
22872 rtx xor2_result = gen_reg_rtx (DImode);
22873 rtx or_result = gen_reg_rtx (DImode);
22874 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
22875 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
22876 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
22877 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
22879 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
22880 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
22881 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
22882 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
22885 emit_insn (gen_rtx_SET (cond, x));
22887 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22888 emit_unlikely_jump (x, label2);
22890 x = newval;
22891 if (mask)
22892 x = rs6000_mask_atomic_subword (retval, newval, mask);
22894 emit_store_conditional (orig_mode, cond, mem, x);
22896 if (!is_weak)
22898 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22899 emit_unlikely_jump (x, label1);
22902 if (!is_mm_relaxed (mod_f))
22903 emit_label (XEXP (label2, 0));
22905 rs6000_post_atomic_barrier (mod_s);
22907 if (is_mm_relaxed (mod_f))
22908 emit_label (XEXP (label2, 0));
22910 if (shift)
22911 rs6000_finish_atomic_subword (operands[1], retval, shift);
22912 else if (mode != GET_MODE (operands[1]))
22913 convert_move (operands[1], retval, 1);
22915 /* In all cases, CR0 contains EQ on success, and NE on failure. */
22916 x = gen_rtx_EQ (SImode, cond, const0_rtx);
22917 emit_insn (gen_rtx_SET (boolval, x));
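/* Shape of the code generated for a strong SImode compare-and-swap with
   SEQ_CST ordering (an illustrative sketch; registers, labels and the
   weak/failure-ordering variations differ):

     sync
   1: lwarx   9,0,mem
      cmpw    0,9,oldval
      bne-    0,2f
      stwcx.  newval,0,mem
      bne-    0,1b
   2: isync

   leaving CR0 EQ on success and NE on failure, as the final SET of
   BOOLVAL above records.  */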
22920 /* Expand an atomic exchange operation. */
22922 void
22923 rs6000_expand_atomic_exchange (rtx operands[])
22925 rtx retval, mem, val, cond;
22926 machine_mode mode;
22927 enum memmodel model;
22928 rtx label, x, mask, shift;
22930 retval = operands[0];
22931 mem = operands[1];
22932 val = operands[2];
22933 model = memmodel_base (INTVAL (operands[3]));
22934 mode = GET_MODE (mem);
22936 mask = shift = NULL_RTX;
22937 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
22939 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22941 /* Shift and mask VAL into position within the word. */
22942 val = convert_modes (SImode, mode, val, 1);
22943 val = expand_simple_binop (SImode, ASHIFT, val, shift,
22944 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22946 /* Prepare to adjust the return value. */
22947 retval = gen_reg_rtx (SImode);
22948 mode = SImode;
22951 mem = rs6000_pre_atomic_barrier (mem, model);
22953 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22954 emit_label (XEXP (label, 0));
22956 emit_load_locked (mode, retval, mem);
22958 x = val;
22959 if (mask)
22960 x = rs6000_mask_atomic_subword (retval, val, mask);
22962 cond = gen_reg_rtx (CCmode);
22963 emit_store_conditional (mode, cond, mem, x);
22965 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22966 emit_unlikely_jump (x, label);
22968 rs6000_post_atomic_barrier (model);
22970 if (shift)
22971 rs6000_finish_atomic_subword (operands[0], retval, shift);
22974 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
22975 to perform. MEM is the memory on which to operate. VAL is the second
22976 operand of the binary operator. BEFORE and AFTER are optional locations to
22977 return the value of MEM either before or after the operation. MODEL_RTX
22978 is a CONST_INT containing the memory model to use. */
22980 void
22981 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
22982 rtx orig_before, rtx orig_after, rtx model_rtx)
22984 enum memmodel model = memmodel_base (INTVAL (model_rtx));
22985 machine_mode mode = GET_MODE (mem);
22986 machine_mode store_mode = mode;
22987 rtx label, x, cond, mask, shift;
22988 rtx before = orig_before, after = orig_after;
22990 mask = shift = NULL_RTX;
22991 /* On power8, we want to use SImode for the operation. On previous systems,
22992 use the operation in a subword and shift/mask to get the proper byte or
22993 halfword. */
22994 if (mode == QImode || mode == HImode)
22996 if (TARGET_SYNC_HI_QI)
22998 val = convert_modes (SImode, mode, val, 1);
23000 /* Prepare to adjust the return value. */
23001 before = gen_reg_rtx (SImode);
23002 if (after)
23003 after = gen_reg_rtx (SImode);
23004 mode = SImode;
23006 else
23008 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23010 /* Shift and mask VAL into position within the word. */
23011 val = convert_modes (SImode, mode, val, 1);
23012 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23013 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23015 switch (code)
23017 case IOR:
23018 case XOR:
23019 /* We've already zero-extended VAL. That is sufficient to
23020 make certain that it does not affect other bits. */
23021 mask = NULL;
23022 break;
23024 case AND:
23025 /* If we make certain that all of the other bits in VAL are
23026 set, that will be sufficient to not affect other bits. */
23027 x = gen_rtx_NOT (SImode, mask);
23028 x = gen_rtx_IOR (SImode, x, val);
23029 emit_insn (gen_rtx_SET (val, x));
23030 mask = NULL;
23031 break;
23033 case NOT:
23034 case PLUS:
23035 case MINUS:
23036 /* These will all affect bits outside the field and need
23037 adjustment via MASK within the loop. */
23038 break;
23040 default:
23041 gcc_unreachable ();
23044 /* Prepare to adjust the return value. */
23045 before = gen_reg_rtx (SImode);
23046 if (after)
23047 after = gen_reg_rtx (SImode);
23048 store_mode = mode = SImode;
23052 mem = rs6000_pre_atomic_barrier (mem, model);
23054 label = gen_label_rtx ();
23055 emit_label (label);
23056 label = gen_rtx_LABEL_REF (VOIDmode, label);
23058 if (before == NULL_RTX)
23059 before = gen_reg_rtx (mode);
23061 emit_load_locked (mode, before, mem);
23063 if (code == NOT)
23065 x = expand_simple_binop (mode, AND, before, val,
23066 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23067 after = expand_simple_unop (mode, NOT, x, after, 1);
23069 else
23071 after = expand_simple_binop (mode, code, before, val,
23072 after, 1, OPTAB_LIB_WIDEN);
23075 x = after;
23076 if (mask)
23078 x = expand_simple_binop (SImode, AND, after, mask,
23079 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23080 x = rs6000_mask_atomic_subword (before, x, mask);
23082 else if (store_mode != mode)
23083 x = convert_modes (store_mode, mode, x, 1);
23085 cond = gen_reg_rtx (CCmode);
23086 emit_store_conditional (store_mode, cond, mem, x);
23088 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23089 emit_unlikely_jump (x, label);
23091 rs6000_post_atomic_barrier (model);
23093 if (shift)
23095 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23096 then do the calculations in a SImode register. */
23097 if (orig_before)
23098 rs6000_finish_atomic_subword (orig_before, before, shift);
23099 if (orig_after)
23100 rs6000_finish_atomic_subword (orig_after, after, shift);
23102 else if (store_mode != mode)
23104 /* QImode/HImode on machines with lbarx/lharx where we do the native
23105 operation and then do the calculations in a SImode register. */
23106 if (orig_before)
23107 convert_move (orig_before, before, 1);
23108 if (orig_after)
23109 convert_move (orig_after, after, 1);
23111 else if (orig_after && after != orig_after)
23112 emit_move_insn (orig_after, after);
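/* Sketch of the loop generated above for, e.g., __atomic_fetch_add on an
   SImode location (illustrative only):

   1: lwarx   9,0,mem
      add     10,9,val
      stwcx.  10,0,mem
      bne-    0,1b

   BEFORE receives the loaded value and AFTER the computed one; NOT is
   the special case lowered as AND followed by a complement (i.e. the
   nand operation).  */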
23115 /* Emit instructions to move SRC to DST. Called by splitters for
23116 multi-register moves. It will emit at most one instruction for
23117 each register that is accessed; that is, it won't emit li/lis pairs
23118 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23119 register. */
23121 void
23122 rs6000_split_multireg_move (rtx dst, rtx src)
23124 /* The register number of the first register being moved. */
23125 int reg;
23126 /* The mode that is to be moved. */
23127 machine_mode mode;
23128 /* The mode that the move is being done in, and its size. */
23129 machine_mode reg_mode;
23130 int reg_mode_size;
23131 /* The number of registers that will be moved. */
23132 int nregs;
23134 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23135 mode = GET_MODE (dst);
23136 nregs = hard_regno_nregs (reg, mode);
23137 if (FP_REGNO_P (reg))
23138 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23139 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23140 else if (ALTIVEC_REGNO_P (reg))
23141 reg_mode = V16QImode;
23142 else
23143 reg_mode = word_mode;
23144 reg_mode_size = GET_MODE_SIZE (reg_mode);
23146 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23148 /* TDmode residing in FP registers is special, since the ISA requires that
23149 the lower-numbered word of a register pair is always the most significant
23150 word, even in little-endian mode. This does not match the usual subreg
23151 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23152 the appropriate constituent registers "by hand" in little-endian mode.
23154 Note we do not need to check for destructive overlap here since TDmode
23155 can only reside in even/odd register pairs. */
23156 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23158 rtx p_src, p_dst;
23159 int i;
23161 for (i = 0; i < nregs; i++)
23163 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23164 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23165 else
23166 p_src = simplify_gen_subreg (reg_mode, src, mode,
23167 i * reg_mode_size);
23169 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23170 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23171 else
23172 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23173 i * reg_mode_size);
23175 emit_insn (gen_rtx_SET (p_dst, p_src));
23178 return;
23181 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23183 /* Move register range backwards, if we might have destructive
23184 overlap. */
23185 int i;
23186 for (i = nregs - 1; i >= 0; i--)
23187 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23188 i * reg_mode_size),
23189 simplify_gen_subreg (reg_mode, src, mode,
23190 i * reg_mode_size)));
23192 else
23194 int i;
23195 int j = -1;
23196 bool used_update = false;
23197 rtx restore_basereg = NULL_RTX;
23199 if (MEM_P (src) && INT_REGNO_P (reg))
23201 rtx breg;
23203 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23204 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23206 rtx delta_rtx;
23207 breg = XEXP (XEXP (src, 0), 0);
23208 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23209 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23210 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23211 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23212 src = replace_equiv_address (src, breg);
23214 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23216 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23218 rtx basereg = XEXP (XEXP (src, 0), 0);
23219 if (TARGET_UPDATE)
23221 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23222 emit_insn (gen_rtx_SET (ndst,
23223 gen_rtx_MEM (reg_mode,
23224 XEXP (src, 0))));
23225 used_update = true;
23227 else
23228 emit_insn (gen_rtx_SET (basereg,
23229 XEXP (XEXP (src, 0), 1)));
23230 src = replace_equiv_address (src, basereg);
23232 else
23234 rtx basereg = gen_rtx_REG (Pmode, reg);
23235 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23236 src = replace_equiv_address (src, basereg);
23240 breg = XEXP (src, 0);
23241 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23242 breg = XEXP (breg, 0);
23244 /* If the base register we are using to address memory is
23245 also a destination reg, then change that register last. */
23246 if (REG_P (breg)
23247 && REGNO (breg) >= REGNO (dst)
23248 && REGNO (breg) < REGNO (dst) + nregs)
23249 j = REGNO (breg) - REGNO (dst);
23251 else if (MEM_P (dst) && INT_REGNO_P (reg))
23253 rtx breg;
23255 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23256 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23258 rtx delta_rtx;
23259 breg = XEXP (XEXP (dst, 0), 0);
23260 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23261 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23262 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23264 /* We have to update the breg before doing the store.
23265 Use store with update, if available. */
23267 if (TARGET_UPDATE)
23269 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23270 emit_insn (TARGET_32BIT
23271 ? (TARGET_POWERPC64
23272 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23273 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23274 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23275 used_update = true;
23277 else
23278 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23279 dst = replace_equiv_address (dst, breg);
23281 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23282 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23284 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23286 rtx basereg = XEXP (XEXP (dst, 0), 0);
23287 if (TARGET_UPDATE)
23289 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23290 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23291 XEXP (dst, 0)),
23292 nsrc));
23293 used_update = true;
23295 else
23296 emit_insn (gen_rtx_SET (basereg,
23297 XEXP (XEXP (dst, 0), 1)));
23298 dst = replace_equiv_address (dst, basereg);
23300 else
23302 rtx basereg = XEXP (XEXP (dst, 0), 0);
23303 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23304 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23305 && REG_P (basereg)
23306 && REG_P (offsetreg)
23307 && REGNO (basereg) != REGNO (offsetreg));
23308 if (REGNO (basereg) == 0)
23310 rtx tmp = offsetreg;
23311 offsetreg = basereg;
23312 basereg = tmp;
23314 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23315 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23316 dst = replace_equiv_address (dst, basereg);
23319 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23320 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23323 for (i = 0; i < nregs; i++)
23325 /* Calculate index to next subword. */
23326 ++j;
23327 if (j == nregs)
23328 j = 0;
23330 /* If compiler already emitted move of first word by
23331 store with update, no need to do anything. */
23332 if (j == 0 && used_update)
23333 continue;
23335 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23336 j * reg_mode_size),
23337 simplify_gen_subreg (reg_mode, src, mode,
23338 j * reg_mode_size)));
23340 if (restore_basereg != NULL_RTX)
23341 emit_insn (restore_basereg);
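/* Overlap example for the backwards loop above: splitting a move from
   the pair {r3,r4} into {r4,r5} must copy r5 <- r4 before r4 <- r3, or
   the first copy would destroy the source of the second (a sketch;
   register numbers invented):

     mr 5,4
     mr 4,3
 */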
23346 /* This page contains routines that are used to determine what the
23347 function prologue and epilogue code will do and write them out. */
23349 /* Determine whether the REG is really used. */
23351 static bool
23352 save_reg_p (int reg)
23354 /* We need to mark the PIC offset register live under the same conditions
23355 as it is set up; otherwise it won't be saved before we clobber it. */
23357 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23359 /* When calling eh_return, we must return true for all the cases
23360 where conditional_register_usage marks the PIC offset reg
23361 call used. */
23362 if (TARGET_TOC && TARGET_MINIMAL_TOC
23363 && (crtl->calls_eh_return
23364 || df_regs_ever_live_p (reg)
23365 || !constant_pool_empty_p ()))
23366 return true;
23368 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23369 && flag_pic)
23370 return true;
23373 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23376 /* Return the first fixed-point register that is required to be
23377 saved. 32 if none. */
23379 int
23380 first_reg_to_save (void)
23382 int first_reg;
23384 /* Find lowest numbered live register. */
23385 for (first_reg = 13; first_reg <= 31; first_reg++)
23386 if (save_reg_p (first_reg))
23387 break;
23389 #if TARGET_MACHO
23390 if (flag_pic
23391 && crtl->uses_pic_offset_table
23392 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23393 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23394 #endif
23396 return first_reg;
23399 /* Similar, for FP regs. */
23401 int
23402 first_fp_reg_to_save (void)
23404 int first_reg;
23406 /* Find lowest numbered live register. */
23407 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23408 if (save_reg_p (first_reg))
23409 break;
23411 return first_reg;
23414 /* Similar, for AltiVec regs. */
23416 static int
23417 first_altivec_reg_to_save (void)
23419 int i;
23421 /* Stack frame remains as is unless we are in AltiVec ABI. */
23422 if (! TARGET_ALTIVEC_ABI)
23423 return LAST_ALTIVEC_REGNO + 1;
23425 /* On Darwin, the unwind routines are compiled without
23426 TARGET_ALTIVEC, and use save_world to save/restore the
23427 altivec registers when necessary. */
23428 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23429 && ! TARGET_ALTIVEC)
23430 return FIRST_ALTIVEC_REGNO + 20;
23432 /* Find lowest numbered live register. */
23433 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23434 if (save_reg_p (i))
23435 break;
23437 return i;
23440 /* Return a 32-bit mask of the AltiVec registers we need to set in
23441 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB of
23442 the 32-bit word is bit 0, so V0 corresponds to the MSB. */
23444 static unsigned int
23445 compute_vrsave_mask (void)
23447 unsigned int i, mask = 0;
23449 /* On Darwin, the unwind routines are compiled without
23450 TARGET_ALTIVEC, and use save_world to save/restore the
23451 call-saved altivec registers when necessary. */
23452 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23453 && ! TARGET_ALTIVEC)
23454 mask |= 0xFFF;
23456 /* First, find out if we use _any_ altivec registers. */
23457 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23458 if (df_regs_ever_live_p (i))
23459 mask |= ALTIVEC_REG_BIT (i);
23461 if (mask == 0)
23462 return mask;
23464 /* Next, remove the argument registers from the set. These must
23465 be in the VRSAVE mask set by the caller, so we don't need to add
23466 them in again. More importantly, the mask we compute here is
23467 used to generate CLOBBERs in the set_vrsave insn, and we do not
23468 wish the argument registers to die. */
23469 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23470 mask &= ~ALTIVEC_REG_BIT (i);
23472 /* Similarly, remove the return value from the set. */
23474 bool yes = false;
23475 diddle_return_value (is_altivec_return_reg, &yes);
23476 if (yes)
23477 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23480 return mask;
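/* Illustrative sketch, not part of the original file: how a
   VRSAVE-style mask is laid out, assuming ALTIVEC_REG_BIT (R) expands
   to 0x80000000 >> (R - FIRST_ALTIVEC_REGNO), i.e. V0 occupies the
   MSB.  All demo_* names are hypothetical.  */
#include <stdint.h>

#define DEMO_FIRST_VR 0		/* stand-in for FIRST_ALTIVEC_REGNO */
#define DEMO_VR_BIT(r) (0x80000000u >> ((r) - DEMO_FIRST_VR))

/* Mark V20..V31 live, as the Darwin save_world case above does with
   its literal 0xFFF.  */
static uint32_t
demo_vrsave_mask (void)
{
  uint32_t mask = 0;
  for (int vr = 20; vr <= 31; vr++)
    mask |= DEMO_VR_BIT (vr);
  return mask;			/* == 0x00000FFF */
}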
23483 /* For a very restricted set of circumstances, we can cut down the
23484 size of prologues/epilogues by calling our own save/restore-the-world
23485 routines. */
23487 static void
23488 compute_save_world_info (rs6000_stack_t *info)
23490 info->world_save_p = 1;
23491 info->world_save_p
23492 = (WORLD_SAVE_P (info)
23493 && DEFAULT_ABI == ABI_DARWIN
23494 && !cfun->has_nonlocal_label
23495 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23496 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23497 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23498 && info->cr_save_p);
23500 /* This will not work in conjunction with sibcalls. Make sure there
23501 are none. (This check is expensive, but seldom executed.) */
23502 if (WORLD_SAVE_P (info))
23504 rtx_insn *insn;
23505 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23506 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23508 info->world_save_p = 0;
23509 break;
23513 if (WORLD_SAVE_P (info))
23515 /* Even if we're not touching VRsave, make sure there's room on the
23516 stack for it, if it looks like we're calling SAVE_WORLD, which
23517 will attempt to save it. */
23518 info->vrsave_size = 4;
23520 /* If we are going to save the world, we need to save the link register too. */
23521 info->lr_save_p = 1;
23523 /* "Save" the VRsave register too if we're saving the world. */
23524 if (info->vrsave_mask == 0)
23525 info->vrsave_mask = compute_vrsave_mask ();
23527 /* Because the Darwin register save/restore routines only handle
23528 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23529 check. */
23530 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23531 && (info->first_altivec_reg_save
23532 >= FIRST_SAVED_ALTIVEC_REGNO));
23535 return;
23539 static void
23540 is_altivec_return_reg (rtx reg, void *xyes)
23542 bool *yes = (bool *) xyes;
23543 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23544 *yes = true;
23548 /* Return whether REG is a global user reg or has been specified by
23549 -ffixed-REG. We should not restore these, and so cannot use
23550 lmw or out-of-line restore functions if there are any. We also
23551 can't save them (well, emit frame notes for them), because frame
23552 unwinding during exception handling will restore saved registers. */
23554 static bool
23555 fixed_reg_p (int reg)
23557 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23558 backend sets it, overriding anything the user might have given. */
23559 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23560 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23561 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23562 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23563 return false;
23565 return fixed_regs[reg];
23568 /* Determine the strategy for saving/restoring registers. */
23570 enum {
23571 SAVE_MULTIPLE = 0x1,
23572 SAVE_INLINE_GPRS = 0x2,
23573 SAVE_INLINE_FPRS = 0x4,
23574 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23575 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23576 SAVE_INLINE_VRS = 0x20,
23577 REST_MULTIPLE = 0x100,
23578 REST_INLINE_GPRS = 0x200,
23579 REST_INLINE_FPRS = 0x400,
23580 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23581 REST_INLINE_VRS = 0x1000
23584 static int
23585 rs6000_savres_strategy (rs6000_stack_t *info,
23586 bool using_static_chain_p)
23588 int strategy = 0;
23590 /* Select between in-line and out-of-line save and restore of regs.
23591 First, all the obvious cases where we don't use out-of-line. */
23592 if (crtl->calls_eh_return
23593 || cfun->machine->ra_need_lr)
23594 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23595 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23596 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23598 if (info->first_gp_reg_save == 32)
23599 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23601 if (info->first_fp_reg_save == 64)
23602 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23604 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23605 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23607 /* Define cutoff for using out-of-line functions to save registers. */
23608 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23610 if (!optimize_size)
23612 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23613 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23614 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23616 else
23618 /* Prefer out-of-line restore if it will exit. */
23619 if (info->first_fp_reg_save > 61)
23620 strategy |= SAVE_INLINE_FPRS;
23621 if (info->first_gp_reg_save > 29)
23623 if (info->first_fp_reg_save == 64)
23624 strategy |= SAVE_INLINE_GPRS;
23625 else
23626 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23628 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23629 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23632 else if (DEFAULT_ABI == ABI_DARWIN)
23634 if (info->first_fp_reg_save > 60)
23635 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23636 if (info->first_gp_reg_save > 29)
23637 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23638 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23640 else
23642 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23643 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
23644 || info->first_fp_reg_save > 61)
23645 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23646 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23647 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23650 /* Don't bother to try to save things out-of-line if r11 is occupied
23651 by the static chain. It would require too much fiddling and the
23652 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
23653 pointer on Darwin, and AIX uses r1 or r12. */
23654 if (using_static_chain_p
23655 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
23656 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
23657 | SAVE_INLINE_GPRS
23658 | SAVE_INLINE_VRS);
23660 /* Don't ever restore fixed regs. That means we can't use the
23661 out-of-line register restore functions if a fixed reg is in the
23662 range of regs restored. */
23663 if (!(strategy & REST_INLINE_FPRS))
23664 for (int i = info->first_fp_reg_save; i < 64; i++)
23665 if (fixed_regs[i])
23667 strategy |= REST_INLINE_FPRS;
23668 break;
23671 /* We can only use the out-of-line routines to restore fprs if we've
23672 saved all the registers from first_fp_reg_save in the prologue.
23673 Otherwise, we risk loading garbage. Of course, if we have saved
23674 out-of-line then we know we haven't skipped any fprs. */
23675 if ((strategy & SAVE_INLINE_FPRS)
23676 && !(strategy & REST_INLINE_FPRS))
23677 for (int i = info->first_fp_reg_save; i < 64; i++)
23678 if (!save_reg_p (i))
23680 strategy |= REST_INLINE_FPRS;
23681 break;
23684 /* Similarly, for altivec regs. */
23685 if (!(strategy & REST_INLINE_VRS))
23686 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23687 if (fixed_regs[i])
23689 strategy |= REST_INLINE_VRS;
23690 break;
23693 if ((strategy & SAVE_INLINE_VRS)
23694 && !(strategy & REST_INLINE_VRS))
23695 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23696 if (!save_reg_p (i))
23698 strategy |= REST_INLINE_VRS;
23699 break;
23702 /* info->lr_save_p isn't yet set if the only reason lr needs to be
23703 saved is an out-of-line save or restore. Set up the value for
23704 the next test (excluding out-of-line gprs). */
23705 bool lr_save_p = (info->lr_save_p
23706 || !(strategy & SAVE_INLINE_FPRS)
23707 || !(strategy & SAVE_INLINE_VRS)
23708 || !(strategy & REST_INLINE_FPRS)
23709 || !(strategy & REST_INLINE_VRS));
23711 if (TARGET_MULTIPLE
23712 && !TARGET_POWERPC64
23713 && info->first_gp_reg_save < 31
23714 && !(flag_shrink_wrap
23715 && flag_shrink_wrap_separate
23716 && optimize_function_for_speed_p (cfun)))
23718 int count = 0;
23719 for (int i = info->first_gp_reg_save; i < 32; i++)
23720 if (save_reg_p (i))
23721 count++;
23723 if (count <= 1)
23724 /* Don't use store multiple if only one reg needs to be
23725 saved. This can occur for example when the ABI_V4 pic reg
23726 (r30) needs to be saved to make calls, but r31 is not
23727 used. */
23728 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23729 else
23731 /* Prefer store multiple for saves over out-of-line
23732 routines, since the store-multiple instruction will
23733 always be smaller. */
23734 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
23736 /* The situation is more complicated with load multiple.
23737 We'd prefer to use the out-of-line routines for restores,
23738 since the "exit" out-of-line routines can handle the
23739 restore of LR and the frame teardown. However it doesn't
23740 make sense to use the out-of-line routine if that is the
23741 only reason we'd need to save LR, and we can't use the
23742 "exit" out-of-line gpr restore if we have saved some
23743 fprs; in those cases it is advantageous to use load
23744 multiple when available. */
23745 if (info->first_fp_reg_save != 64 || !lr_save_p)
23746 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
23750 /* Using the "exit" out-of-line routine does not improve code size
23751 if using it would require lr to be saved and if only saving one
23752 or two gprs. */
23753 else if (!lr_save_p && info->first_gp_reg_save > 29)
23754 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23756 /* Don't ever restore fixed regs. */
23757 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23758 for (int i = info->first_gp_reg_save; i < 32; i++)
23759 if (fixed_reg_p (i))
23761 strategy |= REST_INLINE_GPRS;
23762 strategy &= ~REST_MULTIPLE;
23763 break;
23766 /* We can only use load multiple or the out-of-line routines to
23767 restore gprs if we've saved all the registers from
23768 first_gp_reg_save. Otherwise, we risk loading garbage.
23769 Of course, if we have saved out-of-line or used stmw then we know
23770 we haven't skipped any gprs. */
23771 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
23772 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23773 for (int i = info->first_gp_reg_save; i < 32; i++)
23774 if (!save_reg_p (i))
23776 strategy |= REST_INLINE_GPRS;
23777 strategy &= ~REST_MULTIPLE;
23778 break;
23781 if (TARGET_ELF && TARGET_64BIT)
23783 if (!(strategy & SAVE_INLINE_FPRS))
23784 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23785 else if (!(strategy & SAVE_INLINE_GPRS)
23786 && info->first_fp_reg_save == 64)
23787 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
23789 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
23790 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
23792 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
23793 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23795 return strategy;
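/* Illustrative sketch, not part of the original file: the strategy
   word is a plain bitmask, and the compound test used above,
   (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS,
   is true exactly when the gprs are restored by lmw or by an
   out-of-line routine.  The demo_ values mirror the enum above.  */
enum { DEMO_REST_MULTIPLE = 0x100, DEMO_REST_INLINE_GPRS = 0x200 };

static int
demo_gpr_restore_not_plain_inline (int strategy)
{
  /* Plain inline restore: only DEMO_REST_INLINE_GPRS set, so false.
     lmw (both bits set) or out-of-line (neither set): true.  */
  return (strategy & (DEMO_REST_INLINE_GPRS | DEMO_REST_MULTIPLE))
	 != DEMO_REST_INLINE_GPRS;
}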
23798 /* Calculate the stack information for the current function. This is
23799 complicated by having two separate calling sequences, the AIX calling
23800 sequence and the V.4 calling sequence.
23802 AIX (and Darwin/Mac OS X) stack frames look like:
23803 32-bit 64-bit
23804 SP----> +---------------------------------------+
23805 | back chain to caller | 0 0
23806 +---------------------------------------+
23807 | saved CR | 4 8 (8-11)
23808 +---------------------------------------+
23809 | saved LR | 8 16
23810 +---------------------------------------+
23811 | reserved for compilers | 12 24
23812 +---------------------------------------+
23813 | reserved for binders | 16 32
23814 +---------------------------------------+
23815 | saved TOC pointer | 20 40
23816 +---------------------------------------+
23817 | Parameter save area (+padding*) (P) | 24 48
23818 +---------------------------------------+
23819 | Alloca space (A) | 24+P etc.
23820 +---------------------------------------+
23821 | Local variable space (L) | 24+P+A
23822 +---------------------------------------+
23823 | Float/int conversion temporary (X) | 24+P+A+L
23824 +---------------------------------------+
23825 | Save area for AltiVec registers (W) | 24+P+A+L+X
23826 +---------------------------------------+
23827 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
23828 +---------------------------------------+
23829 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
23830 +---------------------------------------+
23831 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
23832 +---------------------------------------+
23833 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
23834 +---------------------------------------+
23835 old SP->| back chain to caller's caller |
23836 +---------------------------------------+
23838 * If the alloca area is present, the parameter save area is
23839 padded so that the alloca area starts 16-byte aligned.
23841 The required alignment for AIX configurations is two words (i.e., 8
23842 or 16 bytes).
23844 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
23846 SP----> +---------------------------------------+
23847 | Back chain to caller | 0
23848 +---------------------------------------+
23849 | Save area for CR | 8
23850 +---------------------------------------+
23851 | Saved LR | 16
23852 +---------------------------------------+
23853 | Saved TOC pointer | 24
23854 +---------------------------------------+
23855 | Parameter save area (+padding*) (P) | 32
23856 +---------------------------------------+
23857 | Alloca space (A) | 32+P
23858 +---------------------------------------+
23859 | Local variable space (L) | 32+P+A
23860 +---------------------------------------+
23861 | Save area for AltiVec registers (W) | 32+P+A+L
23862 +---------------------------------------+
23863 | AltiVec alignment padding (Y) | 32+P+A+L+W
23864 +---------------------------------------+
23865 | Save area for GP registers (G) | 32+P+A+L+W+Y
23866 +---------------------------------------+
23867 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
23868 +---------------------------------------+
23869 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
23870 +---------------------------------------+
23872 * If the alloca area is present, the parameter save area is
23873 padded so that the alloca area starts 16-byte aligned.
23875 V.4 stack frames look like:
23877 SP----> +---------------------------------------+
23878 | back chain to caller | 0
23879 +---------------------------------------+
23880 | caller's saved LR | 4
23881 +---------------------------------------+
23882 | Parameter save area (+padding*) (P) | 8
23883 +---------------------------------------+
23884 | Alloca space (A) | 8+P
23885 +---------------------------------------+
23886 | Varargs save area (V) | 8+P+A
23887 +---------------------------------------+
23888 | Local variable space (L) | 8+P+A+V
23889 +---------------------------------------+
23890 | Float/int conversion temporary (X) | 8+P+A+V+L
23891 +---------------------------------------+
23892 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
23893 +---------------------------------------+
23894 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
23895 +---------------------------------------+
23896 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
23897 +---------------------------------------+
23898 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
23899 +---------------------------------------+
23900 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
23901 +---------------------------------------+
23902 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
23903 +---------------------------------------+
23904 old SP->| back chain to caller's caller |
23905 +---------------------------------------+
23907 * If the alloca area is present and the required alignment is
23908 16 bytes, the parameter save area is padded so that the
23909 alloca area starts 16-byte aligned.
23911 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
23912 given. (But note below and in sysv4.h that we require only 8 and
23913 may round up the size of our stack frame anyway. The historical
23914 reason is early versions of powerpc-linux which didn't properly
23915 align the stack at program startup. A happy side-effect is that
23916 -mno-eabi libraries can be used with -meabi programs.)
23918 The EABI configuration defaults to the V.4 layout. However,
23919 the stack alignment requirements may differ. If -mno-eabi is not
23920 given, the required stack alignment is 8 bytes; if -mno-eabi is
23921 given, the required alignment is 16 bytes. (But see V.4 comment
23922 above.) */
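/* Illustrative sketch, not part of the original file: the running
   offsets in the ELFv2 diagram above for given area sizes P, A, L, W,
   Y and G.  demo_elfv2_offsets is hypothetical; e.g.
   demo_elfv2_offsets (64, 0, 48, 32, 0, 64) prints the layout of a
   frame with a 64-byte parameter area, 48 bytes of locals, two vector
   registers and eight gprs saved.  */
#include <stdio.h>

static void
demo_elfv2_offsets (long P, long A, long L, long W, long Y, long G)
{
  long off = 32;			/* back chain, CR, LR, TOC */
  printf ("parameter save area at %ld\n", off);  off += P;
  printf ("alloca space at %ld\n", off);	 off += A;
  printf ("local variable space at %ld\n", off); off += L;
  printf ("AltiVec save area at %ld\n", off);	 off += W + Y;
  printf ("GP save area at %ld\n", off);	 off += G;
  printf ("FP save area at %ld\n", off);
}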
23924 #ifndef ABI_STACK_BOUNDARY
23925 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
23926 #endif
23928 static rs6000_stack_t *
23929 rs6000_stack_info (void)
23931 /* We should never be called for thunks, we are not set up for that. */
23932 gcc_assert (!cfun->is_thunk);
23934 rs6000_stack_t *info = &stack_info;
23935 int reg_size = TARGET_32BIT ? 4 : 8;
23936 int ehrd_size;
23937 int ehcr_size;
23938 int save_align;
23939 int first_gp;
23940 HOST_WIDE_INT non_fixed_size;
23941 bool using_static_chain_p;
23943 if (reload_completed && info->reload_completed)
23944 return info;
23946 memset (info, 0, sizeof (*info));
23947 info->reload_completed = reload_completed;
23949 /* Select which calling sequence. */
23950 info->abi = DEFAULT_ABI;
23952 /* Calculate which registers need to be saved & save area size. */
23953 info->first_gp_reg_save = first_reg_to_save ();
23954 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
23955 even if it currently looks like we won't. Reload may need it to
23956 get at a constant; if so, it will have already created a constant
23957 pool entry for it. */
23958 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
23959 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
23960 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
23961 && crtl->uses_const_pool
23962 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
23963 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
23964 else
23965 first_gp = info->first_gp_reg_save;
23967 info->gp_size = reg_size * (32 - first_gp);
23969 info->first_fp_reg_save = first_fp_reg_to_save ();
23970 info->fp_size = 8 * (64 - info->first_fp_reg_save);
23972 info->first_altivec_reg_save = first_altivec_reg_to_save ();
23973 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
23974 - info->first_altivec_reg_save);
23976 /* Does this function call anything? */
23977 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
23979 /* Determine if we need to save the condition code registers. */
23980 if (save_reg_p (CR2_REGNO)
23981 || save_reg_p (CR3_REGNO)
23982 || save_reg_p (CR4_REGNO))
23984 info->cr_save_p = 1;
23985 if (DEFAULT_ABI == ABI_V4)
23986 info->cr_size = reg_size;
23989 /* If the current function calls __builtin_eh_return, then we need
23990 to allocate stack space for registers that will hold data for
23991 the exception handler. */
23992 if (crtl->calls_eh_return)
23994 unsigned int i;
23995 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
23996 continue;
23998 ehrd_size = i * UNITS_PER_WORD;
24000 else
24001 ehrd_size = 0;
24003 /* In the ELFv2 ABI, we also need to allocate space for separate
24004 CR field save areas if the function calls __builtin_eh_return. */
24005 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24007 /* This hard-codes that we have three call-saved CR fields. */
24008 ehcr_size = 3 * reg_size;
24009 /* We do *not* use the regular CR save mechanism. */
24010 info->cr_save_p = 0;
24012 else
24013 ehcr_size = 0;
24015 /* Determine various sizes. */
24016 info->reg_size = reg_size;
24017 info->fixed_size = RS6000_SAVE_AREA;
24018 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24019 if (cfun->calls_alloca)
24020 info->parm_size =
24021 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24022 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24023 else
24024 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24025 TARGET_ALTIVEC ? 16 : 8);
24026 if (FRAME_GROWS_DOWNWARD)
24027 info->vars_size
24028 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24029 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24030 - (info->fixed_size + info->vars_size + info->parm_size);
24032 if (TARGET_ALTIVEC_ABI)
24033 info->vrsave_mask = compute_vrsave_mask ();
24035 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24036 info->vrsave_size = 4;
24038 compute_save_world_info (info);
24040 /* Calculate the offsets. */
24041 switch (DEFAULT_ABI)
24043 case ABI_NONE:
24044 default:
24045 gcc_unreachable ();
24047 case ABI_AIX:
24048 case ABI_ELFv2:
24049 case ABI_DARWIN:
24050 info->fp_save_offset = -info->fp_size;
24051 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24053 if (TARGET_ALTIVEC_ABI)
24055 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24057 /* Align stack so vector save area is on a quadword boundary.
24058 The padding goes above the vectors. */
24059 if (info->altivec_size != 0)
24060 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24062 info->altivec_save_offset = info->vrsave_save_offset
24063 - info->altivec_padding_size
24064 - info->altivec_size;
24065 gcc_assert (info->altivec_size == 0
24066 || info->altivec_save_offset % 16 == 0);
24068 /* Adjust for AltiVec case. */
24069 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24071 else
24072 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24074 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24075 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24076 info->lr_save_offset = 2*reg_size;
24077 break;
24079 case ABI_V4:
24080 info->fp_save_offset = -info->fp_size;
24081 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24082 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24084 if (TARGET_ALTIVEC_ABI)
24086 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24088 /* Align stack so vector save area is on a quadword boundary. */
24089 if (info->altivec_size != 0)
24090 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24092 info->altivec_save_offset = info->vrsave_save_offset
24093 - info->altivec_padding_size
24094 - info->altivec_size;
24096 /* Adjust for AltiVec case. */
24097 info->ehrd_offset = info->altivec_save_offset;
24099 else
24100 info->ehrd_offset = info->cr_save_offset;
24102 info->ehrd_offset -= ehrd_size;
24103 info->lr_save_offset = reg_size;
24106 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24107 info->save_size = RS6000_ALIGN (info->fp_size
24108 + info->gp_size
24109 + info->altivec_size
24110 + info->altivec_padding_size
24111 + ehrd_size
24112 + ehcr_size
24113 + info->cr_size
24114 + info->vrsave_size,
24115 save_align);
24117 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24119 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24120 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24122 /* Determine if we need to save the link register. */
24123 if (info->calls_p
24124 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24125 && crtl->profile
24126 && !TARGET_PROFILE_KERNEL)
24127 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24128 #ifdef TARGET_RELOCATABLE
24129 || (DEFAULT_ABI == ABI_V4
24130 && (TARGET_RELOCATABLE || flag_pic > 1)
24131 && !constant_pool_empty_p ())
24132 #endif
24133 || rs6000_ra_ever_killed ())
24134 info->lr_save_p = 1;
24136 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24137 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24138 && call_used_regs[STATIC_CHAIN_REGNUM]);
24139 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24141 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24142 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24143 || !(info->savres_strategy & SAVE_INLINE_VRS)
24144 || !(info->savres_strategy & REST_INLINE_GPRS)
24145 || !(info->savres_strategy & REST_INLINE_FPRS)
24146 || !(info->savres_strategy & REST_INLINE_VRS))
24147 info->lr_save_p = 1;
24149 if (info->lr_save_p)
24150 df_set_regs_ever_live (LR_REGNO, true);
24152 /* Determine if we need to allocate any stack frame:
24154 For AIX we need to push the stack if a frame pointer is needed
24155 (because the stack might be dynamically adjusted), if we are
24156 debugging, if we make calls, or if the sum of fp_save, gp_save,
24157 and local variables are more than the space needed to save all
24158 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24159 + 18*8 = 288 (GPR13 reserved).
24161 For V.4 we don't have the stack cushion that AIX uses, but assume
24162 that the debugger can handle stackless frames. */
24164 if (info->calls_p)
24165 info->push_p = 1;
24167 else if (DEFAULT_ABI == ABI_V4)
24168 info->push_p = non_fixed_size != 0;
24170 else if (frame_pointer_needed)
24171 info->push_p = 1;
24173 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24174 info->push_p = 1;
24176 else
24177 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24179 return info;
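/* Illustrative sketch, not part of the original file: RS6000_ALIGN
   rounds a size up to a power-of-two boundary; assuming it is the
   usual ROUND_UP idiom, it behaves like this hypothetical helper.  */
static long
demo_align_up (long n, long align)
{
  return (n + align - 1) & -align;
}
/* e.g. demo_align_up (220, 16) == 224: a 220-byte register save block
   is padded to 224 bytes when save_align is 16.  */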
24182 static void
24183 debug_stack_info (rs6000_stack_t *info)
24185 const char *abi_string;
24187 if (! info)
24188 info = rs6000_stack_info ();
24190 fprintf (stderr, "\nStack information for function %s:\n",
24191 ((current_function_decl && DECL_NAME (current_function_decl))
24192 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24193 : "<unknown>"));
24195 switch (info->abi)
24197 default: abi_string = "Unknown"; break;
24198 case ABI_NONE: abi_string = "NONE"; break;
24199 case ABI_AIX: abi_string = "AIX"; break;
24200 case ABI_ELFv2: abi_string = "ELFv2"; break;
24201 case ABI_DARWIN: abi_string = "Darwin"; break;
24202 case ABI_V4: abi_string = "V.4"; break;
24205 fprintf (stderr, "\tABI = %5s\n", abi_string);
24207 if (TARGET_ALTIVEC_ABI)
24208 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24210 if (info->first_gp_reg_save != 32)
24211 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24213 if (info->first_fp_reg_save != 64)
24214 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24216 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24217 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24218 info->first_altivec_reg_save);
24220 if (info->lr_save_p)
24221 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24223 if (info->cr_save_p)
24224 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24226 if (info->vrsave_mask)
24227 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24229 if (info->push_p)
24230 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24232 if (info->calls_p)
24233 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24235 if (info->gp_size)
24236 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24238 if (info->fp_size)
24239 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24241 if (info->altivec_size)
24242 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24243 info->altivec_save_offset);
24245 if (info->vrsave_size)
24246 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24247 info->vrsave_save_offset);
24249 if (info->lr_save_p)
24250 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24252 if (info->cr_save_p)
24253 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24255 if (info->varargs_save_offset)
24256 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24258 if (info->total_size)
24259 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24260 info->total_size);
24262 if (info->vars_size)
24263 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24264 info->vars_size);
24266 if (info->parm_size)
24267 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24269 if (info->fixed_size)
24270 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24272 if (info->gp_size)
24273 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24275 if (info->fp_size)
24276 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24278 if (info->altivec_size)
24279 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24281 if (info->vrsave_size)
24282 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24284 if (info->altivec_padding_size)
24285 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24286 info->altivec_padding_size);
24288 if (info->cr_size)
24289 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24291 if (info->save_size)
24292 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24294 if (info->reg_size != 4)
24295 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24297 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24299 fprintf (stderr, "\n");
24302 rtx
24303 rs6000_return_addr (int count, rtx frame)
24305 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24306 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24307 if (count != 0
24308 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24310 cfun->machine->ra_needs_full_frame = 1;
24312 if (count == 0)
24313 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24314 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24315 frame = stack_pointer_rtx;
24316 rtx prev_frame_addr = memory_address (Pmode, frame);
24317 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24318 rtx lr_save_off = plus_constant (Pmode,
24319 prev_frame, RETURN_ADDRESS_OFFSET);
24320 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24321 return gen_rtx_MEM (Pmode, lr_save_addr);
24324 cfun->machine->ra_need_lr = 1;
24325 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24328 /* Say whether a function is a candidate for sibcall handling or not. */
24330 static bool
24331 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24333 tree fntype;
24335 if (decl)
24336 fntype = TREE_TYPE (decl);
24337 else
24338 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24340 /* We can't do it if the called function has more vector parameters
24341 than the current function; there's nowhere to put the VRsave code. */
24342 if (TARGET_ALTIVEC_ABI
24343 && TARGET_ALTIVEC_VRSAVE
24344 && !(decl && decl == current_function_decl))
24346 function_args_iterator args_iter;
24347 tree type;
24348 int nvreg = 0;
24350 /* Functions with vector parameters are required to have a
24351 prototype, so the argument type info must be available
24352 here. */
24353 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24354 if (TREE_CODE (type) == VECTOR_TYPE
24355 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24356 nvreg++;
24358 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24359 if (TREE_CODE (type) == VECTOR_TYPE
24360 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24361 nvreg--;
24363 if (nvreg > 0)
24364 return false;
24367 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24368 functions, because the callee may have a different TOC pointer from
24369 the caller, and there's no way to ensure we restore the TOC when
24370 we return. With the secure-plt SYSV ABI we can't make non-local
24371 calls when -fpic/PIC because the plt call stubs use r30. */
24372 if (DEFAULT_ABI == ABI_DARWIN
24373 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24374 && decl
24375 && !DECL_EXTERNAL (decl)
24376 && !DECL_WEAK (decl)
24377 && (*targetm.binds_local_p) (decl))
24378 || (DEFAULT_ABI == ABI_V4
24379 && (!TARGET_SECURE_PLT
24380 || !flag_pic
24381 || (decl
24382 && (*targetm.binds_local_p) (decl)))))
24384 tree attr_list = TYPE_ATTRIBUTES (fntype);
24386 if (!lookup_attribute ("longcall", attr_list)
24387 || lookup_attribute ("shortcall", attr_list))
24388 return true;
24391 return false;
24394 static int
24395 rs6000_ra_ever_killed (void)
24397 rtx_insn *top;
24398 rtx reg;
24399 rtx_insn *insn;
24401 if (cfun->is_thunk)
24402 return 0;
24404 if (cfun->machine->lr_save_state)
24405 return cfun->machine->lr_save_state - 1;
24407 /* regs_ever_live has LR marked as used if any sibcalls are present,
24408 but this should not force saving and restoring in the
24409 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24410 clobbers LR, so that is inappropriate. */
24412 /* Also, the prologue can generate a store into LR that
24413 doesn't really count, like this:
24415 move LR->R0
24416 bcl to set PIC register
24417 move LR->R31
24418 move R0->LR
24420 When we're called from the epilogue, we need to avoid counting
24421 this as a store. */
24423 push_topmost_sequence ();
24424 top = get_insns ();
24425 pop_topmost_sequence ();
24426 reg = gen_rtx_REG (Pmode, LR_REGNO);
24428 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24430 if (INSN_P (insn))
24432 if (CALL_P (insn))
24434 if (!SIBLING_CALL_P (insn))
24435 return 1;
24437 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24438 return 1;
24439 else if (set_of (reg, insn) != NULL_RTX
24440 && !prologue_epilogue_contains (insn))
24441 return 1;
24444 return 0;
24447 /* Emit instructions needed to load the TOC register.
24448 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
24449 and there is a constant pool; or for SVR4 -fpic. */
24451 void
24452 rs6000_emit_load_toc_table (int fromprolog)
24454 rtx dest;
24455 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24457 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24459 char buf[30];
24460 rtx lab, tmp1, tmp2, got;
24462 lab = gen_label_rtx ();
24463 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24464 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24465 if (flag_pic == 2)
24467 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24468 need_toc_init = 1;
24470 else
24471 got = rs6000_got_sym ();
24472 tmp1 = tmp2 = dest;
24473 if (!fromprolog)
24475 tmp1 = gen_reg_rtx (Pmode);
24476 tmp2 = gen_reg_rtx (Pmode);
24478 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24479 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24480 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24481 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24483 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24485 emit_insn (gen_load_toc_v4_pic_si ());
24486 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24488 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24490 char buf[30];
24491 rtx temp0 = (fromprolog
24492 ? gen_rtx_REG (Pmode, 0)
24493 : gen_reg_rtx (Pmode));
24495 if (fromprolog)
24497 rtx symF, symL;
24499 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24500 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24502 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24503 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24505 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24506 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24507 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24509 else
24511 rtx tocsym, lab;
24513 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24514 need_toc_init = 1;
24515 lab = gen_label_rtx ();
24516 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24517 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24518 if (TARGET_LINK_STACK)
24519 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24520 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24522 emit_insn (gen_addsi3 (dest, temp0, dest));
24524 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24526 /* This is for AIX code running in non-PIC ELF32. */
24527 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24529 need_toc_init = 1;
24530 emit_insn (gen_elf_high (dest, realsym));
24531 emit_insn (gen_elf_low (dest, dest, realsym));
24533 else
24535 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24537 if (TARGET_32BIT)
24538 emit_insn (gen_load_toc_aix_si (dest));
24539 else
24540 emit_insn (gen_load_toc_aix_di (dest));
24544 /* Emit instructions to restore the link register after determining where
24545 its value has been stored. */
24547 void
24548 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24550 rs6000_stack_t *info = rs6000_stack_info ();
24551 rtx operands[2];
24553 operands[0] = source;
24554 operands[1] = scratch;
24556 if (info->lr_save_p)
24558 rtx frame_rtx = stack_pointer_rtx;
24559 HOST_WIDE_INT sp_offset = 0;
24560 rtx tmp;
24562 if (frame_pointer_needed
24563 || cfun->calls_alloca
24564 || info->total_size > 32767)
24566 tmp = gen_frame_mem (Pmode, frame_rtx);
24567 emit_move_insn (operands[1], tmp);
24568 frame_rtx = operands[1];
24570 else if (info->push_p)
24571 sp_offset = info->total_size;
24573 tmp = plus_constant (Pmode, frame_rtx,
24574 info->lr_save_offset + sp_offset);
24575 tmp = gen_frame_mem (Pmode, tmp);
24576 emit_move_insn (tmp, operands[0]);
24578 else
24579 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24581 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24582 state of lr_save_p so any change from here on would be a bug. In
24583 particular, stop rs6000_ra_ever_killed from considering the SET
24584 of lr we may have added just above. */
24585 cfun->machine->lr_save_state = info->lr_save_p + 1;
24588 static GTY(()) alias_set_type set = -1;
24590 alias_set_type
24591 get_TOC_alias_set (void)
24593 if (set == -1)
24594 set = new_alias_set ();
24595 return set;
24598 /* This returns nonzero if the current function uses the TOC. This is
24599 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24600 is generated by the ABI_V4 load_toc_* patterns.
24601 Return 2 instead of 1 if the load_toc_* pattern is in the function
24602 partition that doesn't start the function. */
24603 #if TARGET_ELF
24604 static int
24605 uses_TOC (void)
24607 rtx_insn *insn;
24608 int ret = 1;
24610 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24612 if (INSN_P (insn))
24614 rtx pat = PATTERN (insn);
24615 int i;
24617 if (GET_CODE (pat) == PARALLEL)
24618 for (i = 0; i < XVECLEN (pat, 0); i++)
24620 rtx sub = XVECEXP (pat, 0, i);
24621 if (GET_CODE (sub) == USE)
24623 sub = XEXP (sub, 0);
24624 if (GET_CODE (sub) == UNSPEC
24625 && XINT (sub, 1) == UNSPEC_TOC)
24626 return ret;
24630 else if (crtl->has_bb_partition
24631 && NOTE_P (insn)
24632 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
24633 ret = 2;
24635 return 0;
24637 #endif
24639 rtx
24640 create_TOC_reference (rtx symbol, rtx largetoc_reg)
24642 rtx tocrel, tocreg, hi;
24644 if (TARGET_DEBUG_ADDR)
24646 if (GET_CODE (symbol) == SYMBOL_REF)
24647 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
24648 XSTR (symbol, 0));
24649 else
24651 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
24652 GET_RTX_NAME (GET_CODE (symbol)));
24653 debug_rtx (symbol);
24657 if (!can_create_pseudo_p ())
24658 df_set_regs_ever_live (TOC_REGISTER, true);
24660 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
24661 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
24662 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
24663 return tocrel;
24665 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
24666 if (largetoc_reg != NULL)
24668 emit_move_insn (largetoc_reg, hi);
24669 hi = largetoc_reg;
24671 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
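/* Illustrative sketch, not part of the original file: the HIGH/LO_SUM
   pair built above (like the gen_elf_high/gen_elf_low uses elsewhere in
   this file) splits an offset into a high part and a signed 16-bit low
   part so that (hi << 16) + lo reconstructs the value exactly.  */
#include <stdint.h>

static void
demo_hi_lo_split (int32_t x, int32_t *hi, int32_t *lo)
{
  *lo = (int16_t) x;		/* sign-extended low 16 bits */
  *hi = (x - *lo) >> 16;	/* bumped by 1 when *lo is negative */
}
/* demo_hi_lo_split of 0x12348000 yields hi == 0x1235, lo == -0x8000;
   (0x1235 << 16) + (-0x8000) == 0x12348000.  */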
24674 /* Issue assembly directives that create a reference to the given DWARF
24675 FRAME_TABLE_LABEL from the current function section. */
24676 void
24677 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
24679 fprintf (asm_out_file, "\t.ref %s\n",
24680 (* targetm.strip_name_encoding) (frame_table_label));
24683 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
24684 and the change to the stack pointer. */
24686 static void
24687 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
24689 rtvec p;
24690 int i;
24691 rtx regs[3];
24693 i = 0;
24694 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24695 if (hard_frame_needed)
24696 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
24697 if (!(REGNO (fp) == STACK_POINTER_REGNUM
24698 || (hard_frame_needed
24699 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
24700 regs[i++] = fp;
24702 p = rtvec_alloc (i);
24703 while (--i >= 0)
24705 rtx mem = gen_frame_mem (BLKmode, regs[i]);
24706 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
24709 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
24712 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
24713 and set the appropriate attributes for the generated insn. Return the
24714 first insn which adjusts the stack pointer or the last insn before
24715 the stack adjustment loop.
24717 SIZE_INT is used to create the CFI note for the allocation.
24719 SIZE_RTX is an rtx containing the size of the adjustment. Note that
24720 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
24722 ORIG_SP contains the backchain value that must be stored at *sp. */
24724 static rtx_insn *
24725 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
24727 rtx_insn *insn;
24729 rtx size_rtx = GEN_INT (-size_int);
24730 if (size_int > 32767)
24732 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24733 /* Need a note here so that try_split doesn't get confused. */
24734 if (get_last_insn () == NULL_RTX)
24735 emit_note (NOTE_INSN_DELETED);
24736 insn = emit_move_insn (tmp_reg, size_rtx);
24737 try_split (PATTERN (insn), insn, 0);
24738 size_rtx = tmp_reg;
24741 if (Pmode == SImode)
24742 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
24743 stack_pointer_rtx,
24744 size_rtx,
24745 orig_sp));
24746 else
24747 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
24748 stack_pointer_rtx,
24749 size_rtx,
24750 orig_sp));
24751 rtx par = PATTERN (insn);
24752 gcc_assert (GET_CODE (par) == PARALLEL);
24753 rtx set = XVECEXP (par, 0, 0);
24754 gcc_assert (GET_CODE (set) == SET);
24755 rtx mem = SET_DEST (set);
24756 gcc_assert (MEM_P (mem));
24757 MEM_NOTRAP_P (mem) = 1;
24758 set_mem_alias_set (mem, get_frame_alias_set ());
24760 RTX_FRAME_RELATED_P (insn) = 1;
24761 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24762 gen_rtx_SET (stack_pointer_rtx,
24763 gen_rtx_PLUS (Pmode,
24764 stack_pointer_rtx,
24765 GEN_INT (-size_int))));
24767 /* Emit a blockage to ensure the allocation/probing insns are
24768 not optimized, combined, removed, etc. Add REG_STACK_CHECK
24769 note for similar reasons. */
24770 if (flag_stack_clash_protection)
24772 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
24773 emit_insn (gen_blockage ());
24776 return insn;
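/* Illustrative sketch, not part of the original file: why sizes above
   32767 take the register path.  The displacement in a
   store-with-update insn is a signed 16-bit immediate, so -size must
   fit in it.  */
static int
demo_fits_signed16 (long displacement)
{
  return displacement >= -32768 && displacement <= 32767;
}
/* demo_fits_signed16 (-32000) holds, but demo_fits_signed16 (-40000)
   does not, so a 40000-byte allocation first moves the size into r0.  */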
24779 static HOST_WIDE_INT
24780 get_stack_clash_protection_probe_interval (void)
24782 return (HOST_WIDE_INT_1U
24783 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
24786 static HOST_WIDE_INT
24787 get_stack_clash_protection_guard_size (void)
24789 return (HOST_WIDE_INT_1U
24790 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
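/* Illustrative sketch, not part of the original file: both --param
   values are exponents, so the derived sizes are powers of two; a
   probe-interval exponent of 12, for instance, gives 4096 bytes.  */
static unsigned long
demo_param_to_bytes (int exponent)
{
  return 1UL << exponent;	/* 12 -> 4096, 16 -> 65536 */
}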
24793 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
24794 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
24796 COPY_REG, if non-null, should contain a copy of the original
24797 stack pointer at exit from this function.
24799 This is subtly different from the Ada probing in that it tries hard to
24800 prevent attacks that jump the stack guard. Thus it is never allowed to
24801 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
24802 space without a suitable probe. */
24803 static rtx_insn *
24804 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
24805 rtx copy_reg)
24807 rtx orig_sp = copy_reg;
24809 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
24811 /* Round the size down to a multiple of PROBE_INTERVAL. */
24812 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
24814 /* If explicitly requested,
24815 or the rounded size is not the same as the original size,
24816 or the rounded size is greater than a page,
24817 then we will need a copy of the original stack pointer. */
24818 if (rounded_size != orig_size
24819 || rounded_size > probe_interval
24820 || copy_reg)
24822 /* If the caller did not request a copy of the incoming stack
24823 pointer, then we use r0 to hold the copy. */
24824 if (!copy_reg)
24825 orig_sp = gen_rtx_REG (Pmode, 0);
24826 emit_move_insn (orig_sp, stack_pointer_rtx);
24829 /* There are three cases here.
24831 One is a single probe, which is the most common and the most
24832 efficient, as it does not need a copy of the original
24833 stack pointer if there are no residuals.
24835 Second is unrolled allocation/probes, which we use if there are
24836 just a few of them. It needs to save the original stack pointer into a
24837 temporary for use as a source register in the allocation/probe.
24839 Last is a loop. This is the most uncommon case and least efficient. */
24840 rtx_insn *retval = NULL;
24841 if (rounded_size == probe_interval)
24843 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
24845 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24847 else if (rounded_size <= 8 * probe_interval)
24849 /* The ABI requires using store-with-update insns to allocate
24850 space and store the backchain into the stack.
24852 So we save the current stack pointer into a temporary, then
24853 emit the store-with-update insns to store the saved stack pointer
24854 into the right location in each new page. */
24855 for (int i = 0; i < rounded_size; i += probe_interval)
24857 rtx_insn *insn
24858 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
24860 /* Save the first stack adjustment in RETVAL. */
24861 if (i == 0)
24862 retval = insn;
24865 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24867 else
24869 /* Compute the ending address. */
24870 rtx end_addr
24871 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
24872 rtx rs = GEN_INT (-rounded_size);
24873 rtx_insn *insn;
24874 if (add_operand (rs, Pmode))
24875 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
24876 else
24878 emit_move_insn (end_addr, GEN_INT (-rounded_size));
24879 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
24880 stack_pointer_rtx));
24881 /* Describe the effect of INSN to the CFI engine. */
24882 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24883 gen_rtx_SET (end_addr,
24884 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24885 rs)));
24887 RTX_FRAME_RELATED_P (insn) = 1;
24889 /* Emit the loop. */
24890 if (TARGET_64BIT)
24891 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
24892 stack_pointer_rtx, orig_sp,
24893 end_addr));
24894 else
24895 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
24896 stack_pointer_rtx, orig_sp,
24897 end_addr));
24898 RTX_FRAME_RELATED_P (retval) = 1;
24899 /* Describe the effect of INSN to the CFI engine. */
24900 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
24901 gen_rtx_SET (stack_pointer_rtx, end_addr));
24903 /* Emit a blockage to ensure the allocation/probing insns are
24904 not optimized, combined, removed, etc. Other cases handle this
24905 within their call to rs6000_emit_allocate_stack_1. */
24906 emit_insn (gen_blockage ());
24908 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
24911 if (orig_size != rounded_size)
24913 /* Allocate (and implicitly probe) any residual space. */
24914 HOST_WIDE_INT residual = orig_size - rounded_size;
24916 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
24918 /* If the residual was the only allocation, then we can return the
24919 allocating insn. */
24920 if (!retval)
24921 retval = insn;
24924 return retval;
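/* Illustrative sketch, not part of the original file: the three-way
   choice made above, as a pure function of the rounded size and the
   probe interval.  demo_* names are hypothetical.  */
enum demo_probe_kind { DEMO_PROBE_ONE, DEMO_PROBE_UNROLLED, DEMO_PROBE_LOOP };

static enum demo_probe_kind
demo_choose_probe_strategy (long rounded_size, long probe_interval)
{
  if (rounded_size == probe_interval)
    return DEMO_PROBE_ONE;		/* single store-with-update */
  if (rounded_size <= 8 * probe_interval)
    return DEMO_PROBE_UNROLLED;		/* a few unrolled allocate/probes */
  return DEMO_PROBE_LOOP;		/* stdu/stwu loop */
}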
24927 /* Emit the correct code for allocating stack space, as insns.
24928 If COPY_REG, make sure a copy of the old frame is left there.
24929 The generated code may use hard register 0 as a temporary. */
24931 static rtx_insn *
24932 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
24934 rtx_insn *insn;
24935 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24936 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24937 rtx todec = gen_int_mode (-size, Pmode);
24939 if (INTVAL (todec) != -size)
24941 warning (0, "stack frame too large");
24942 emit_insn (gen_trap ());
24943 return 0;
24946 if (crtl->limit_stack)
24948 if (REG_P (stack_limit_rtx)
24949 && REGNO (stack_limit_rtx) > 1
24950 && REGNO (stack_limit_rtx) <= 31)
24952 rtx_insn *insn
24953 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
24954 gcc_assert (insn);
24955 emit_insn (insn);
24956 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
24958 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
24959 && TARGET_32BIT
24960 && DEFAULT_ABI == ABI_V4
24961 && !flag_pic)
24963 rtx toload = gen_rtx_CONST (VOIDmode,
24964 gen_rtx_PLUS (Pmode,
24965 stack_limit_rtx,
24966 GEN_INT (size)));
24968 emit_insn (gen_elf_high (tmp_reg, toload));
24969 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
24970 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
24971 const0_rtx));
24973 else
24974 warning (0, "stack limit expression is not supported");
24977 if (flag_stack_clash_protection)
24979 if (size < get_stack_clash_protection_guard_size ())
24980 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
24981 else
24983 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
24984 copy_reg);
24986 /* If we asked for a copy with an offset, then we still need to add in
24987 the offset. */
24988 if (copy_reg && copy_off)
24989 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
24990 return insn;
24994 if (copy_reg)
24996 if (copy_off != 0)
24997 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
24998 else
24999 emit_move_insn (copy_reg, stack_reg);
25002 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25003 it now and set the alias set/attributes. The above gen_*_update
25004 calls will generate a PARALLEL with the MEM set being the first
25005 operation. */
25006 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25007 return insn;
25010 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25012 #if PROBE_INTERVAL > 32768
25013 #error Cannot use indexed addressing mode for stack probing
25014 #endif
25016 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25017 inclusive. These are offsets from the current stack pointer. */
25019 static void
25020 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25022 /* See if we have a constant small number of probes to generate. If so,
25023 that's the easy case. */
25024 if (first + size <= 32768)
25026 HOST_WIDE_INT i;
25028 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25029 it exceeds SIZE. If only one probe is needed, this will not
25030 generate any code. Then probe at FIRST + SIZE. */
25031 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25032 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25033 -(first + i)));
25035 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25036 -(first + size)));
25039 /* Otherwise, do the same as above, but in a loop. Note that we must be
25040 extra careful with variables wrapping around because we might be at
25041 the very top (or the very bottom) of the address space and we have
25042 to be able to handle this case properly; in particular, we use an
25043 equality test for the loop condition. */
25044 else
25046 HOST_WIDE_INT rounded_size;
25047 rtx r12 = gen_rtx_REG (Pmode, 12);
25048 rtx r0 = gen_rtx_REG (Pmode, 0);
25050 /* Sanity check for the addressing mode we're going to use. */
25051 gcc_assert (first <= 32768);
25053 /* Step 1: round SIZE to the previous multiple of the interval. */
25055 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25058 /* Step 2: compute initial and final value of the loop counter. */
25060 /* TEST_ADDR = SP + FIRST. */
25061 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25062 -first)));
25064 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25065 if (rounded_size > 32768)
25067 emit_move_insn (r0, GEN_INT (-rounded_size));
25068 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25070 else
25071 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25072 -rounded_size)));
25075 /* Step 3: the loop
25079 do { TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25080 probe at TEST_ADDR
25082 } while (TEST_ADDR != LAST_ADDR)
25084 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25085 until it is equal to ROUNDED_SIZE. */
25087 if (TARGET_64BIT)
25088 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25089 else
25090 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25093 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25094 that SIZE is equal to ROUNDED_SIZE. */
25096 if (size != rounded_size)
25097 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
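/* Illustrative sketch, not part of the original file: the offsets the
   small-size path above probes.  For first = 16384, size = 12288 and a
   4096-byte interval it prints sp-20480, sp-24576 and sp-28672.  */
#include <stdio.h>

static void
demo_probe_offsets (long first, long size, long interval)
{
  for (long i = interval; i < size; i += interval)
    printf ("probe at sp-%ld\n", first + i);
  printf ("probe at sp-%ld\n", first + size);
}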
25101 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25102 addresses, not offsets. */
25104 static const char *
25105 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25107 static int labelno = 0;
25108 char loop_lab[32];
25109 rtx xops[2];
25111 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25113 /* Loop. */
25114 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25116 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25117 xops[0] = reg1;
25118 xops[1] = GEN_INT (-PROBE_INTERVAL);
25119 output_asm_insn ("addi %0,%0,%1", xops);
25121 /* Probe at TEST_ADDR. */
25122 xops[1] = gen_rtx_REG (Pmode, 0);
25123 output_asm_insn ("stw %1,0(%0)", xops);
25125 /* Test if TEST_ADDR == LAST_ADDR. */
25126 xops[1] = reg2;
25127 if (TARGET_64BIT)
25128 output_asm_insn ("cmpd 0,%0,%1", xops);
25129 else
25130 output_asm_insn ("cmpw 0,%0,%1", xops);
25132 /* Branch. */
25133 fputs ("\tbne 0,", asm_out_file);
25134 assemble_name_raw (asm_out_file, loop_lab);
25135 fputc ('\n', asm_out_file);
25137 return "";
25140 /* This function is called when rs6000_frame_related is processing
25141 SETs within a PARALLEL, and returns whether the REGNO save ought to
25142 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25143 for out-of-line register save functions, store multiple, and the
25144 Darwin world_save. They may contain registers that don't really
25145 need saving. */
25147 static bool
25148 interesting_frame_related_regno (unsigned int regno)
25150 /* Apparent saves of r0 are actually saving LR. It doesn't make
25151 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25152 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25153 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25154 as frame related. */
25155 if (regno == 0)
25156 return true;
25157 /* If we see CR2 then we are here on a Darwin world save. Saves of
25158 CR2 signify the whole CR is being saved. This is a long-standing
25159 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25160 that CR needs to be saved. */
25161 if (regno == CR2_REGNO)
25162 return true;
25163 /* Omit frame info for any user-defined global regs. If frame info
25164 is supplied for them, frame unwinding will restore a user reg.
25165 Also omit frame info for any reg we don't need to save, as that
25166 bloats frame info and can cause problems with shrink wrapping.
25167 Since global regs won't be seen as needing to be saved, both of
25168 these conditions are covered by save_reg_p. */
25169 return save_reg_p (regno);
25172 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25173 addresses, not offsets.
25175 REG2 contains the backchain that must be stored into *sp at each allocation.
25177 This is subtly different from the Ada probing above in that it tries hard
25178 to prevent attacks that jump the stack guard. Thus, it is never allowed
25179 to allocate more than PROBE_INTERVAL bytes of stack space without a
25180 suitable probe. */
25182 static const char *
25183 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25185 static int labelno = 0;
25186 char loop_lab[32];
25187 rtx xops[3];
25189 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25191 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25193 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25195 /* This allocates and probes. */
25196 xops[0] = reg1;
25197 xops[1] = reg2;
25198 xops[2] = GEN_INT (-probe_interval);
25199 if (TARGET_64BIT)
25200 output_asm_insn ("stdu %1,%2(%0)", xops);
25201 else
25202 output_asm_insn ("stwu %1,%2(%0)", xops);
25204 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25205 xops[0] = reg1;
25206 xops[1] = reg3;
25207 if (TARGET_64BIT)
25208 output_asm_insn ("cmpd 0,%0,%1", xops);
25209 else
25210 output_asm_insn ("cmpw 0,%0,%1", xops);
25212 fputs ("\tbne 0,", asm_out_file);
25213 assemble_name_raw (asm_out_file, loop_lab);
25214 fputc ('\n', asm_out_file);
25216 return "";
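/* An illustrative 64-bit expansion (register numbers and the probe
   interval depend on the operands and target flags):

       .LPSRL1:
	       stdu 0,-4096(12)   # allocate PROBE_INTERVAL bytes and
				  # store the backchain; the store is
				  # itself the probe
	       cmpd 0,12,31       # reached the final address?
	       bne 0,.LPSRL1

   Because stdu allocates and stores in a single instruction, there
   is never a window in which more than PROBE_INTERVAL bytes exist
   below the last probe, which is exactly the property stack-clash
   protection needs.  */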
25219 /* Wrapper around the output_probe_stack_range routines. */
25220 const char *
25221 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25223 if (flag_stack_clash_protection)
25224 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25225 else
25226 return output_probe_stack_range_1 (reg1, reg3);
25229 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25230 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25231 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25232 deduce these equivalences by itself so it wasn't necessary to hold
25233 its hand so much. Don't be tempted to always supply d2_f_d_e with
25234 the actual CFA register, i.e. r31 when we are using a hard frame
25235 pointer. That fails when saving regs off r1, and sched moves the
25236 r31 setup past the reg saves. */
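/* An illustrative example (not from the sources): if r11 was set up
   as r1 + 4096 and the insn saves r30 through r11, the note rewrites
   the address to be stack-pointer relative:

       insn:  (set (mem (plus (reg 11) (const_int -8))) (reg 30))
       note:  (set (mem (plus (reg 1) (const_int 4088))) (reg 30))

   so the unwinder sees the slot at its CFA-relative location.  */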
25238 static rtx_insn *
25239 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25240 rtx reg2, rtx repl2)
25242 rtx repl;
25244 if (REGNO (reg) == STACK_POINTER_REGNUM)
25246 gcc_checking_assert (val == 0);
25247 repl = NULL_RTX;
25249 else
25250 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25251 GEN_INT (val));
25253 rtx pat = PATTERN (insn);
25254 if (!repl && !reg2)
25256 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25257 if (GET_CODE (pat) == PARALLEL)
25258 for (int i = 0; i < XVECLEN (pat, 0); i++)
25259 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25261 rtx set = XVECEXP (pat, 0, i);
25263 if (!REG_P (SET_SRC (set))
25264 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25265 RTX_FRAME_RELATED_P (set) = 1;
25267 RTX_FRAME_RELATED_P (insn) = 1;
25268 return insn;
25271 /* We expect that 'pat' is either a SET or a PARALLEL containing
25272 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25273 are important so they all have to be marked RTX_FRAME_RELATED_P.
25274 Call simplify_replace_rtx on the SETs rather than the whole insn
25275 so as to leave the other stuff alone (for example USE of r12). */
25277 set_used_flags (pat);
25278 if (GET_CODE (pat) == SET)
25280 if (repl)
25281 pat = simplify_replace_rtx (pat, reg, repl);
25282 if (reg2)
25283 pat = simplify_replace_rtx (pat, reg2, repl2);
25285 else if (GET_CODE (pat) == PARALLEL)
25287 pat = shallow_copy_rtx (pat);
25288 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25290 for (int i = 0; i < XVECLEN (pat, 0); i++)
25291 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25293 rtx set = XVECEXP (pat, 0, i);
25295 if (repl)
25296 set = simplify_replace_rtx (set, reg, repl);
25297 if (reg2)
25298 set = simplify_replace_rtx (set, reg2, repl2);
25299 XVECEXP (pat, 0, i) = set;
25301 if (!REG_P (SET_SRC (set))
25302 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25303 RTX_FRAME_RELATED_P (set) = 1;
25306 else
25307 gcc_unreachable ();
25309 RTX_FRAME_RELATED_P (insn) = 1;
25310 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25312 return insn;
25315 /* Returns an insn that has a vrsave set operation with the
25316 appropriate CLOBBERs. */
25318 static rtx
25319 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25321 int nclobs, i;
25322 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25323 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25325 clobs[0]
25326 = gen_rtx_SET (vrsave,
25327 gen_rtx_UNSPEC_VOLATILE (SImode,
25328 gen_rtvec (2, reg, vrsave),
25329 UNSPECV_SET_VRSAVE));
25331 nclobs = 1;
25333 /* We need to clobber the registers in the mask so the scheduler
25334 does not move sets to VRSAVE before sets of AltiVec registers.
25336 However, if the function receives nonlocal gotos, reload will set
25337 all call saved registers live. We will end up with:
25339 (set (reg 999) (mem))
25340 (parallel [ (set (reg vrsave) (unspec blah))
25341 (clobber (reg 999))])
25343 The clobber will cause the store into reg 999 to be dead, and
25344 flow will attempt to delete an epilogue insn. In this case, we
25345 need an unspec use/set of the register. */
25347 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25348 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25350 if (!epiloguep || call_used_regs[i])
25351 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25352 gen_rtx_REG (V4SImode, i));
25353 else
25355 rtx reg = gen_rtx_REG (V4SImode, i);
25357 clobs[nclobs++]
25358 = gen_rtx_SET (reg,
25359 gen_rtx_UNSPEC (V4SImode,
25360 gen_rtvec (1, reg), 27));
25364 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25366 for (i = 0; i < nclobs; ++i)
25367 XVECEXP (insn, 0, i) = clobs[i];
25369 return insn;
25372 static rtx
25373 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25375 rtx addr, mem;
25377 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25378 mem = gen_frame_mem (GET_MODE (reg), addr);
25379 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25382 static rtx
25383 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25385 return gen_frame_set (reg, frame_reg, offset, false);
25388 static rtx
25389 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25391 return gen_frame_set (reg, frame_reg, offset, true);
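/* For instance (illustrative only), on a 64-bit target

       gen_frame_store (gen_rtx_REG (Pmode, 31), sp_reg, -8)

   produces (set (mem/c:DI (plus:DI (reg 1) (const_int -8))) (reg 31));
   gen_frame_load simply swaps the SET operands.  */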
25394 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25395 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25397 static rtx_insn *
25398 emit_frame_save (rtx frame_reg, machine_mode mode,
25399 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25401 rtx reg;
25403 /* Reject modes that would need register indexed addressing: AltiVec vector modes are allowed here only when VSX offset addressing can handle them. */
25404 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25405 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25407 reg = gen_rtx_REG (mode, regno);
25408 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25409 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25410 NULL_RTX, NULL_RTX);
25413 /* Emit an offset memory reference suitable for a frame store, while
25414 converting to a valid addressing mode. */
25416 static rtx
25417 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25419 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25422 #ifndef TARGET_FIX_AND_CONTINUE
25423 #define TARGET_FIX_AND_CONTINUE 0
25424 #endif
25426 /* The first out-of-line-saved register is really GPR 13 or 14, FPR 14, or VR 20; FIRST_SAVRES_REGISTER must be the smallest of these. */
25427 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25428 #define LAST_SAVRES_REGISTER 31
25429 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25431 enum {
25432 SAVRES_LR = 0x1,
25433 SAVRES_SAVE = 0x2,
25434 SAVRES_REG = 0x0c,
25435 SAVRES_GPR = 0,
25436 SAVRES_FPR = 4,
25437 SAVRES_VR = 8
25440 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
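/* A SEL value is a small bitmask built from the flags above; e.g.
   SAVRES_SAVE | SAVRES_FPR | SAVRES_LR (= 7) names the FPR save
   routine that also saves LR.  The three SAVRES_REG values crossed
   with the two boolean flags give the 12 possible selectors, hence
   the second dimension of savres_routine_syms.  */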
25442 /* Temporary holding space for an out-of-line register save/restore
25443 routine name. */
25444 static char savres_routine_name[30];
25446 /* Return the name for an out-of-line register save/restore routine.
25447 SEL selects GPR/FPR/VR, save vs. restore, and whether LR is also handled. */
25449 static char *
25450 rs6000_savres_routine_name (int regno, int sel)
25452 const char *prefix = "";
25453 const char *suffix = "";
25455 /* Different targets are supposed to define
25456 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25457 routine name could be defined with:
25459 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25461 This is a nice idea in theory, but in reality things are
25462 complicated in several ways:
25464 - ELF targets have save/restore routines for GPRs.
25466 - PPC64 ELF targets have routines for save/restore of GPRs that
25467 differ in what they do with the link register, so having a set
25468 prefix doesn't work. (We only use one of the save routines at
25469 the moment, though.)
25471 - PPC32 ELF targets have "exit" versions of the restore routines
25472 that restore the link register and can save some extra space.
25473 These require an extra suffix. (There are also "tail" versions
25474 of the restore routines and "GOT" versions of the save routines,
25475 but we don't generate those at present. Same problems apply,
25476 though.)
25478 We deal with all this by synthesizing our own prefix/suffix and
25479 using that for the simple sprintf call shown above. */
25480 if (DEFAULT_ABI == ABI_V4)
25482 if (TARGET_64BIT)
25483 goto aix_names;
25485 if ((sel & SAVRES_REG) == SAVRES_GPR)
25486 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25487 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25488 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25489 else if ((sel & SAVRES_REG) == SAVRES_VR)
25490 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25491 else
25492 abort ();
25494 if ((sel & SAVRES_LR))
25495 suffix = "_x";
25497 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25499 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25500 /* No out-of-line save/restore routines for GPRs on AIX. */
25501 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25502 #endif
25504 aix_names:
25505 if ((sel & SAVRES_REG) == SAVRES_GPR)
25506 prefix = ((sel & SAVRES_SAVE)
25507 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25508 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25509 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25511 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25512 if ((sel & SAVRES_LR))
25513 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25514 else
25515 #endif
25517 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25518 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25521 else if ((sel & SAVRES_REG) == SAVRES_VR)
25522 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25523 else
25524 abort ();
25527 if (DEFAULT_ABI == ABI_DARWIN)
25529 /* The Darwin approach is (slightly) different, in order to be
25530 compatible with code generated by the system toolchain. There is a
25531 single symbol for the start of the save sequence, and the code here
25532 encodes an offset into that sequence based on the first register
25533 to be saved. */
25534 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25535 if ((sel & SAVRES_REG) == SAVRES_GPR)
25536 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25537 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25538 (regno - 13) * 4, prefix, regno);
25539 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25540 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25541 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25542 else if ((sel & SAVRES_REG) == SAVRES_VR)
25543 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25544 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25545 else
25546 abort ();
25548 else
25549 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25551 return savres_routine_name;
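/* Examples of the names produced (illustrative): for regno 28 on
   32-bit ELF, SAVRES_SAVE | SAVRES_GPR gives "_savegpr_28" and the
   LR-restoring restore gives "_restgpr_28_x"; on ELFv2,
   SAVRES_SAVE | SAVRES_GPR | SAVRES_LR gives "_savegpr0_28".  */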
25554 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore
25555 routine, selected by SEL as for rs6000_savres_routine_name above. */
25557 static rtx
25558 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25560 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25561 ? info->first_gp_reg_save
25562 : (sel & SAVRES_REG) == SAVRES_FPR
25563 ? info->first_fp_reg_save - 32
25564 : (sel & SAVRES_REG) == SAVRES_VR
25565 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25566 : -1);
25567 rtx sym;
25568 int select = sel;
25570 /* Don't generate bogus routine names. */
25571 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25572 && regno <= LAST_SAVRES_REGISTER
25573 && select >= 0 && select <= 12);
25575 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25577 if (sym == NULL)
25579 char *name;
25581 name = rs6000_savres_routine_name (regno, sel);
25583 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25584 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25585 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25588 return sym;
25591 /* Emit a sequence of insns, including a stack tie if needed, for
25592 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25593 reset the stack pointer, but move the base of the frame into
25594 reg UPDT_REGNO for use by out-of-line register restore routines. */
25596 static rtx
25597 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25598 unsigned updt_regno)
25600 /* If there is nothing to do, don't do anything. */
25601 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25602 return NULL_RTX;
25604 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25606 /* This blockage is needed so that sched doesn't decide to move
25607 the sp change before the register restores. */
25608 if (DEFAULT_ABI == ABI_V4)
25609 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25610 GEN_INT (frame_off)));
25612 /* If we are restoring registers out-of-line, we will be using the
25613 "exit" variants of the restore routines, which will reset the
25614 stack for us. But we do need to point updt_reg into the
25615 right place for those routines. */
25616 if (frame_off != 0)
25617 return emit_insn (gen_add3_insn (updt_reg_rtx,
25618 frame_reg_rtx, GEN_INT (frame_off)));
25619 else
25620 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25622 return NULL_RTX;
25625 /* Return the register number used as a pointer by out-of-line
25626 save/restore functions. */
25628 static inline unsigned
25629 ptr_regno_for_savres (int sel)
25631 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25632 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25633 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
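/* In other words: AIX and ELFv2 use r1 for the FPR routines and for
   any LR variant, and r12 otherwise; Darwin uses r1 for the FPR
   routines; all remaining cases use r11.  */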
25636 /* Construct a parallel rtx describing the effect of a call to an
25637 out-of-line register save/restore routine, and emit the insn
25638 or jump_insn as appropriate. */
25640 static rtx_insn *
25641 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25642 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25643 machine_mode reg_mode, int sel)
25645 int i;
25646 int offset, start_reg, end_reg, n_regs, use_reg;
25647 int reg_size = GET_MODE_SIZE (reg_mode);
25648 rtx sym;
25649 rtvec p;
25650 rtx par;
25651 rtx_insn *insn;
25653 offset = 0;
25654 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25655 ? info->first_gp_reg_save
25656 : (sel & SAVRES_REG) == SAVRES_FPR
25657 ? info->first_fp_reg_save
25658 : (sel & SAVRES_REG) == SAVRES_VR
25659 ? info->first_altivec_reg_save
25660 : -1);
25661 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25662 ? 32
25663 : (sel & SAVRES_REG) == SAVRES_FPR
25664 ? 64
25665 : (sel & SAVRES_REG) == SAVRES_VR
25666 ? LAST_ALTIVEC_REGNO + 1
25667 : -1);
25668 n_regs = end_reg - start_reg;
25669 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25670 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25671 + n_regs);
25673 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25674 RTVEC_ELT (p, offset++) = ret_rtx;
25676 RTVEC_ELT (p, offset++)
25677 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25679 sym = rs6000_savres_routine_sym (info, sel);
25680 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25682 use_reg = ptr_regno_for_savres (sel);
25683 if ((sel & SAVRES_REG) == SAVRES_VR)
25685 /* Vector regs are saved/restored using [reg+reg] addressing. */
25686 RTVEC_ELT (p, offset++)
25687 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25688 RTVEC_ELT (p, offset++)
25689 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25691 else
25692 RTVEC_ELT (p, offset++)
25693 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25695 for (i = 0; i < end_reg - start_reg; i++)
25696 RTVEC_ELT (p, i + offset)
25697 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25698 frame_reg_rtx, save_area_offset + reg_size * i,
25699 (sel & SAVRES_SAVE) != 0);
25701 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25702 RTVEC_ELT (p, i + offset)
25703 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25705 par = gen_rtx_PARALLEL (VOIDmode, p);
25707 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25709 insn = emit_jump_insn (par);
25710 JUMP_LABEL (insn) = ret_rtx;
25712 else
25713 insn = emit_insn (par);
25714 return insn;
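/* The PARALLEL built above for, say, an ELFv2 GPR save that also
   stores LR looks roughly like this (an illustrative sketch, with
   the addresses elided):

       (parallel [(clobber (reg:P LR_REGNO))
		  (use (symbol_ref "_savegpr0_28"))
		  (use (reg:P 1))
		  (set (mem (...)) (reg 28))
		  ...
		  (set (mem (...)) (reg 31))
		  (set (mem (...)) (reg 0))])

   A restore that also returns instead begins with a return rtx and
   is emitted as a jump_insn.  */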
25717 /* Emit prologue code to store CR fields that need to be saved into REG. This
25718 function should only be called when moving the non-volatile CRs to REG, it
25719 is not a general purpose routine to move the entire set of CRs to REG.
25720 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
25721 volatile CRs. */
25723 static void
25724 rs6000_emit_prologue_move_from_cr (rtx reg)
25726 /* Only the ELFv2 ABI allows storing only selected fields. */
25727 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25729 int i, cr_reg[8], count = 0;
25731 /* Collect CR fields that must be saved. */
25732 for (i = 0; i < 8; i++)
25733 if (save_reg_p (CR0_REGNO + i))
25734 cr_reg[count++] = i;
25736 /* If it's just a single one, use mfcrf. */
25737 if (count == 1)
25739 rtvec p = rtvec_alloc (1);
25740 rtvec r = rtvec_alloc (2);
25741 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
25742 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
25743 RTVEC_ELT (p, 0)
25744 = gen_rtx_SET (reg,
25745 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
25747 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
25748 return;
25751 /* ??? It might be better to handle the count == 2 or 3 cases here
25752 as well, using logical operations to combine the values. */
25755 emit_insn (gen_prologue_movesi_from_cr (reg));
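/* For a single live field, say CR2, the code above emits an
   illustrative sketch like the following (assuming CR0_REGNO is 68,
   so CR2 is reg 70):

       (parallel [(set (reg:SI <reg>)
		       (unspec:SI [(reg:CC 70) (const_int 0x20)]
				  UNSPEC_MOVESI_FROM_CR))])

   i.e. a single-field mfocrf rather than a full mfcr.  */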
25758 /* Return whether the split-stack arg pointer (r12) is used. */
25760 static bool
25761 split_stack_arg_pointer_used_p (void)
25763 /* If the pseudo holding the arg pointer is no longer a pseudo,
25764 then the arg pointer is used. */
25765 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
25766 && (!REG_P (cfun->machine->split_stack_arg_pointer)
25767 || (REGNO (cfun->machine->split_stack_arg_pointer)
25768 < FIRST_PSEUDO_REGISTER)))
25769 return true;
25771 /* Unfortunately we also need to do some code scanning, since
25772 r12 may have been substituted for the pseudo. */
25773 rtx_insn *insn;
25774 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
25775 FOR_BB_INSNS (bb, insn)
25776 if (NONDEBUG_INSN_P (insn))
25778 /* A call destroys r12. */
25779 if (CALL_P (insn))
25780 return false;
25782 df_ref use;
25783 FOR_EACH_INSN_USE (use, insn)
25785 rtx x = DF_REF_REG (use);
25786 if (REG_P (x) && REGNO (x) == 12)
25787 return true;
25789 df_ref def;
25790 FOR_EACH_INSN_DEF (def, insn)
25792 rtx x = DF_REF_REG (def);
25793 if (REG_P (x) && REGNO (x) == 12)
25794 return false;
25797 return bitmap_bit_p (DF_LR_OUT (bb), 12);
25800 /* Return whether we need to emit an ELFv2 global entry point prologue. */
25802 static bool
25803 rs6000_global_entry_point_needed_p (void)
25805 /* Only needed for the ELFv2 ABI. */
25806 if (DEFAULT_ABI != ABI_ELFv2)
25807 return false;
25809 /* With -msingle-pic-base, we assume the whole program shares the same
25810 TOC, so no global entry point prologues are needed anywhere. */
25811 if (TARGET_SINGLE_PIC_BASE)
25812 return false;
25814 /* Ensure we have a global entry point for thunks. ??? We could
25815 avoid that if the target routine doesn't need a global entry point,
25816 but at this point we do not know whether that is the case. */
25817 if (cfun->is_thunk)
25818 return true;
25820 /* For regular functions, rs6000_emit_prologue sets this flag if the
25821 routine ever uses the TOC pointer. */
25822 return cfun->machine->r2_setup_needed;
25825 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
25826 static sbitmap
25827 rs6000_get_separate_components (void)
25829 rs6000_stack_t *info = rs6000_stack_info ();
25831 if (WORLD_SAVE_P (info))
25832 return NULL;
25834 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
25835 && !(info->savres_strategy & REST_MULTIPLE));
25837 /* Component 0 is the save/restore of LR (done via GPR0).
25838 Component 2 is the save of the TOC (GPR2).
25839 Components 13..31 are the save/restore of GPR13..GPR31.
25840 Components 46..63 are the save/restore of FPR14..FPR31. */
25842 cfun->machine->n_components = 64;
25844 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25845 bitmap_clear (components);
25847 int reg_size = TARGET_32BIT ? 4 : 8;
25848 int fp_reg_size = 8;
25850 /* The GPRs we need saved to the frame. */
25851 if ((info->savres_strategy & SAVE_INLINE_GPRS)
25852 && (info->savres_strategy & REST_INLINE_GPRS))
25854 int offset = info->gp_save_offset;
25855 if (info->push_p)
25856 offset += info->total_size;
25858 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
25860 if (IN_RANGE (offset, -0x8000, 0x7fff)
25861 && save_reg_p (regno))
25862 bitmap_set_bit (components, regno);
25864 offset += reg_size;
25868 /* Don't mess with the hard frame pointer. */
25869 if (frame_pointer_needed)
25870 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
25872 /* Don't mess with the fixed TOC register. */
25873 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
25874 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
25875 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
25876 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
25878 /* The FPRs we need saved to the frame. */
25879 if ((info->savres_strategy & SAVE_INLINE_FPRS)
25880 && (info->savres_strategy & REST_INLINE_FPRS))
25882 int offset = info->fp_save_offset;
25883 if (info->push_p)
25884 offset += info->total_size;
25886 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
25888 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
25889 bitmap_set_bit (components, regno);
25891 offset += fp_reg_size;
25895 /* Optimize LR save and restore if we can. This is component 0. Any
25896 out-of-line register save/restore routines need LR. */
25897 if (info->lr_save_p
25898 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
25899 && (info->savres_strategy & SAVE_INLINE_GPRS)
25900 && (info->savres_strategy & REST_INLINE_GPRS)
25901 && (info->savres_strategy & SAVE_INLINE_FPRS)
25902 && (info->savres_strategy & REST_INLINE_FPRS)
25903 && (info->savres_strategy & SAVE_INLINE_VRS)
25904 && (info->savres_strategy & REST_INLINE_VRS))
25906 int offset = info->lr_save_offset;
25907 if (info->push_p)
25908 offset += info->total_size;
25909 if (IN_RANGE (offset, -0x8000, 0x7fff))
25910 bitmap_set_bit (components, 0);
25913 /* Optimize saving the TOC. This is component 2. */
25914 if (cfun->machine->save_toc_in_prologue)
25915 bitmap_set_bit (components, 2);
25917 return components;
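/* As a concrete (illustrative) example: a 64-bit ELFv2 function that
   saves LR, r30, r31 and f31 inline, all within 16-bit offsets of
   the frame base, ends up with components {0, 30, 31, 63} set, plus
   component 2 if the TOC save is done in the prologue.  */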
25920 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
25921 static sbitmap
25922 rs6000_components_for_bb (basic_block bb)
25924 rs6000_stack_t *info = rs6000_stack_info ();
25926 bitmap in = DF_LIVE_IN (bb);
25927 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
25928 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
25930 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25931 bitmap_clear (components);
25933 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
25935 /* GPRs. */
25936 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
25937 if (bitmap_bit_p (in, regno)
25938 || bitmap_bit_p (gen, regno)
25939 || bitmap_bit_p (kill, regno))
25940 bitmap_set_bit (components, regno);
25942 /* FPRs. */
25943 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
25944 if (bitmap_bit_p (in, regno)
25945 || bitmap_bit_p (gen, regno)
25946 || bitmap_bit_p (kill, regno))
25947 bitmap_set_bit (components, regno);
25949 /* The link register. */
25950 if (bitmap_bit_p (in, LR_REGNO)
25951 || bitmap_bit_p (gen, LR_REGNO)
25952 || bitmap_bit_p (kill, LR_REGNO))
25953 bitmap_set_bit (components, 0);
25955 /* The TOC save. */
25956 if (bitmap_bit_p (in, TOC_REGNUM)
25957 || bitmap_bit_p (gen, TOC_REGNUM)
25958 || bitmap_bit_p (kill, TOC_REGNUM))
25959 bitmap_set_bit (components, 2);
25961 return components;
25964 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
25965 static void
25966 rs6000_disqualify_components (sbitmap components, edge e,
25967 sbitmap edge_components, bool /*is_prologue*/)
25969 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
25970 live where we want to place that code. */
25971 if (bitmap_bit_p (edge_components, 0)
25972 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
25974 if (dump_file)
25975 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
25976 "on entry to bb %d\n", e->dest->index);
25977 bitmap_clear_bit (components, 0);
25981 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
25982 static void
25983 rs6000_emit_prologue_components (sbitmap components)
25985 rs6000_stack_t *info = rs6000_stack_info ();
25986 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
25987 ? HARD_FRAME_POINTER_REGNUM
25988 : STACK_POINTER_REGNUM);
25990 machine_mode reg_mode = Pmode;
25991 int reg_size = TARGET_32BIT ? 4 : 8;
25992 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
25993 int fp_reg_size = 8;
25995 /* Prologue for LR. */
25996 if (bitmap_bit_p (components, 0))
25998 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
25999 rtx reg = gen_rtx_REG (reg_mode, 0);
26000 rtx_insn *insn = emit_move_insn (reg, lr);
26001 RTX_FRAME_RELATED_P (insn) = 1;
26002 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26004 int offset = info->lr_save_offset;
26005 if (info->push_p)
26006 offset += info->total_size;
26008 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26009 RTX_FRAME_RELATED_P (insn) = 1;
26010 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26011 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26014 /* Prologue for TOC. */
26015 if (bitmap_bit_p (components, 2))
26017 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26018 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26019 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26022 /* Prologue for the GPRs. */
26023 int offset = info->gp_save_offset;
26024 if (info->push_p)
26025 offset += info->total_size;
26027 for (int i = info->first_gp_reg_save; i < 32; i++)
26029 if (bitmap_bit_p (components, i))
26031 rtx reg = gen_rtx_REG (reg_mode, i);
26032 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26033 RTX_FRAME_RELATED_P (insn) = 1;
26034 rtx set = copy_rtx (single_set (insn));
26035 add_reg_note (insn, REG_CFA_OFFSET, set);
26038 offset += reg_size;
26041 /* Prologue for the FPRs. */
26042 offset = info->fp_save_offset;
26043 if (info->push_p)
26044 offset += info->total_size;
26046 for (int i = info->first_fp_reg_save; i < 64; i++)
26048 if (bitmap_bit_p (components, i))
26050 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26051 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26052 RTX_FRAME_RELATED_P (insn) = 1;
26053 rtx set = copy_rtx (single_set (insn));
26054 add_reg_note (insn, REG_CFA_OFFSET, set);
26057 offset += fp_reg_size;
26061 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26062 static void
26063 rs6000_emit_epilogue_components (sbitmap components)
26065 rs6000_stack_t *info = rs6000_stack_info ();
26066 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26067 ? HARD_FRAME_POINTER_REGNUM
26068 : STACK_POINTER_REGNUM);
26070 machine_mode reg_mode = Pmode;
26071 int reg_size = TARGET_32BIT ? 4 : 8;
26073 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26074 int fp_reg_size = 8;
26076 /* Epilogue for the FPRs. */
26077 int offset = info->fp_save_offset;
26078 if (info->push_p)
26079 offset += info->total_size;
26081 for (int i = info->first_fp_reg_save; i < 64; i++)
26083 if (bitmap_bit_p (components, i))
26085 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26086 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26087 RTX_FRAME_RELATED_P (insn) = 1;
26088 add_reg_note (insn, REG_CFA_RESTORE, reg);
26091 offset += fp_reg_size;
26094 /* Epilogue for the GPRs. */
26095 offset = info->gp_save_offset;
26096 if (info->push_p)
26097 offset += info->total_size;
26099 for (int i = info->first_gp_reg_save; i < 32; i++)
26101 if (bitmap_bit_p (components, i))
26103 rtx reg = gen_rtx_REG (reg_mode, i);
26104 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26105 RTX_FRAME_RELATED_P (insn) = 1;
26106 add_reg_note (insn, REG_CFA_RESTORE, reg);
26109 offset += reg_size;
26112 /* Epilogue for LR. */
26113 if (bitmap_bit_p (components, 0))
26115 int offset = info->lr_save_offset;
26116 if (info->push_p)
26117 offset += info->total_size;
26119 rtx reg = gen_rtx_REG (reg_mode, 0);
26120 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26122 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26123 insn = emit_move_insn (lr, reg);
26124 RTX_FRAME_RELATED_P (insn) = 1;
26125 add_reg_note (insn, REG_CFA_RESTORE, lr);
26129 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26130 static void
26131 rs6000_set_handled_components (sbitmap components)
26133 rs6000_stack_t *info = rs6000_stack_info ();
26135 for (int i = info->first_gp_reg_save; i < 32; i++)
26136 if (bitmap_bit_p (components, i))
26137 cfun->machine->gpr_is_wrapped_separately[i] = true;
26139 for (int i = info->first_fp_reg_save; i < 64; i++)
26140 if (bitmap_bit_p (components, i))
26141 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26143 if (bitmap_bit_p (components, 0))
26144 cfun->machine->lr_is_wrapped_separately = true;
26146 if (bitmap_bit_p (components, 2))
26147 cfun->machine->toc_is_wrapped_separately = true;
26150 /* VRSAVE is a bit vector representing which AltiVec registers
26151 are used. The OS uses this to determine which vector
26152 registers to save on a context switch. We need to save
26153 VRSAVE on the stack frame, add whatever AltiVec registers we
26154 used in this function, and do the corresponding magic in the
26155 epilogue. */
26156 static void
26157 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26158 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26160 /* Get VRSAVE into a GPR. */
26161 rtx reg = gen_rtx_REG (SImode, save_regno);
26162 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26163 if (TARGET_MACHO)
26164 emit_insn (gen_get_vrsave_internal (reg));
26165 else
26166 emit_insn (gen_rtx_SET (reg, vrsave));
26168 /* Save VRSAVE. */
26169 int offset = info->vrsave_save_offset + frame_off;
26170 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26172 /* Include the registers in the mask. */
26173 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26175 emit_insn (generate_set_vrsave (reg, info, 0));
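/* ALTIVEC_REG_BIT gives v0 the most significant bit (0x80000000 >>
   (regno - FIRST_ALTIVEC_REGNO)), so a function using v20..v31 would
   OR a mask of 0x00000fff into VRSAVE here (illustrative; the real
   mask is whatever rs6000_stack_info computed).  */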
26178 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26179 called, it left the arg pointer to the old stack in r29. Otherwise, the
26180 arg pointer is the top of the current frame. */
26181 static void
26182 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26183 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26185 cfun->machine->split_stack_argp_used = true;
26187 if (sp_adjust)
26189 rtx r12 = gen_rtx_REG (Pmode, 12);
26190 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26191 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26192 emit_insn_before (set_r12, sp_adjust);
26194 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26196 rtx r12 = gen_rtx_REG (Pmode, 12);
26197 if (frame_off == 0)
26198 emit_move_insn (r12, frame_reg_rtx);
26199 else
26200 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26203 if (info->push_p)
26205 rtx r12 = gen_rtx_REG (Pmode, 12);
26206 rtx r29 = gen_rtx_REG (Pmode, 29);
26207 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26208 rtx not_more = gen_label_rtx ();
26209 rtx jump;
26211 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26212 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26213 gen_rtx_LABEL_REF (VOIDmode, not_more),
26214 pc_rtx);
26215 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26216 JUMP_LABEL (jump) = not_more;
26217 LABEL_NUSES (not_more) += 1;
26218 emit_move_insn (r12, r29);
26219 emit_label (not_more);
26223 /* Emit function prologue as insns. */
26225 void
26226 rs6000_emit_prologue (void)
26228 rs6000_stack_t *info = rs6000_stack_info ();
26229 machine_mode reg_mode = Pmode;
26230 int reg_size = TARGET_32BIT ? 4 : 8;
26231 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26232 int fp_reg_size = 8;
26233 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26234 rtx frame_reg_rtx = sp_reg_rtx;
26235 unsigned int cr_save_regno;
26236 rtx cr_save_rtx = NULL_RTX;
26237 rtx_insn *insn;
26238 int strategy;
26239 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26240 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26241 && call_used_regs[STATIC_CHAIN_REGNUM]);
26242 int using_split_stack = (flag_split_stack
26243 && (lookup_attribute ("no_split_stack",
26244 DECL_ATTRIBUTES (cfun->decl))
26245 == NULL));
26247 /* Offset to top of frame for frame_reg and sp respectively. */
26248 HOST_WIDE_INT frame_off = 0;
26249 HOST_WIDE_INT sp_off = 0;
26250 /* sp_adjust is the stack adjusting instruction, tracked so that the
26251 insn setting up the split-stack arg pointer can be emitted just
26252 prior to it, when r12 is not used here for other purposes. */
26253 rtx_insn *sp_adjust = 0;
26255 #if CHECKING_P
26256 /* Track and check usage of r0, r11, r12. */
26257 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26258 #define START_USE(R) do \
26260 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26261 reg_inuse |= 1 << (R); \
26262 } while (0)
26263 #define END_USE(R) do \
26265 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26266 reg_inuse &= ~(1 << (R)); \
26267 } while (0)
26268 #define NOT_INUSE(R) do \
26270 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26271 } while (0)
26272 #else
26273 #define START_USE(R) do {} while (0)
26274 #define END_USE(R) do {} while (0)
26275 #define NOT_INUSE(R) do {} while (0)
26276 #endif
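/* Usage sketch: START_USE (11) asserts r11 is currently free and
   marks it busy until the matching END_USE (11); NOT_INUSE (0) only
   asserts that r0 is free.  Without CHECKING_P all three expand to
   nothing.  */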
26278 if (DEFAULT_ABI == ABI_ELFv2
26279 && !TARGET_SINGLE_PIC_BASE)
26281 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26283 /* With -mminimal-toc we may generate an extra use of r2 below. */
26284 if (TARGET_TOC && TARGET_MINIMAL_TOC
26285 && !constant_pool_empty_p ())
26286 cfun->machine->r2_setup_needed = true;
26290 if (flag_stack_usage_info)
26291 current_function_static_stack_size = info->total_size;
26293 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26295 HOST_WIDE_INT size = info->total_size;
26297 if (crtl->is_leaf && !cfun->calls_alloca)
26299 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26300 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26301 size - get_stack_check_protect ());
26303 else if (size > 0)
26304 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26307 if (TARGET_FIX_AND_CONTINUE)
26309 /* gdb on Darwin arranges to forward a function from the old
26310 address by modifying the first 5 instructions of the function
26311 to branch to the overriding function. This is necessary to
26312 permit function pointers that point to the old function to
26313 actually forward to the new function. */
26314 emit_insn (gen_nop ());
26315 emit_insn (gen_nop ());
26316 emit_insn (gen_nop ());
26317 emit_insn (gen_nop ());
26318 emit_insn (gen_nop ());
26321 /* Handle world saves specially here. */
26322 if (WORLD_SAVE_P (info))
26324 int i, j, sz;
26325 rtx treg;
26326 rtvec p;
26327 rtx reg0;
26329 /* save_world expects lr in r0. */
26330 reg0 = gen_rtx_REG (Pmode, 0);
26331 if (info->lr_save_p)
26333 insn = emit_move_insn (reg0,
26334 gen_rtx_REG (Pmode, LR_REGNO));
26335 RTX_FRAME_RELATED_P (insn) = 1;
26338 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26339 assumptions about the offsets of various bits of the stack
26340 frame. */
26341 gcc_assert (info->gp_save_offset == -220
26342 && info->fp_save_offset == -144
26343 && info->lr_save_offset == 8
26344 && info->cr_save_offset == 4
26345 && info->push_p
26346 && info->lr_save_p
26347 && (!crtl->calls_eh_return
26348 || info->ehrd_offset == -432)
26349 && info->vrsave_save_offset == -224
26350 && info->altivec_save_offset == -416);
26352 treg = gen_rtx_REG (SImode, 11);
26353 emit_move_insn (treg, GEN_INT (-info->total_size));
26355 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26356 in R11. It also clobbers R12, so beware! */
26358 /* Preserve CR2 for save_world prologues. */
26359 sz = 5;
26360 sz += 32 - info->first_gp_reg_save;
26361 sz += 64 - info->first_fp_reg_save;
26362 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26363 p = rtvec_alloc (sz);
26364 j = 0;
26365 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26366 gen_rtx_REG (SImode,
26367 LR_REGNO));
26368 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26369 gen_rtx_SYMBOL_REF (Pmode,
26370 "*save_world"));
26371 /* We do floats first so that the instruction pattern matches
26372 properly. */
26373 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26374 RTVEC_ELT (p, j++)
26375 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26376 info->first_fp_reg_save + i),
26377 frame_reg_rtx,
26378 info->fp_save_offset + frame_off + 8 * i);
26379 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26380 RTVEC_ELT (p, j++)
26381 = gen_frame_store (gen_rtx_REG (V4SImode,
26382 info->first_altivec_reg_save + i),
26383 frame_reg_rtx,
26384 info->altivec_save_offset + frame_off + 16 * i);
26385 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26386 RTVEC_ELT (p, j++)
26387 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26388 frame_reg_rtx,
26389 info->gp_save_offset + frame_off + reg_size * i);
26391 /* CR register traditionally saved as CR2. */
26392 RTVEC_ELT (p, j++)
26393 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26394 frame_reg_rtx, info->cr_save_offset + frame_off);
26395 /* Explain about use of R0. */
26396 if (info->lr_save_p)
26397 RTVEC_ELT (p, j++)
26398 = gen_frame_store (reg0,
26399 frame_reg_rtx, info->lr_save_offset + frame_off);
26400 /* Explain what happens to the stack pointer. */
26402 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26403 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26406 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26407 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26408 treg, GEN_INT (-info->total_size));
26409 sp_off = frame_off = info->total_size;
26412 strategy = info->savres_strategy;
26414 /* For V.4, update stack before we do any saving and set back pointer. */
26415 if (! WORLD_SAVE_P (info)
26416 && info->push_p
26417 && (DEFAULT_ABI == ABI_V4
26418 || crtl->calls_eh_return))
26420 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26421 || !(strategy & SAVE_INLINE_GPRS)
26422 || !(strategy & SAVE_INLINE_VRS));
26423 int ptr_regno = -1;
26424 rtx ptr_reg = NULL_RTX;
26425 int ptr_off = 0;
26427 if (info->total_size < 32767)
26428 frame_off = info->total_size;
26429 else if (need_r11)
26430 ptr_regno = 11;
26431 else if (info->cr_save_p
26432 || info->lr_save_p
26433 || info->first_fp_reg_save < 64
26434 || info->first_gp_reg_save < 32
26435 || info->altivec_size != 0
26436 || info->vrsave_size != 0
26437 || crtl->calls_eh_return)
26438 ptr_regno = 12;
26439 else
26441 /* The prologue won't be saving any regs so there is no need
26442 to set up a frame register to access any frame save area.
26443 We also won't be using frame_off anywhere below, but set
26444 the correct value anyway to protect against future
26445 changes to this function. */
26446 frame_off = info->total_size;
26448 if (ptr_regno != -1)
26450 /* Set up the frame offset to that needed by the first
26451 out-of-line save function. */
26452 START_USE (ptr_regno);
26453 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26454 frame_reg_rtx = ptr_reg;
26455 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26456 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26457 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26458 ptr_off = info->gp_save_offset + info->gp_size;
26459 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26460 ptr_off = info->altivec_save_offset + info->altivec_size;
26461 frame_off = -ptr_off;
26463 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26464 ptr_reg, ptr_off);
26465 if (REGNO (frame_reg_rtx) == 12)
26466 sp_adjust = 0;
26467 sp_off = info->total_size;
26468 if (frame_reg_rtx != sp_reg_rtx)
26469 rs6000_emit_stack_tie (frame_reg_rtx, false);
26472 /* If we use the link register, get it into r0. */
26473 if (!WORLD_SAVE_P (info) && info->lr_save_p
26474 && !cfun->machine->lr_is_wrapped_separately)
26476 rtx addr, reg, mem;
26478 reg = gen_rtx_REG (Pmode, 0);
26479 START_USE (0);
26480 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26481 RTX_FRAME_RELATED_P (insn) = 1;
26483 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26484 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26486 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26487 GEN_INT (info->lr_save_offset + frame_off));
26488 mem = gen_rtx_MEM (Pmode, addr);
26489 /* This should not use rs6000_sr_alias_set, because of
26490 __builtin_return_address. */
26492 insn = emit_move_insn (mem, reg);
26493 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26494 NULL_RTX, NULL_RTX);
26495 END_USE (0);
26499 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26500 r12 will be needed by out-of-line gpr restore. */
26501 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26502 && !(strategy & (SAVE_INLINE_GPRS
26503 | SAVE_NOINLINE_GPRS_SAVES_LR))
26504 ? 11 : 12);
26505 if (!WORLD_SAVE_P (info)
26506 && info->cr_save_p
26507 && REGNO (frame_reg_rtx) != cr_save_regno
26508 && !(using_static_chain_p && cr_save_regno == 11)
26509 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26511 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26512 START_USE (cr_save_regno);
26513 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26516 /* Do any required saving of fpr's. If only one or two to save, do
26517 it ourselves. Otherwise, call function. */
26518 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26520 int offset = info->fp_save_offset + frame_off;
26521 for (int i = info->first_fp_reg_save; i < 64; i++)
26523 if (save_reg_p (i)
26524 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26525 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26526 sp_off - frame_off);
26528 offset += fp_reg_size;
26531 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26533 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26534 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26535 unsigned ptr_regno = ptr_regno_for_savres (sel);
26536 rtx ptr_reg = frame_reg_rtx;
26538 if (REGNO (frame_reg_rtx) == ptr_regno)
26539 gcc_checking_assert (frame_off == 0);
26540 else
26542 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26543 NOT_INUSE (ptr_regno);
26544 emit_insn (gen_add3_insn (ptr_reg,
26545 frame_reg_rtx, GEN_INT (frame_off)));
26547 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26548 info->fp_save_offset,
26549 info->lr_save_offset,
26550 DFmode, sel);
26551 rs6000_frame_related (insn, ptr_reg, sp_off,
26552 NULL_RTX, NULL_RTX);
26553 if (lr)
26554 END_USE (0);
26557 /* Save GPRs. This is done as a PARALLEL if we are using
26558 the store-multiple instructions. */
26559 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26561 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26562 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26563 unsigned ptr_regno = ptr_regno_for_savres (sel);
26564 rtx ptr_reg = frame_reg_rtx;
26565 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26566 int end_save = info->gp_save_offset + info->gp_size;
26567 int ptr_off;
26569 if (ptr_regno == 12)
26570 sp_adjust = 0;
26571 if (!ptr_set_up)
26572 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26574 /* Need to adjust r11 (r12) if we saved any FPRs. */
26575 if (end_save + frame_off != 0)
26577 rtx offset = GEN_INT (end_save + frame_off);
26579 if (ptr_set_up)
26580 frame_off = -end_save;
26581 else
26582 NOT_INUSE (ptr_regno);
26583 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26585 else if (!ptr_set_up)
26587 NOT_INUSE (ptr_regno);
26588 emit_move_insn (ptr_reg, frame_reg_rtx);
26590 ptr_off = -end_save;
26591 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26592 info->gp_save_offset + ptr_off,
26593 info->lr_save_offset + ptr_off,
26594 reg_mode, sel);
26595 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26596 NULL_RTX, NULL_RTX);
26597 if (lr)
26598 END_USE (0);
26600 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26602 rtvec p;
26603 int i;
26604 p = rtvec_alloc (32 - info->first_gp_reg_save);
26605 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26606 RTVEC_ELT (p, i)
26607 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26608 frame_reg_rtx,
26609 info->gp_save_offset + frame_off + reg_size * i);
26610 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26611 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26612 NULL_RTX, NULL_RTX);
26614 else if (!WORLD_SAVE_P (info))
26616 int offset = info->gp_save_offset + frame_off;
26617 for (int i = info->first_gp_reg_save; i < 32; i++)
26619 if (save_reg_p (i)
26620 && !cfun->machine->gpr_is_wrapped_separately[i])
26621 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26622 sp_off - frame_off);
26624 offset += reg_size;
26628 if (crtl->calls_eh_return)
26630 unsigned int i;
26631 rtvec p;
26633 for (i = 0; ; ++i)
26635 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26636 if (regno == INVALID_REGNUM)
26637 break;
26640 p = rtvec_alloc (i);
26642 for (i = 0; ; ++i)
26644 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26645 if (regno == INVALID_REGNUM)
26646 break;
26648 rtx set
26649 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26650 sp_reg_rtx,
26651 info->ehrd_offset + sp_off + reg_size * (int) i);
26652 RTVEC_ELT (p, i) = set;
26653 RTX_FRAME_RELATED_P (set) = 1;
26656 insn = emit_insn (gen_blockage ());
26657 RTX_FRAME_RELATED_P (insn) = 1;
26658 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26661 /* In the AIX ABI we need to make sure r2 is really saved. */
26662 if (TARGET_AIX && crtl->calls_eh_return)
26664 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26665 rtx join_insn, note;
26666 rtx_insn *save_insn;
26667 long toc_restore_insn;
26669 tmp_reg = gen_rtx_REG (Pmode, 11);
26670 tmp_reg_si = gen_rtx_REG (SImode, 11);
26671 if (using_static_chain_p)
26673 START_USE (0);
26674 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26676 else
26677 START_USE (11);
26678 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26679 /* Peek at the instruction to which this function returns. If it's
26680 restoring r2, then we know we've already saved r2. We can't
26681 unconditionally save r2 because the value we have will already
26682 be updated if we arrived at this function via a plt call or
26683 toc adjusting stub. */
26684 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26685 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26686 + RS6000_TOC_SAVE_SLOT);
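/* These constants are the PowerPC encodings of "lwz r2,0(r1)" and
   "ld r2,0(r1)": primary opcode 32 resp. 58 in the top 6 bits,
   RT = 2 and RA = 1; adding RS6000_TOC_SAVE_SLOT fills in the
   displacement field.  */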
26687 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26688 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26689 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26690 validate_condition_mode (EQ, CCUNSmode);
26691 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26692 emit_insn (gen_rtx_SET (compare_result,
26693 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26694 toc_save_done = gen_label_rtx ();
26695 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26696 gen_rtx_EQ (VOIDmode, compare_result,
26697 const0_rtx),
26698 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26699 pc_rtx);
26700 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26701 JUMP_LABEL (jump) = toc_save_done;
26702 LABEL_NUSES (toc_save_done) += 1;
26704 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26705 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26706 sp_off - frame_off);
26708 emit_label (toc_save_done);
26710 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
26711 have a CFG that has different saves along different paths.
26712 Move the note to a dummy blockage insn, which describes that
26713 R2 is unconditionally saved after the label. */
26714 /* ??? An alternate representation might be a special insn pattern
26715 containing both the branch and the store. That might give the
26716 code that minimizes the number of DW_CFA_advance opcodes more
26717 freedom in placing the annotations.
26718 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26719 if (note)
26720 remove_note (save_insn, note);
26721 else
26722 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26723 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26724 RTX_FRAME_RELATED_P (save_insn) = 0;
26726 join_insn = emit_insn (gen_blockage ());
26727 REG_NOTES (join_insn) = note;
26728 RTX_FRAME_RELATED_P (join_insn) = 1;
26730 if (using_static_chain_p)
26732 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26733 END_USE (0);
26735 else
26736 END_USE (11);
26739 /* Save CR if we use any that must be preserved. */
26740 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26742 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26743 GEN_INT (info->cr_save_offset + frame_off));
26744 rtx mem = gen_frame_mem (SImode, addr);
26746 /* If we didn't copy cr before, do so now using r0. */
26747 if (cr_save_rtx == NULL_RTX)
26749 START_USE (0);
26750 cr_save_rtx = gen_rtx_REG (SImode, 0);
26751 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26754 /* Saving CR requires a two-instruction sequence: one instruction
26755 to move the CR to a general-purpose register, and a second
26756 instruction that stores the GPR to memory.
26758 We do not emit any DWARF CFI records for the first of these,
26759 because we cannot properly represent the fact that CR is saved in
26760 a register. One reason is that we cannot express that multiple
26761 CR fields are saved; another reason is that on 64-bit, the size
26762 of the CR register in DWARF (4 bytes) differs from the size of
26763 a general-purpose register.
26765 This means if any intervening instruction were to clobber one of
26766 the call-saved CR fields, we'd have incorrect CFI. To prevent
26767 this from happening, we mark the store to memory as a use of
26768 those CR fields, which prevents any such instruction from being
26769 scheduled in between the two instructions. */
26770 rtx crsave_v[9];
26771 int n_crsave = 0;
26772 int i;
26774 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26775 for (i = 0; i < 8; i++)
26776 if (save_reg_p (CR0_REGNO + i))
26777 crsave_v[n_crsave++]
26778 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26780 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26781 gen_rtvec_v (n_crsave, crsave_v)));
26782 END_USE (REGNO (cr_save_rtx));
26784 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26785 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26786 so we need to construct a frame expression manually. */
26787 RTX_FRAME_RELATED_P (insn) = 1;
26789 /* Update address to be stack-pointer relative, like
26790 rs6000_frame_related would do. */
26791 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26792 GEN_INT (info->cr_save_offset + sp_off));
26793 mem = gen_frame_mem (SImode, addr);
26795 if (DEFAULT_ABI == ABI_ELFv2)
26797 /* In the ELFv2 ABI we generate separate CFI records for each
26798 CR field that was actually saved. They all point to the
26799 same 32-bit stack slot. */
26800 rtx crframe[8];
26801 int n_crframe = 0;
26803 for (i = 0; i < 8; i++)
26804 if (save_reg_p (CR0_REGNO + i))
26806 crframe[n_crframe]
26807 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
26809 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
26810 n_crframe++;
26813 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26814 gen_rtx_PARALLEL (VOIDmode,
26815 gen_rtvec_v (n_crframe, crframe)));
26817 else
26819 /* In other ABIs, by convention, we use a single CR regnum to
26820 represent the fact that all call-saved CR fields are saved.
26821 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
26822 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
26823 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
26827 /* In the ELFv2 ABI we need to save all call-saved CR fields into
26828 *separate* slots if the routine calls __builtin_eh_return, so
26829 that they can be independently restored by the unwinder. */
26830 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
26832 int i, cr_off = info->ehcr_offset;
26833 rtx crsave;
26835 /* ??? We might get better performance by using multiple mfocrf
26836 instructions. */
26837 crsave = gen_rtx_REG (SImode, 0);
26838 emit_insn (gen_prologue_movesi_from_cr (crsave));
26840 for (i = 0; i < 8; i++)
26841 if (!call_used_regs[CR0_REGNO + i])
26843 rtvec p = rtvec_alloc (2);
26844 RTVEC_ELT (p, 0)
26845 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
26846 RTVEC_ELT (p, 1)
26847 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26849 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26851 RTX_FRAME_RELATED_P (insn) = 1;
26852 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26853 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
26854 sp_reg_rtx, cr_off + sp_off));
26856 cr_off += reg_size;
26860 /* If we are emitting stack probes, but allocate no stack, then
26861 just note that in the dump file. */
26862 if (flag_stack_clash_protection
26863 && dump_file
26864 && !info->push_p)
26865 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
26867 /* Update stack and set back pointer unless this is V.4,
26868 for which it was done previously. */
26869 if (!WORLD_SAVE_P (info) && info->push_p
26870 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
26872 rtx ptr_reg = NULL;
26873 int ptr_off = 0;
26875 /* If saving altivec regs we need to be able to address all save
26876 locations using a 16-bit offset. */
26877 if ((strategy & SAVE_INLINE_VRS) == 0
26878 || (info->altivec_size != 0
26879 && (info->altivec_save_offset + info->altivec_size - 16
26880 + info->total_size - frame_off) > 32767)
26881 || (info->vrsave_size != 0
26882 && (info->vrsave_save_offset
26883 + info->total_size - frame_off) > 32767))
26885 int sel = SAVRES_SAVE | SAVRES_VR;
26886 unsigned ptr_regno = ptr_regno_for_savres (sel);
26888 if (using_static_chain_p
26889 && ptr_regno == STATIC_CHAIN_REGNUM)
26890 ptr_regno = 12;
26891 if (REGNO (frame_reg_rtx) != ptr_regno)
26892 START_USE (ptr_regno);
26893 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26894 frame_reg_rtx = ptr_reg;
26895 ptr_off = info->altivec_save_offset + info->altivec_size;
26896 frame_off = -ptr_off;
26898 else if (REGNO (frame_reg_rtx) == 1)
26899 frame_off = info->total_size;
26900 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26901 ptr_reg, ptr_off);
26902 if (REGNO (frame_reg_rtx) == 12)
26903 sp_adjust = 0;
26904 sp_off = info->total_size;
26905 if (frame_reg_rtx != sp_reg_rtx)
26906 rs6000_emit_stack_tie (frame_reg_rtx, false);
26909 /* Set frame pointer, if needed. */
26910 if (frame_pointer_needed)
26912 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
26913 sp_reg_rtx);
26914 RTX_FRAME_RELATED_P (insn) = 1;
26917 /* Save AltiVec registers if needed. Save here because the red zone does
26918 not always include AltiVec registers. */
26919 if (!WORLD_SAVE_P (info)
26920 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
26922 int end_save = info->altivec_save_offset + info->altivec_size;
26923 int ptr_off;
26924 /* Oddly, the vector save/restore functions point r0 at the end
26925 of the save area, then use r11 or r12 to load offsets for
26926 [reg+reg] addressing. */
26927 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
26928 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
26929 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
26931 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
26932 NOT_INUSE (0);
26933 if (scratch_regno == 12)
26934 sp_adjust = 0;
26935 if (end_save + frame_off != 0)
26937 rtx offset = GEN_INT (end_save + frame_off);
26939 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26941 else
26942 emit_move_insn (ptr_reg, frame_reg_rtx);
26944 ptr_off = -end_save;
26945 insn = rs6000_emit_savres_rtx (info, scratch_reg,
26946 info->altivec_save_offset + ptr_off,
26947 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
26948 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
26949 NULL_RTX, NULL_RTX);
26950 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
26952 /* The oddity mentioned above clobbered our frame reg. */
26953 emit_move_insn (frame_reg_rtx, ptr_reg);
26954 frame_off = ptr_off;
26957 else if (!WORLD_SAVE_P (info)
26958 && info->altivec_size != 0)
26960 int i;
26962 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
26963 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26965 rtx areg, savereg, mem;
26966 HOST_WIDE_INT offset;
26968 offset = (info->altivec_save_offset + frame_off
26969 + 16 * (i - info->first_altivec_reg_save));
26971 savereg = gen_rtx_REG (V4SImode, i);
26973 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
26975 mem = gen_frame_mem (V4SImode,
26976 gen_rtx_PLUS (Pmode, frame_reg_rtx,
26977 GEN_INT (offset)));
26978 insn = emit_insn (gen_rtx_SET (mem, savereg));
26979 areg = NULL_RTX;
26981 else
26983 NOT_INUSE (0);
26984 areg = gen_rtx_REG (Pmode, 0);
26985 emit_move_insn (areg, GEN_INT (offset));
26987 /* AltiVec addressing mode is [reg+reg]. */
26988 mem = gen_frame_mem (V4SImode,
26989 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
26991 /* Rather than emitting a generic move, force use of the stvx
26992 instruction, which we always want on ISA 2.07 (power8) systems.
26993 In particular we don't want xxpermdi/stxvd2x for little
26994 endian. */
26995 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
26998 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26999 areg, GEN_INT (offset));
27003 /* VRSAVE is a bit vector representing which AltiVec registers
27004 are used. The OS uses this to determine which vector
27005 registers to save on a context switch. We need to save
27006 VRSAVE on the stack frame, add whatever AltiVec registers we
27007 used in this function, and do the corresponding magic in the
27008 epilogue. */
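/* For instance (illustrative; see ALTIVEC_REG_BIT in rs6000.h, where
   v0 maps to the most significant bit): a function whose only live
   vector register is v20 would need bit 0x80000000 >> 20 == 0x00000800
   set in the saved VRSAVE value.  */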
27010 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27012 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27013 be using r12 as frame_reg_rtx and r11 as the static chain
27014 pointer for nested functions. */
27015 int save_regno = 12;
27016 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27017 && !using_static_chain_p)
27018 save_regno = 11;
27019 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27021 save_regno = 11;
27022 if (using_static_chain_p)
27023 save_regno = 0;
27025 NOT_INUSE (save_regno);
27027 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27030 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27031 if (!TARGET_SINGLE_PIC_BASE
27032 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27033 && !constant_pool_empty_p ())
27034 || (DEFAULT_ABI == ABI_V4
27035 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27036 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27038 /* If emit_load_toc_table will use the link register, we need to save
27039 it. We use R12 for this purpose because emit_load_toc_table
27040 can use register 0. This allows us to use a plain 'blr' to return
27041 from the procedure more often. */
27042 int save_LR_around_toc_setup = (TARGET_ELF
27043 && DEFAULT_ABI == ABI_V4
27044 && flag_pic
27045 && ! info->lr_save_p
27046 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27047 if (save_LR_around_toc_setup)
27049 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27050 rtx tmp = gen_rtx_REG (Pmode, 12);
27052 sp_adjust = 0;
27053 insn = emit_move_insn (tmp, lr);
27054 RTX_FRAME_RELATED_P (insn) = 1;
27056 rs6000_emit_load_toc_table (TRUE);
27058 insn = emit_move_insn (lr, tmp);
27059 add_reg_note (insn, REG_CFA_RESTORE, lr);
27060 RTX_FRAME_RELATED_P (insn) = 1;
27062 else
27063 rs6000_emit_load_toc_table (TRUE);
27066 #if TARGET_MACHO
27067 if (!TARGET_SINGLE_PIC_BASE
27068 && DEFAULT_ABI == ABI_DARWIN
27069 && flag_pic && crtl->uses_pic_offset_table)
27071 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27072 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27074 /* Save and restore LR locally around this call (in R0). */
27075 if (!info->lr_save_p)
27076 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27078 emit_insn (gen_load_macho_picbase (src));
27080 emit_move_insn (gen_rtx_REG (Pmode,
27081 RS6000_PIC_OFFSET_TABLE_REGNUM),
27082 lr);
27084 if (!info->lr_save_p)
27085 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27087 #endif
27089 /* If we need to, save the TOC register after doing the stack setup.
27090 Do not emit eh frame info for this save. The unwinder wants info,
27091 conceptually attached to instructions in this function, about
27092 register values in the caller of this function. This R2 may have
27093 already been changed from the value in the caller.
27094 We don't attempt to write accurate DWARF EH frame info for R2
27095 because code emitted by gcc for a (non-pointer) function call
27096 doesn't save and restore R2. Instead, R2 is managed out-of-line
27097 by a linker generated plt call stub when the function resides in
27098 a shared library. This behavior is costly to describe in DWARF,
27099 both in terms of the size of DWARF info and the time taken in the
27100 unwinder to interpret it. R2 changes, apart from the
27101 calls_eh_return case earlier in this function, are handled by
27102 linux-unwind.h frob_update_context. */
27103 if (rs6000_save_toc_in_prologue_p ()
27104 && !cfun->machine->toc_is_wrapped_separately)
27106 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27107 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
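/* E.g. on 64-bit ELFv2 the store above comes out as "std 2,24(1)";
   the ABI slot is 40(1) under ELFv1 and 20(1) for 32-bit (see
   RS6000_TOC_SAVE_SLOT).  */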
27110 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27111 if (using_split_stack && split_stack_arg_pointer_used_p ())
27112 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27115 /* Output .extern statements for the save/restore routines we use. */
27117 static void
27118 rs6000_output_savres_externs (FILE *file)
27120 rs6000_stack_t *info = rs6000_stack_info ();
27122 if (TARGET_DEBUG_STACK)
27123 debug_stack_info (info);
27125 /* Write .extern for any function we will call to save and restore
27126 fp values. */
27127 if (info->first_fp_reg_save < 64
27128 && !TARGET_MACHO
27129 && !TARGET_ELF)
27131 char *name;
27132 int regno = info->first_fp_reg_save - 32;
27134 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27136 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27137 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27138 name = rs6000_savres_routine_name (regno, sel);
27139 fprintf (file, "\t.extern %s\n", name);
27141 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27143 bool lr = (info->savres_strategy
27144 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27145 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27146 name = rs6000_savres_routine_name (regno, sel);
27147 fprintf (file, "\t.extern %s\n", name);
27152 /* Write function prologue. */
27154 static void
27155 rs6000_output_function_prologue (FILE *file)
27157 if (!cfun->is_thunk)
27158 rs6000_output_savres_externs (file);
27160 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27161 immediately after the global entry point label. */
27162 if (rs6000_global_entry_point_needed_p ())
27164 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27166 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27168 if (TARGET_CMODEL != CMODEL_LARGE)
27170 /* In the small and medium code models, we assume the TOC is less
27171 than 2 GB away from the text section, so it can be computed via the
27172 following two-instruction sequence. */
27173 char buf[256];
27175 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27176 fprintf (file, "0:\taddis 2,12,.TOC.-");
27177 assemble_name (file, buf);
27178 fprintf (file, "@ha\n");
27179 fprintf (file, "\taddi 2,2,.TOC.-");
27180 assemble_name (file, buf);
27181 fprintf (file, "@l\n");
27183 else
27185 /* In the large code model, we allow arbitrary offsets between the
27186 TOC and the text section, so we have to load the offset from
27187 memory. The data field is emitted directly before the global
27188 entry point in rs6000_elf_declare_function_name. */
27189 char buf[256];
27191 #ifdef HAVE_AS_ENTRY_MARKERS
27192 /* If supported by the linker, emit a marker relocation. If the
27193 total code size of the final executable or shared library
27194 happens to fit into 2 GB after all, the linker will replace
27195 this code sequence with the sequence for the small or medium
27196 code model. */
27197 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27198 #endif
27199 fprintf (file, "\tld 2,");
27200 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27201 assemble_name (file, buf);
27202 fprintf (file, "-");
27203 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27204 assemble_name (file, buf);
27205 fprintf (file, "(12)\n");
27206 fprintf (file, "\tadd 2,2,12\n");
27209 fputs ("\t.localentry\t", file);
27210 assemble_name (file, name);
27211 fputs (",.-", file);
27212 assemble_name (file, name);
27213 fputs ("\n", file);
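/* Putting the two branches together, the stub emitted above looks
   roughly like this for the small/medium code models (illustrative,
   with rs6000_pic_labelno == 0):
       .LCF0:
       0:      addis 2,12,.TOC.-.LCF0@ha
               addi  2,2,.TOC.-.LCF0@l
               .localentry foo,.-foo
   For the large code model the addis/addi pair instead becomes
               ld    2,.LCL0-.LCF0(12)
               add   2,2,12
   loading the TOC offset from the data word emitted before the
   global entry point.  */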
27216 /* Output -mprofile-kernel code. This needs to be done here instead of
27217 in output_function_profile since it must go after the ELFv2 ABI
27218 local entry point. */
27219 if (TARGET_PROFILE_KERNEL && crtl->profile)
27221 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27222 gcc_assert (!TARGET_32BIT);
27224 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27226 /* In the ELFv2 ABI we have no compiler stack word. It must be
27227 the responsibility of _mcount to preserve the static chain
27228 register if required. */
27229 if (DEFAULT_ABI != ABI_ELFv2
27230 && cfun->static_chain_decl != NULL)
27232 asm_fprintf (file, "\tstd %s,24(%s)\n",
27233 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27234 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27235 asm_fprintf (file, "\tld %s,24(%s)\n",
27236 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27238 else
27239 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27242 rs6000_pic_labelno++;
27245 /* -mprofile-kernel code calls mcount before the function prologue,
27246 so a profiled leaf function should stay a leaf function. */
27247 static bool
27248 rs6000_keep_leaf_when_profiled ()
27250 return TARGET_PROFILE_KERNEL;
27253 /* Non-zero if vmx regs are restored before the frame pop, zero if
27254 we restore after the pop when possible. */
27255 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27257 /* Restoring cr is a two step process: loading a reg from the frame
27258 save, then moving the reg to cr. For ABI_V4 we must let the
27259 unwinder know that the stack location is no longer valid at or
27260 before the stack deallocation, but we can't emit a cfa_restore for
27261 cr at the stack deallocation like we do for other registers.
27262 The trouble is that it is possible for the move to cr to be
27263 scheduled after the stack deallocation. So say exactly where cr
27264 is located on each of the two insns. */
27266 static rtx
27267 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27269 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27270 rtx reg = gen_rtx_REG (SImode, regno);
27271 rtx_insn *insn = emit_move_insn (reg, mem);
27273 if (!exit_func && DEFAULT_ABI == ABI_V4)
27275 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27276 rtx set = gen_rtx_SET (reg, cr);
27278 add_reg_note (insn, REG_CFA_REGISTER, set);
27279 RTX_FRAME_RELATED_P (insn) = 1;
27281 return reg;
27284 /* Reload CR from REG. */
27286 static void
27287 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27289 int count = 0;
27290 int i;
27292 if (using_mfcr_multiple)
27294 for (i = 0; i < 8; i++)
27295 if (save_reg_p (CR0_REGNO + i))
27296 count++;
27297 gcc_assert (count);
27300 if (using_mfcr_multiple && count > 1)
27302 rtx_insn *insn;
27303 rtvec p;
27304 int ndx;
27306 p = rtvec_alloc (count);
27308 ndx = 0;
27309 for (i = 0; i < 8; i++)
27310 if (save_reg_p (CR0_REGNO + i))
27312 rtvec r = rtvec_alloc (2);
27313 RTVEC_ELT (r, 0) = reg;
27314 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
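/* E.g. restoring CR2 (i == 2) yields the mask 1 << 5 == 0x20: the
   FXM field of mtcrf treats CR0 as its most significant bit.  */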
27315 RTVEC_ELT (p, ndx) =
27316 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27317 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27318 ndx++;
27320 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27321 gcc_assert (ndx == count);
27323 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27324 CR field separately. */
27325 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27327 for (i = 0; i < 8; i++)
27328 if (save_reg_p (CR0_REGNO + i))
27329 add_reg_note (insn, REG_CFA_RESTORE,
27330 gen_rtx_REG (SImode, CR0_REGNO + i));
27332 RTX_FRAME_RELATED_P (insn) = 1;
27335 else
27336 for (i = 0; i < 8; i++)
27337 if (save_reg_p (CR0_REGNO + i))
27339 rtx insn = emit_insn (gen_movsi_to_cr_one
27340 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27342 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27343 CR field separately, attached to the insn that in fact
27344 restores this particular CR field. */
27345 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27347 add_reg_note (insn, REG_CFA_RESTORE,
27348 gen_rtx_REG (SImode, CR0_REGNO + i));
27350 RTX_FRAME_RELATED_P (insn) = 1;
27354 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27355 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27356 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27358 rtx_insn *insn = get_last_insn ();
27359 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27361 add_reg_note (insn, REG_CFA_RESTORE, cr);
27362 RTX_FRAME_RELATED_P (insn) = 1;
27366 /* Like cr, the move to lr instruction can be scheduled after the
27367 stack deallocation, but unlike cr, its stack frame save is still
27368 valid. So we only need to emit the cfa_restore on the correct
27369 instruction. */
27371 static void
27372 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27374 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27375 rtx reg = gen_rtx_REG (Pmode, regno);
27377 emit_move_insn (reg, mem);
27380 static void
27381 restore_saved_lr (int regno, bool exit_func)
27383 rtx reg = gen_rtx_REG (Pmode, regno);
27384 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27385 rtx_insn *insn = emit_move_insn (lr, reg);
27387 if (!exit_func && flag_shrink_wrap)
27389 add_reg_note (insn, REG_CFA_RESTORE, lr);
27390 RTX_FRAME_RELATED_P (insn) = 1;
27394 static rtx
27395 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27397 if (DEFAULT_ABI == ABI_ELFv2)
27399 int i;
27400 for (i = 0; i < 8; i++)
27401 if (save_reg_p (CR0_REGNO + i))
27403 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27404 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27405 cfa_restores);
27408 else if (info->cr_save_p)
27409 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27410 gen_rtx_REG (SImode, CR2_REGNO),
27411 cfa_restores);
27413 if (info->lr_save_p)
27414 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27415 gen_rtx_REG (Pmode, LR_REGNO),
27416 cfa_restores);
27417 return cfa_restores;
27420 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27421 V.4 doesn't have any stack cushion; the AIX ABIs keep 220 or 288
27422 bytes below the stack pointer that are not clobbered by signals. */
27424 static inline bool
27425 offset_below_red_zone_p (HOST_WIDE_INT offset)
27427 return offset < (DEFAULT_ABI == ABI_V4
27428 ? 0
27429 : TARGET_32BIT ? -220 : -288);
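/* Example (illustrative): with TARGET_64BIT and the AIX/ELF ABIs an
   offset of -288 is still inside the protected red zone (the test is
   strict), while -289 may be clobbered, so anything restored from
   there must be loaded before the stack pointer is popped.  */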
27432 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27434 static void
27435 emit_cfa_restores (rtx cfa_restores)
27437 rtx_insn *insn = get_last_insn ();
27438 rtx *loc = &REG_NOTES (insn);
27440 while (*loc)
27441 loc = &XEXP (*loc, 1);
27442 *loc = cfa_restores;
27443 RTX_FRAME_RELATED_P (insn) = 1;
27446 /* Emit function epilogue as insns. */
27448 void
27449 rs6000_emit_epilogue (int sibcall)
27451 rs6000_stack_t *info;
27452 int restoring_GPRs_inline;
27453 int restoring_FPRs_inline;
27454 int using_load_multiple;
27455 int using_mtcr_multiple;
27456 int use_backchain_to_restore_sp;
27457 int restore_lr;
27458 int strategy;
27459 HOST_WIDE_INT frame_off = 0;
27460 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27461 rtx frame_reg_rtx = sp_reg_rtx;
27462 rtx cfa_restores = NULL_RTX;
27463 rtx insn;
27464 rtx cr_save_reg = NULL_RTX;
27465 machine_mode reg_mode = Pmode;
27466 int reg_size = TARGET_32BIT ? 4 : 8;
27467 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27468 int fp_reg_size = 8;
27469 int i;
27470 bool exit_func;
27471 unsigned ptr_regno;
27473 info = rs6000_stack_info ();
27475 strategy = info->savres_strategy;
27476 using_load_multiple = strategy & REST_MULTIPLE;
27477 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27478 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27479 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27480 || rs6000_tune == PROCESSOR_PPC603
27481 || rs6000_tune == PROCESSOR_PPC750
27482 || optimize_size);
27483 /* Restore via the backchain when we have a large frame, since this
27484 is more efficient than an addis, addi pair. The second condition
27485 here will not trigger at the moment; we don't actually need a
27486 frame pointer for alloca, but the generic parts of the compiler
27487 give us one anyway. */
27488 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27489 ? info->lr_save_offset
27490 : 0) > 32767
27491 || (cfun->calls_alloca
27492 && !frame_pointer_needed));
27493 restore_lr = (info->lr_save_p
27494 && (restoring_FPRs_inline
27495 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27496 && (restoring_GPRs_inline
27497 || info->first_fp_reg_save < 64)
27498 && !cfun->machine->lr_is_wrapped_separately);
27501 if (WORLD_SAVE_P (info))
27503 int i, j;
27504 char rname[30];
27505 const char *alloc_rname;
27506 rtvec p;
27508 /* eh_rest_world_r10 will return to the location saved in the LR
27509 stack slot (which is not likely to be our caller.)
27510 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27511 rest_world is similar, except any R10 parameter is ignored.
27512 The exception-handling stuff that was here in 2.95 is no
27513 longer necessary. */
27515 p = rtvec_alloc (9
27516 + 32 - info->first_gp_reg_save
27517 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27518 + 63 + 1 - info->first_fp_reg_save);
27520 strcpy (rname, ((crtl->calls_eh_return) ?
27521 "*eh_rest_world_r10" : "*rest_world"));
27522 alloc_rname = ggc_strdup (rname);
27524 j = 0;
27525 RTVEC_ELT (p, j++) = ret_rtx;
27526 RTVEC_ELT (p, j++)
27527 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27528 /* The instruction pattern requires a clobber here;
27529 it is shared with the restVEC helper. */
27530 RTVEC_ELT (p, j++)
27531 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27534 /* CR register traditionally saved as CR2. */
27535 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27536 RTVEC_ELT (p, j++)
27537 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27538 if (flag_shrink_wrap)
27540 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27541 gen_rtx_REG (Pmode, LR_REGNO),
27542 cfa_restores);
27543 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27547 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27549 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27550 RTVEC_ELT (p, j++)
27551 = gen_frame_load (reg,
27552 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27553 if (flag_shrink_wrap
27554 && save_reg_p (info->first_gp_reg_save + i))
27555 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27557 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27559 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27560 RTVEC_ELT (p, j++)
27561 = gen_frame_load (reg,
27562 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27563 if (flag_shrink_wrap
27564 && save_reg_p (info->first_altivec_reg_save + i))
27565 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27567 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27569 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27570 info->first_fp_reg_save + i);
27571 RTVEC_ELT (p, j++)
27572 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27573 if (flag_shrink_wrap
27574 && save_reg_p (info->first_fp_reg_save + i))
27575 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27577 RTVEC_ELT (p, j++)
27578 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27579 RTVEC_ELT (p, j++)
27580 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27581 RTVEC_ELT (p, j++)
27582 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27583 RTVEC_ELT (p, j++)
27584 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27585 RTVEC_ELT (p, j++)
27586 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27587 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27589 if (flag_shrink_wrap)
27591 REG_NOTES (insn) = cfa_restores;
27592 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27593 RTX_FRAME_RELATED_P (insn) = 1;
27595 return;
27598 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27599 if (info->push_p)
27600 frame_off = info->total_size;
27602 /* Restore AltiVec registers if we must do so before adjusting the
27603 stack. */
27604 if (info->altivec_size != 0
27605 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27606 || (DEFAULT_ABI != ABI_V4
27607 && offset_below_red_zone_p (info->altivec_save_offset))))
27609 int i;
27610 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27612 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27613 if (use_backchain_to_restore_sp)
27615 int frame_regno = 11;
27617 if ((strategy & REST_INLINE_VRS) == 0)
27619 /* Of r11 and r12, select the one not clobbered by an
27620 out-of-line restore function for the frame register. */
27621 frame_regno = 11 + 12 - scratch_regno;
27623 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27624 emit_move_insn (frame_reg_rtx,
27625 gen_rtx_MEM (Pmode, sp_reg_rtx));
27626 frame_off = 0;
27628 else if (frame_pointer_needed)
27629 frame_reg_rtx = hard_frame_pointer_rtx;
27631 if ((strategy & REST_INLINE_VRS) == 0)
27633 int end_save = info->altivec_save_offset + info->altivec_size;
27634 int ptr_off;
27635 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27636 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27638 if (end_save + frame_off != 0)
27640 rtx offset = GEN_INT (end_save + frame_off);
27642 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27644 else
27645 emit_move_insn (ptr_reg, frame_reg_rtx);
27647 ptr_off = -end_save;
27648 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27649 info->altivec_save_offset + ptr_off,
27650 0, V4SImode, SAVRES_VR);
27652 else
27654 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27655 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27657 rtx addr, areg, mem, insn;
27658 rtx reg = gen_rtx_REG (V4SImode, i);
27659 HOST_WIDE_INT offset
27660 = (info->altivec_save_offset + frame_off
27661 + 16 * (i - info->first_altivec_reg_save));
27663 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27665 mem = gen_frame_mem (V4SImode,
27666 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27667 GEN_INT (offset)));
27668 insn = gen_rtx_SET (reg, mem);
27670 else
27672 areg = gen_rtx_REG (Pmode, 0);
27673 emit_move_insn (areg, GEN_INT (offset));
27675 /* AltiVec addressing mode is [reg+reg]. */
27676 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27677 mem = gen_frame_mem (V4SImode, addr);
27679 /* Rather than emitting a generic move, force use of the
27680 lvx instruction, which we always want. In particular we
27681 don't want lxvd2x/xxpermdi for little endian. */
27682 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27685 (void) emit_insn (insn);
27689 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27690 if (((strategy & REST_INLINE_VRS) == 0
27691 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27692 && (flag_shrink_wrap
27693 || (offset_below_red_zone_p
27694 (info->altivec_save_offset
27695 + 16 * (i - info->first_altivec_reg_save))))
27696 && save_reg_p (i))
27698 rtx reg = gen_rtx_REG (V4SImode, i);
27699 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27703 /* Restore VRSAVE if we must do so before adjusting the stack. */
27704 if (info->vrsave_size != 0
27705 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27706 || (DEFAULT_ABI != ABI_V4
27707 && offset_below_red_zone_p (info->vrsave_save_offset))))
27709 rtx reg;
27711 if (frame_reg_rtx == sp_reg_rtx)
27713 if (use_backchain_to_restore_sp)
27715 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27716 emit_move_insn (frame_reg_rtx,
27717 gen_rtx_MEM (Pmode, sp_reg_rtx));
27718 frame_off = 0;
27720 else if (frame_pointer_needed)
27721 frame_reg_rtx = hard_frame_pointer_rtx;
27724 reg = gen_rtx_REG (SImode, 12);
27725 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27726 info->vrsave_save_offset + frame_off));
27728 emit_insn (generate_set_vrsave (reg, info, 1));
27731 insn = NULL_RTX;
27732 /* If we have a large stack frame, restore the old stack pointer
27733 using the backchain. */
27734 if (use_backchain_to_restore_sp)
27736 if (frame_reg_rtx == sp_reg_rtx)
27738 /* Under V.4, don't reset the stack pointer until after we're done
27739 loading the saved registers. */
27740 if (DEFAULT_ABI == ABI_V4)
27741 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27743 insn = emit_move_insn (frame_reg_rtx,
27744 gen_rtx_MEM (Pmode, sp_reg_rtx));
27745 frame_off = 0;
27747 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27748 && DEFAULT_ABI == ABI_V4)
27749 /* frame_reg_rtx has been set up by the altivec restore. */
27751 else
27753 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27754 frame_reg_rtx = sp_reg_rtx;
27757 /* If we have a frame pointer, we can restore the old stack pointer
27758 from it. */
27759 else if (frame_pointer_needed)
27761 frame_reg_rtx = sp_reg_rtx;
27762 if (DEFAULT_ABI == ABI_V4)
27763 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27764 /* Prevent reordering memory accesses against stack pointer restore. */
27765 else if (cfun->calls_alloca
27766 || offset_below_red_zone_p (-info->total_size))
27767 rs6000_emit_stack_tie (frame_reg_rtx, true);
27769 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27770 GEN_INT (info->total_size)));
27771 frame_off = 0;
27773 else if (info->push_p
27774 && DEFAULT_ABI != ABI_V4
27775 && !crtl->calls_eh_return)
27777 /* Prevent reordering memory accesses against stack pointer restore. */
27778 if (cfun->calls_alloca
27779 || offset_below_red_zone_p (-info->total_size))
27780 rs6000_emit_stack_tie (frame_reg_rtx, false);
27781 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
27782 GEN_INT (info->total_size)));
27783 frame_off = 0;
27785 if (insn && frame_reg_rtx == sp_reg_rtx)
27787 if (cfa_restores)
27789 REG_NOTES (insn) = cfa_restores;
27790 cfa_restores = NULL_RTX;
27792 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27793 RTX_FRAME_RELATED_P (insn) = 1;
27796 /* Restore AltiVec registers if we have not done so already. */
27797 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27798 && info->altivec_size != 0
27799 && (DEFAULT_ABI == ABI_V4
27800 || !offset_below_red_zone_p (info->altivec_save_offset)))
27802 int i;
27804 if ((strategy & REST_INLINE_VRS) == 0)
27806 int end_save = info->altivec_save_offset + info->altivec_size;
27807 int ptr_off;
27808 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27809 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27810 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27812 if (end_save + frame_off != 0)
27814 rtx offset = GEN_INT (end_save + frame_off);
27816 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27818 else
27819 emit_move_insn (ptr_reg, frame_reg_rtx);
27821 ptr_off = -end_save;
27822 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27823 info->altivec_save_offset + ptr_off,
27824 0, V4SImode, SAVRES_VR);
27825 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27827 /* Frame reg was clobbered by out-of-line save. Restore it
27828 from ptr_reg, and if we are calling out-of-line gpr or
27829 fpr restore set up the correct pointer and offset. */
27830 unsigned newptr_regno = 1;
27831 if (!restoring_GPRs_inline)
27833 bool lr = info->gp_save_offset + info->gp_size == 0;
27834 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27835 newptr_regno = ptr_regno_for_savres (sel);
27836 end_save = info->gp_save_offset + info->gp_size;
27838 else if (!restoring_FPRs_inline)
27840 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
27841 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27842 newptr_regno = ptr_regno_for_savres (sel);
27843 end_save = info->fp_save_offset + info->fp_size;
27846 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
27847 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
27849 if (end_save + ptr_off != 0)
27851 rtx offset = GEN_INT (end_save + ptr_off);
27853 frame_off = -end_save;
27854 if (TARGET_32BIT)
27855 emit_insn (gen_addsi3_carry (frame_reg_rtx,
27856 ptr_reg, offset));
27857 else
27858 emit_insn (gen_adddi3_carry (frame_reg_rtx,
27859 ptr_reg, offset));
27861 else
27863 frame_off = ptr_off;
27864 emit_move_insn (frame_reg_rtx, ptr_reg);
27868 else
27870 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27871 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27873 rtx addr, areg, mem, insn;
27874 rtx reg = gen_rtx_REG (V4SImode, i);
27875 HOST_WIDE_INT offset
27876 = (info->altivec_save_offset + frame_off
27877 + 16 * (i - info->first_altivec_reg_save));
27879 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27881 mem = gen_frame_mem (V4SImode,
27882 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27883 GEN_INT (offset)));
27884 insn = gen_rtx_SET (reg, mem);
27886 else
27888 areg = gen_rtx_REG (Pmode, 0);
27889 emit_move_insn (areg, GEN_INT (offset));
27891 /* AltiVec addressing mode is [reg+reg]. */
27892 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27893 mem = gen_frame_mem (V4SImode, addr);
27895 /* Rather than emitting a generic move, force use of the
27896 lvx instruction, which we always want. In particular we
27897 don't want lxvd2x/xxpermdi for little endian. */
27898 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27901 (void) emit_insn (insn);
27905 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27906 if (((strategy & REST_INLINE_VRS) == 0
27907 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27908 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
27909 && save_reg_p (i))
27911 rtx reg = gen_rtx_REG (V4SImode, i);
27912 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27916 /* Restore VRSAVE if we have not done so already. */
27917 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27918 && info->vrsave_size != 0
27919 && (DEFAULT_ABI == ABI_V4
27920 || !offset_below_red_zone_p (info->vrsave_save_offset)))
27922 rtx reg;
27924 reg = gen_rtx_REG (SImode, 12);
27925 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27926 info->vrsave_save_offset + frame_off));
27928 emit_insn (generate_set_vrsave (reg, info, 1));
27931 /* If we exit by an out-of-line restore function on ABI_V4 then that
27932 function will deallocate the stack, so we don't need to worry
27933 about the unwinder restoring cr from an invalid stack frame
27934 location. */
27935 exit_func = (!restoring_FPRs_inline
27936 || (!restoring_GPRs_inline
27937 && info->first_fp_reg_save == 64));
27939 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
27940 *separate* slots if the routine calls __builtin_eh_return, so
27941 that they can be independently restored by the unwinder. */
27942 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27944 int i, cr_off = info->ehcr_offset;
27946 for (i = 0; i < 8; i++)
27947 if (!call_used_regs[CR0_REGNO + i])
27949 rtx reg = gen_rtx_REG (SImode, 0);
27950 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27951 cr_off + frame_off));
27953 insn = emit_insn (gen_movsi_to_cr_one
27954 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27956 if (!exit_func && flag_shrink_wrap)
27958 add_reg_note (insn, REG_CFA_RESTORE,
27959 gen_rtx_REG (SImode, CR0_REGNO + i));
27961 RTX_FRAME_RELATED_P (insn) = 1;
27964 cr_off += reg_size;
27968 /* Get the old lr if we saved it. If we are restoring registers
27969 out-of-line, then the out-of-line routines can do this for us. */
27970 if (restore_lr && restoring_GPRs_inline)
27971 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
27973 /* Get the old cr if we saved it. */
27974 if (info->cr_save_p)
27976 unsigned cr_save_regno = 12;
27978 if (!restoring_GPRs_inline)
27980 /* Ensure we don't use the register used by the out-of-line
27981 gpr register restore below. */
27982 bool lr = info->gp_save_offset + info->gp_size == 0;
27983 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27984 int gpr_ptr_regno = ptr_regno_for_savres (sel);
27986 if (gpr_ptr_regno == 12)
27987 cr_save_regno = 11;
27988 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
27990 else if (REGNO (frame_reg_rtx) == 12)
27991 cr_save_regno = 11;
27993 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
27994 info->cr_save_offset + frame_off,
27995 exit_func);
27998 /* Set LR here to try to overlap restores below. */
27999 if (restore_lr && restoring_GPRs_inline)
28000 restore_saved_lr (0, exit_func);
28002 /* Load exception handler data registers, if needed. */
28003 if (crtl->calls_eh_return)
28005 unsigned int i, regno;
28007 if (TARGET_AIX)
28009 rtx reg = gen_rtx_REG (reg_mode, 2);
28010 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28011 frame_off + RS6000_TOC_SAVE_SLOT));
28014 for (i = 0; ; ++i)
28016 rtx mem;
28018 regno = EH_RETURN_DATA_REGNO (i);
28019 if (regno == INVALID_REGNUM)
28020 break;
28022 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28023 info->ehrd_offset + frame_off
28024 + reg_size * (int) i);
28026 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28030 /* Restore GPRs. This is done as a PARALLEL if we are using
28031 the load-multiple instructions. */
28032 if (!restoring_GPRs_inline)
28034 /* We are jumping to an out-of-line function. */
28035 rtx ptr_reg;
28036 int end_save = info->gp_save_offset + info->gp_size;
28037 bool can_use_exit = end_save == 0;
28038 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28039 int ptr_off;
28041 /* Emit stack reset code if we need it. */
28042 ptr_regno = ptr_regno_for_savres (sel);
28043 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28044 if (can_use_exit)
28045 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28046 else if (end_save + frame_off != 0)
28047 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28048 GEN_INT (end_save + frame_off)));
28049 else if (REGNO (frame_reg_rtx) != ptr_regno)
28050 emit_move_insn (ptr_reg, frame_reg_rtx);
28051 if (REGNO (frame_reg_rtx) == ptr_regno)
28052 frame_off = -end_save;
28054 if (can_use_exit && info->cr_save_p)
28055 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28057 ptr_off = -end_save;
28058 rs6000_emit_savres_rtx (info, ptr_reg,
28059 info->gp_save_offset + ptr_off,
28060 info->lr_save_offset + ptr_off,
28061 reg_mode, sel);
28063 else if (using_load_multiple)
28065 rtvec p;
28066 p = rtvec_alloc (32 - info->first_gp_reg_save);
28067 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28068 RTVEC_ELT (p, i)
28069 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28070 frame_reg_rtx,
28071 info->gp_save_offset + frame_off + reg_size * i);
28072 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28074 else
28076 int offset = info->gp_save_offset + frame_off;
28077 for (i = info->first_gp_reg_save; i < 32; i++)
28079 if (save_reg_p (i)
28080 && !cfun->machine->gpr_is_wrapped_separately[i])
28082 rtx reg = gen_rtx_REG (reg_mode, i);
28083 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28086 offset += reg_size;
28090 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28092 /* If the frame pointer was used then we can't delay emitting
28093 a REG_CFA_DEF_CFA note. This must happen on the insn that
28094 restores the frame pointer, r31. We may have already emitted
28095 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28096 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28097 be harmless if emitted. */
28098 if (frame_pointer_needed)
28100 insn = get_last_insn ();
28101 add_reg_note (insn, REG_CFA_DEF_CFA,
28102 plus_constant (Pmode, frame_reg_rtx, frame_off));
28103 RTX_FRAME_RELATED_P (insn) = 1;
28106 /* Set up cfa_restores. We always need these when
28107 shrink-wrapping. If not shrink-wrapping then we only need
28108 the cfa_restore when the stack location is no longer valid.
28109 The cfa_restores must be emitted on or before the insn that
28110 invalidates the stack, and of course must not be emitted
28111 before the insn that actually does the restore. The latter
28112 is why it is a bad idea to emit the cfa_restores as a group
28113 on the last instruction here that actually does a restore:
28114 That insn may be reordered with respect to others doing
28115 restores. */
28116 if (flag_shrink_wrap
28117 && !restoring_GPRs_inline
28118 && info->first_fp_reg_save == 64)
28119 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28121 for (i = info->first_gp_reg_save; i < 32; i++)
28122 if (save_reg_p (i)
28123 && !cfun->machine->gpr_is_wrapped_separately[i])
28125 rtx reg = gen_rtx_REG (reg_mode, i);
28126 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28130 if (!restoring_GPRs_inline
28131 && info->first_fp_reg_save == 64)
28133 /* We are jumping to an out-of-line function. */
28134 if (cfa_restores)
28135 emit_cfa_restores (cfa_restores);
28136 return;
28139 if (restore_lr && !restoring_GPRs_inline)
28141 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28142 restore_saved_lr (0, exit_func);
28145 /* Restore fpr's if we need to do it without calling a function. */
28146 if (restoring_FPRs_inline)
28148 int offset = info->fp_save_offset + frame_off;
28149 for (i = info->first_fp_reg_save; i < 64; i++)
28151 if (save_reg_p (i)
28152 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28154 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28155 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28156 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28157 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28158 cfa_restores);
28161 offset += fp_reg_size;
28165 /* If we saved cr, restore it here. Just those that were used. */
28166 if (info->cr_save_p)
28167 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28169 /* If this is V.4, unwind the stack pointer after all of the loads
28170 have been done, or set up r11 if we are restoring fp out of line. */
28171 ptr_regno = 1;
28172 if (!restoring_FPRs_inline)
28174 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28175 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28176 ptr_regno = ptr_regno_for_savres (sel);
28179 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28180 if (REGNO (frame_reg_rtx) == ptr_regno)
28181 frame_off = 0;
28183 if (insn && restoring_FPRs_inline)
28185 if (cfa_restores)
28187 REG_NOTES (insn) = cfa_restores;
28188 cfa_restores = NULL_RTX;
28190 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28191 RTX_FRAME_RELATED_P (insn) = 1;
28194 if (crtl->calls_eh_return)
28196 rtx sa = EH_RETURN_STACKADJ_RTX;
28197 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28200 if (!sibcall && restoring_FPRs_inline)
28202 if (cfa_restores)
28204 /* We can't hang the cfa_restores off a simple return,
28205 since the shrink-wrap code sometimes uses an existing
28206 return. This means there might be a path from
28207 pre-prologue code to this return, and dwarf2cfi code
28208 wants the eh_frame unwinder state to be the same on
28209 all paths to any point. So we need to emit the
28210 cfa_restores before the return. For -m64 we really
28211 don't need epilogue cfa_restores at all, except for
28212 this irritating dwarf2cfi with shrink-wrap
28213 requirement; the stack red-zone means eh_frame info
28214 from the prologue telling the unwinder to restore
28215 from the stack is perfectly good right to the end of
28216 the function. */
28217 emit_insn (gen_blockage ());
28218 emit_cfa_restores (cfa_restores);
28219 cfa_restores = NULL_RTX;
28222 emit_jump_insn (targetm.gen_simple_return ());
28225 if (!sibcall && !restoring_FPRs_inline)
28227 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28228 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28229 int elt = 0;
28230 RTVEC_ELT (p, elt++) = ret_rtx;
28231 if (lr)
28232 RTVEC_ELT (p, elt++)
28233 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28235 /* We have to restore more than two FP registers, so branch to the
28236 restore function. It will return to our caller. */
28237 int i;
28238 int reg;
28239 rtx sym;
28241 if (flag_shrink_wrap)
28242 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28244 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28245 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28246 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28247 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28249 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28251 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28253 RTVEC_ELT (p, elt++)
28254 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28255 if (flag_shrink_wrap
28256 && save_reg_p (info->first_fp_reg_save + i))
28257 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28260 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28263 if (cfa_restores)
28265 if (sibcall)
28266 /* Ensure the cfa_restores are hung off an insn that won't
28267 be reordered above other restores. */
28268 emit_insn (gen_blockage ());
28270 emit_cfa_restores (cfa_restores);
28274 /* Write function epilogue. */
28276 static void
28277 rs6000_output_function_epilogue (FILE *file)
28279 #if TARGET_MACHO
28280 macho_branch_islands ();
28283 rtx_insn *insn = get_last_insn ();
28284 rtx_insn *deleted_debug_label = NULL;
28286 /* Mach-O doesn't support labels at the end of objects, so if
28287 it looks like we might want one, take special action.
28289 First, collect any sequence of deleted debug labels. */
28290 while (insn
28291 && NOTE_P (insn)
28292 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28294 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28295 notes only, instead set their CODE_LABEL_NUMBER to -1,
28296 otherwise there would be code generation differences
28297 in between -g and -g0. */
28298 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28299 deleted_debug_label = insn;
28300 insn = PREV_INSN (insn);
28303 /* Second, if we have:
28304 label:
28305 barrier
28306 then this needs to be detected, so skip past the barrier. */
28308 if (insn && BARRIER_P (insn))
28309 insn = PREV_INSN (insn);
28311 /* Up to now we've only seen notes or barriers. */
28312 if (insn)
28314 if (LABEL_P (insn)
28315 || (NOTE_P (insn)
28316 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28317 /* Trailing label: <barrier>. */
28318 fputs ("\tnop\n", file);
28319 else
28321 /* Lastly, see if we have a completely empty function body. */
28322 while (insn && ! INSN_P (insn))
28323 insn = PREV_INSN (insn);
28324 /* If we don't find any insns, we've got an empty function body;
28325 i.e. completely empty, without a return or branch. This is
28326 taken as the case where a function body has been removed
28327 because it contains an inline __builtin_unreachable(). GCC
28328 states that reaching __builtin_unreachable() means UB so we're
28329 not obliged to do anything special; however, we want
28330 non-zero-sized function bodies. To meet this, and help the
28331 user out, let's trap the case. */
28332 if (insn == NULL)
28333 fputs ("\ttrap\n", file);
28336 else if (deleted_debug_label)
28337 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28338 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28339 CODE_LABEL_NUMBER (insn) = -1;
28341 #endif
28343 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28344 on its format.
28346 We don't output a traceback table if -finhibit-size-directive was
28347 used. The documentation for -finhibit-size-directive reads
28348 ``don't output a @code{.size} assembler directive, or anything
28349 else that would cause trouble if the function is split in the
28350 middle, and the two halves are placed at locations far apart in
28351 memory.'' The traceback table has this property, since it
28352 includes the offset from the start of the function to the
28353 traceback table itself.
28355 System V.4 PowerPC targets (and the embedded ABI derived from it) use a
28356 different traceback table. */
28357 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28358 && ! flag_inhibit_size_directive
28359 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28361 const char *fname = NULL;
28362 const char *language_string = lang_hooks.name;
28363 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28364 int i;
28365 int optional_tbtab;
28366 rs6000_stack_t *info = rs6000_stack_info ();
28368 if (rs6000_traceback == traceback_full)
28369 optional_tbtab = 1;
28370 else if (rs6000_traceback == traceback_part)
28371 optional_tbtab = 0;
28372 else
28373 optional_tbtab = !optimize_size && !TARGET_ELF;
28375 if (optional_tbtab)
28377 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28378 while (*fname == '.') /* V.4 encodes . in the name */
28379 fname++;
28381 /* Need label immediately before tbtab, so we can compute
28382 its offset from the function start. */
28383 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28384 ASM_OUTPUT_LABEL (file, fname);
28387 /* The .tbtab pseudo-op can only be used for the first eight
28388 expressions, since it can't handle the possibly variable
28389 length fields that follow. However, if you omit the optional
28390 fields, the assembler outputs zeros for all optional fields
28391 anyway, giving each variable length field its minimum length
28392 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28393 pseudo-op at all. */
28395 /* An all-zero word flags the start of the tbtab, for debuggers
28396 that have to find it by searching forward from the entry
28397 point or from the current pc. */
28398 fputs ("\t.long 0\n", file);
28400 /* Tbtab format type. Use format type 0. */
28401 fputs ("\t.byte 0,", file);
28403 /* Language type. Unfortunately, there does not seem to be any
28404 official way to discover the language being compiled, so we
28405 use language_string.
28406 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28407 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28408 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28409 either, so for now use 0. */
28410 if (lang_GNU_C ()
28411 || ! strcmp (language_string, "GNU GIMPLE")
28412 || ! strcmp (language_string, "GNU Go")
28413 || ! strcmp (language_string, "libgccjit"))
28414 i = 0;
28415 else if (! strcmp (language_string, "GNU F77")
28416 || lang_GNU_Fortran ())
28417 i = 1;
28418 else if (! strcmp (language_string, "GNU Pascal"))
28419 i = 2;
28420 else if (! strcmp (language_string, "GNU Ada"))
28421 i = 3;
28422 else if (lang_GNU_CXX ()
28423 || ! strcmp (language_string, "GNU Objective-C++"))
28424 i = 9;
28425 else if (! strcmp (language_string, "GNU Java"))
28426 i = 13;
28427 else if (! strcmp (language_string, "GNU Objective-C"))
28428 i = 14;
28429 else
28430 gcc_unreachable ();
28431 fprintf (file, "%d,", i);
28433 /* 8 single bit fields: global linkage (not set for C extern linkage,
28434 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28435 from start of procedure stored in tbtab, internal function, function
28436 has controlled storage, function has no toc, function uses fp,
28437 function logs/aborts fp operations. */
28438 /* Assume that fp operations are used if any fp reg must be saved. */
28439 fprintf (file, "%d,",
28440 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28442 /* 6 bitfields: function is interrupt handler, name present in
28443 proc table, function calls alloca, on condition directives
28444 (controls stack walks, 3 bits), saves condition reg, saves
28445 link reg. */
28446 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28447 set up as a frame pointer, even when there is no alloca call. */
28448 fprintf (file, "%d,",
28449 ((optional_tbtab << 6)
28450 | ((optional_tbtab & frame_pointer_needed) << 5)
28451 | (info->cr_save_p << 1)
28452 | (info->lr_save_p)));
28454 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28455 (6 bits). */
28456 fprintf (file, "%d,",
28457 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28459 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28460 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28462 if (optional_tbtab)
28464 /* Compute the parameter info from the function decl argument
28465 list. */
28466 tree decl;
28467 int next_parm_info_bit = 31;
28469 for (decl = DECL_ARGUMENTS (current_function_decl);
28470 decl; decl = DECL_CHAIN (decl))
28472 rtx parameter = DECL_INCOMING_RTL (decl);
28473 machine_mode mode = GET_MODE (parameter);
28475 if (GET_CODE (parameter) == REG)
28477 if (SCALAR_FLOAT_MODE_P (mode))
28479 int bits;
28481 float_parms++;
28483 switch (mode)
28485 case E_SFmode:
28486 case E_SDmode:
28487 bits = 0x2;
28488 break;
28490 case E_DFmode:
28491 case E_DDmode:
28492 case E_TFmode:
28493 case E_TDmode:
28494 case E_IFmode:
28495 case E_KFmode:
28496 bits = 0x3;
28497 break;
28499 default:
28500 gcc_unreachable ();
28503 /* If only one bit will fit, don't or in this entry. */
28504 if (next_parm_info_bit > 0)
28505 parm_info |= (bits << (next_parm_info_bit - 1));
28506 next_parm_info_bit -= 2;
28508 else
28510 fixed_parms += ((GET_MODE_SIZE (mode)
28511 + (UNITS_PER_WORD - 1))
28512 / UNITS_PER_WORD);
28513 next_parm_info_bit -= 1;
28519 /* Number of fixed point parameters. */
28520 /* This is actually the number of words of fixed point parameters; thus
28521 an 8 byte struct counts as 2, and thus the maximum value is 8. */
28522 fprintf (file, "%d,", fixed_parms);
28524 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28525 all on stack. */
28526 /* This is actually the number of fp registers that hold parameters;
28527 and thus the maximum value is 13. */
28528 /* Set parameters on stack bit if parameters are not in their original
28529 registers, regardless of whether they are on the stack? Xlc
28530 seems to set the bit when not optimizing. */
28531 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28533 if (optional_tbtab)
28535 /* Optional fields follow. Some are variable length. */
28537 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28538 float, 11 double float. */
28539 /* There is an entry for each parameter in a register, in the order
28540 that they occur in the parameter list. Any intervening arguments
28541 on the stack are ignored. If the list overflows a long (max
28542 possible length 34 bits) then completely leave off all elements
28543 that don't fit. */
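/* Worked example (illustrative): for a parameter list (double, int)
   the DFmode argument stores 0b11 at bits 31:30 and the integer
   argument then accounts for a single zero bit, giving
   parm_info == 0xc0000000 with float_parms == 1 and fixed_parms == 1.  */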
28544 /* Only emit this long if there was at least one parameter. */
28545 if (fixed_parms || float_parms)
28546 fprintf (file, "\t.long %d\n", parm_info);
28548 /* Offset from start of code to tb table. */
28549 fputs ("\t.long ", file);
28550 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28551 RS6000_OUTPUT_BASENAME (file, fname);
28552 putc ('-', file);
28553 rs6000_output_function_entry (file, fname);
28554 putc ('\n', file);
28556 /* Interrupt handler mask. */
28557 /* Omit this long, since we never set the interrupt handler bit
28558 above. */
28560 /* Number of CTL (controlled storage) anchors. */
28561 /* Omit this long, since the has_ctl bit is never set above. */
28563 /* Displacement into stack of each CTL anchor. */
28564 /* Omit this list of longs, because there are no CTL anchors. */
28566 /* Length of function name. */
28567 if (*fname == '*')
28568 ++fname;
28569 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28571 /* Function name. */
28572 assemble_string (fname, strlen (fname));
28574 /* Register for alloca automatic storage; this is always reg 31.
28575 Only emit this if the alloca bit was set above. */
28576 if (frame_pointer_needed)
28577 fputs ("\t.byte 31\n", file);
28579 fputs ("\t.align 2\n", file);
28583 /* Arrange to define .LCTOC1 label, if not already done. */
28584 if (need_toc_init)
28586 need_toc_init = 0;
28587 if (!toc_initialized)
28589 switch_to_section (toc_section);
28590 switch_to_section (current_function_section ());
28595 /* -fsplit-stack support. */
28597 /* A SYMBOL_REF for __morestack. */
28598 static GTY(()) rtx morestack_ref;
28600 static rtx
28601 gen_add3_const (rtx rt, rtx ra, long c)
28603 if (TARGET_64BIT)
28604 return gen_adddi3 (rt, ra, GEN_INT (c));
28605 else
28606 return gen_addsi3 (rt, ra, GEN_INT (c));
28609 /* Emit -fsplit-stack prologue, which goes before the regular function
28610 prologue (at local entry point in the case of ELFv2). */
28612 void
28613 rs6000_expand_split_stack_prologue (void)
28615 rs6000_stack_t *info = rs6000_stack_info ();
28616 unsigned HOST_WIDE_INT allocate;
28617 long alloc_hi, alloc_lo;
28618 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28619 rtx_insn *insn;
28621 gcc_assert (flag_split_stack && reload_completed);
28623 if (!info->push_p)
28624 return;
28626 if (global_regs[29])
28628 error ("%qs uses register r29", "-fsplit-stack");
28629 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28630 "conflicts with %qD", global_regs_decl[29]);
28633 allocate = info->total_size;
28634 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28636 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28637 return;
28639 if (morestack_ref == NULL_RTX)
28641 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28642 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28643 | SYMBOL_FLAG_FUNCTION);
28646 r0 = gen_rtx_REG (Pmode, 0);
28647 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28648 r12 = gen_rtx_REG (Pmode, 12);
28649 emit_insn (gen_load_split_stack_limit (r0));
28650 /* Always emit two insns here to calculate the requested stack,
28651 so that the linker can edit them when adjusting size for calling
28652 non-split-stack code. */
28653 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28654 alloc_lo = -allocate - alloc_hi;
28655 if (alloc_hi != 0)
28657 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28658 if (alloc_lo != 0)
28659 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28660 else
28661 emit_insn (gen_nop ());
28663 else
28665 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28666 emit_insn (gen_nop ());
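/* Worked example (illustrative): for allocate == 0x12345 we get
   alloc_hi == -0x10000 and alloc_lo == -0x2345, emitted roughly as
       addis 12,1,-1
       addi  12,12,-9029
   so r12 == r1 - 0x12345; rounding by 0x8000 before masking keeps
   alloc_lo within the signed 16-bit range of addi.  */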
28669 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28670 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28671 ok_label = gen_label_rtx ();
28672 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28673 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28674 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28675 pc_rtx);
28676 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28677 JUMP_LABEL (insn) = ok_label;
28678 /* Mark the jump as very likely to be taken. */
28679 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28681 lr = gen_rtx_REG (Pmode, LR_REGNO);
28682 insn = emit_move_insn (r0, lr);
28683 RTX_FRAME_RELATED_P (insn) = 1;
28684 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28685 RTX_FRAME_RELATED_P (insn) = 1;
28687 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28688 const0_rtx, const0_rtx));
28689 call_fusage = NULL_RTX;
28690 use_reg (&call_fusage, r12);
28691 /* Say the call uses r0, even though it doesn't, to stop regrename
28692 from twiddling with the insns saving lr, trashing args for cfun.
28693 The insns restoring lr are similarly protected by making
28694 split_stack_return use r0. */
28695 use_reg (&call_fusage, r0);
28696 add_function_usage_to (insn, call_fusage);
28697 /* Indicate that this function can't jump to non-local gotos. */
28698 make_reg_eh_region_note_nothrow_nononlocal (insn);
28699 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28700 insn = emit_move_insn (lr, r0);
28701 add_reg_note (insn, REG_CFA_RESTORE, lr);
28702 RTX_FRAME_RELATED_P (insn) = 1;
28703 emit_insn (gen_split_stack_return ());
28705 emit_label (ok_label);
28706 LABEL_NUSES (ok_label) = 1;
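/* For reference, a hand-written sketch (not compiler output; N and
   LR_OFFSET are placeholders) of the sequence emitted above for a
   64-bit frame of size N, using r0 for the limit, r12 for the
   requested stack pointer and cr7 for the comparison:

	<load split-stack limit into r0>
	addis 12,1,<hi16 of -N>		# always two insns, so the
	addi 12,12,<lo16 of -N>		# linker can rewrite them
	cmpld 7,12,0			# requested >= limit?
	bge 7,1f			# very likely taken
	mflr 0
	std 0,LR_OFFSET(1)		# save LR around the call
	bl __morestack
	ld 0,LR_OFFSET(1)
	mtlr 0				# restore LR and return via
	blr				# split_stack_return
  1:
   */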
28709 /* Return the internal arg pointer used for function incoming
28710 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28711 to copy it to a pseudo in order for it to be preserved over calls
28712 and suchlike. We'd really like to use a pseudo here for the
28713 internal arg pointer but data-flow analysis is not prepared to
28714 accept pseudos as live at the beginning of a function. */
28716 static rtx
28717 rs6000_internal_arg_pointer (void)
28719 if (flag_split_stack
28720 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28721 == NULL))
28724 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28726 rtx pat;
28728 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28729 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28731 /* Put the pseudo initialization right after the note at the
28732 beginning of the function. */
28733 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28734 gen_rtx_REG (Pmode, 12));
28735 push_topmost_sequence ();
28736 emit_insn_after (pat, get_insns ());
28737 pop_topmost_sequence ();
28739 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28740 FIRST_PARM_OFFSET (current_function_decl));
28741 return copy_to_reg (ret);
28743 return virtual_incoming_args_rtx;
28746 /* We may have to tell the dataflow pass that the split stack prologue
28747 is initializing a register. */
28749 static void
28750 rs6000_live_on_entry (bitmap regs)
28752 if (flag_split_stack)
28753 bitmap_set_bit (regs, 12);
28756 /* Emit -fsplit-stack dynamic stack allocation space check. */
28758 void
28759 rs6000_split_stack_space_check (rtx size, rtx label)
28761 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28762 rtx limit = gen_reg_rtx (Pmode);
28763 rtx requested = gen_reg_rtx (Pmode);
28764 rtx cmp = gen_reg_rtx (CCUNSmode);
28765 rtx jump;
28767 emit_insn (gen_load_split_stack_limit (limit));
28768 if (CONST_INT_P (size))
28769 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28770 else
28772 size = force_reg (Pmode, size);
28773 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
28775 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
28776 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28777 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
28778 gen_rtx_LABEL_REF (VOIDmode, label),
28779 pc_rtx);
28780 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28781 JUMP_LABEL (jump) = label;
28784 /* A C compound statement that outputs the assembler code for a thunk
28785 function, used to implement C++ virtual function calls with
28786 multiple inheritance. The thunk acts as a wrapper around a virtual
28787 function, adjusting the implicit object parameter before handing
28788 control off to the real function.
28790 First, emit code to add the integer DELTA to the location that
28791 contains the incoming first argument. Assume that this argument
28792 contains a pointer, and is the one used to pass the `this' pointer
28793 in C++. This is the incoming argument *before* the function
28794 prologue, e.g. `%o0' on a sparc. The addition must preserve the
28795 values of all other incoming arguments.
28797 After the addition, emit code to jump to FUNCTION, which is a
28798 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
28799 not touch the return address. Hence returning from FUNCTION will
28800 return to whoever called the current `thunk'.
28802 The effect must be as if FUNCTION had been called directly with the
28803 adjusted first argument. This macro is responsible for emitting
28804 all of the code for a thunk function; output_function_prologue()
28805 and output_function_epilogue() are not invoked.
28807 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
28808 been extracted from it.) It might possibly be useful on some
28809 targets, but probably not.
28811 If you do not define this macro, the target-independent code in the
28812 C++ frontend will generate a less efficient heavyweight thunk that
28813 calls FUNCTION instead of jumping to it. The generic approach does
28814 not support varargs. */
28816 static void
28817 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
28818 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
28819 tree function)
28821 rtx this_rtx, funexp;
28822 rtx_insn *insn;
28824 reload_completed = 1;
28825 epilogue_completed = 1;
28827 /* Mark the end of the (empty) prologue. */
28828 emit_note (NOTE_INSN_PROLOGUE_END);
28830 /* Find the "this" pointer. If the function returns a structure,
28831 the structure return pointer is in r3. */
28832 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
28833 this_rtx = gen_rtx_REG (Pmode, 4);
28834 else
28835 this_rtx = gen_rtx_REG (Pmode, 3);
28837 /* Apply the constant offset, if required. */
28838 if (delta)
28839 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
28841 /* Apply the offset from the vtable, if required. */
28842 if (vcall_offset)
28844 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
28845 rtx tmp = gen_rtx_REG (Pmode, 12);
28847 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
28848 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
28850 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
28851 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
28853 else
28855 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
28857 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
28859 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
28862 /* Generate a tail call to the target function. */
28863 if (!TREE_USED (function))
28865 assemble_external (function);
28866 TREE_USED (function) = 1;
28868 funexp = XEXP (DECL_RTL (function), 0);
28869 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
28871 #if TARGET_MACHO
28872 if (MACHOPIC_INDIRECT)
28873 funexp = machopic_indirect_call_target (funexp);
28874 #endif
28876 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
28877 generate sibcall RTL explicitly. */
28878 insn = emit_call_insn (
28879 gen_rtx_PARALLEL (VOIDmode,
28880 gen_rtvec (3,
28881 gen_rtx_CALL (VOIDmode,
28882 funexp, const0_rtx),
28883 gen_rtx_USE (VOIDmode, const0_rtx),
28884 simple_return_rtx)));
28885 SIBLING_CALL_P (insn) = 1;
28886 emit_barrier ();
28888 /* Run just enough of rest_of_compilation to get the insns emitted.
28889 There's not really enough bulk here to make other passes such as
28890 instruction scheduling worthwhile. Note that use_thunk calls
28891 assemble_start_function and assemble_end_function. */
28892 insn = get_insns ();
28893 shorten_branches (insn);
28894 final_start_function (insn, file, 1);
28895 final (insn, file, 1);
28896 final_end_function ();
28898 reload_completed = 0;
28899 epilogue_completed = 0;
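/* For reference, a hand-written sketch (not compiler output) of the
   kind of code the above emits on a 64-bit target for hypothetical
   values DELTA == 16 and VCALL_OFFSET == -24 (small enough for a
   displacement-form load):

	addi 3,3,16	# this += DELTA
	ld 12,0(3)	# r12 = vtable pointer, *this
	ld 12,-24(12)	# r12 = *(vtable + VCALL_OFFSET)
	add 3,3,12	# this += adjustment from the vtable
	b <function>	# sibcall; LR is left untouched
   */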
28902 /* A quick summary of the various types of 'constant-pool tables'
28903 under PowerPC:
28905 Target Flags Name One table per
28906 AIX (none) AIX TOC object file
28907 AIX -mfull-toc AIX TOC object file
28908 AIX -mminimal-toc AIX minimal TOC translation unit
28909 SVR4/EABI (none) SVR4 SDATA object file
28910 SVR4/EABI -fpic SVR4 pic object file
28911 SVR4/EABI -fPIC SVR4 PIC translation unit
28912 SVR4/EABI -mrelocatable EABI TOC function
28913 SVR4/EABI -maix AIX TOC object file
28914 SVR4/EABI -maix -mminimal-toc
28915 AIX minimal TOC translation unit
28917 Name Reg. Set by entries contains:
28918 made by addrs? fp? sum?
28920 AIX TOC 2 crt0 as Y option option
28921 AIX minimal TOC 30 prolog gcc Y Y option
28922 SVR4 SDATA 13 crt0 gcc N Y N
28923 SVR4 pic 30 prolog ld Y not yet N
28924 SVR4 PIC 30 prolog gcc Y option option
28925 EABI TOC 30 prolog gcc Y option option
28929 /* Hash functions for the hash table. */
28931 static unsigned
28932 rs6000_hash_constant (rtx k)
28934 enum rtx_code code = GET_CODE (k);
28935 machine_mode mode = GET_MODE (k);
28936 unsigned result = (code << 3) ^ mode;
28937 const char *format;
28938 int flen, fidx;
28940 format = GET_RTX_FORMAT (code);
28941 flen = strlen (format);
28942 fidx = 0;
28944 switch (code)
28946 case LABEL_REF:
28947 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
28949 case CONST_WIDE_INT:
28951 int i;
28952 flen = CONST_WIDE_INT_NUNITS (k);
28953 for (i = 0; i < flen; i++)
28954 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
28955 return result;
28958 case CONST_DOUBLE:
28959 if (mode != VOIDmode)
28960 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
28961 flen = 2;
28962 break;
28964 case CODE_LABEL:
28965 fidx = 3;
28966 break;
28968 default:
28969 break;
28972 for (; fidx < flen; fidx++)
28973 switch (format[fidx])
28975 case 's':
28977 unsigned i, len;
28978 const char *str = XSTR (k, fidx);
28979 len = strlen (str);
28980 result = result * 613 + len;
28981 for (i = 0; i < len; i++)
28982 result = result * 613 + (unsigned) str[i];
28983 break;
28985 case 'u':
28986 case 'e':
28987 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
28988 break;
28989 case 'i':
28990 case 'n':
28991 result = result * 613 + (unsigned) XINT (k, fidx);
28992 break;
28993 case 'w':
28994 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
28995 result = result * 613 + (unsigned) XWINT (k, fidx);
28996 else
28998 size_t i;
28999 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29000 result = result * 613 + (unsigned) (XWINT (k, fidx)
29001 >> CHAR_BIT * i);
29003 break;
29004 case '0':
29005 break;
29006 default:
29007 gcc_unreachable ();
29010 return result;
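#if 0
/* Standalone sketch (not built as part of GCC; example_rolling_hash is
   a hypothetical name) of the polynomial rolling hash that
   rs6000_hash_constant applies to string operands above: each element
   is folded in as result = result * 613 + element.  */
static unsigned
example_rolling_hash (const char *str)
{
  unsigned result = 0;
  for (; *str != '\0'; str++)
    result = result * 613 + (unsigned char) *str;
  return result;
}
#endif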
29013 hashval_t
29014 toc_hasher::hash (toc_hash_struct *thc)
29016 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29019 /* Compare H1 and H2 for equivalence. */
29021 bool
29022 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29024 rtx r1 = h1->key;
29025 rtx r2 = h2->key;
29027 if (h1->key_mode != h2->key_mode)
29028 return false;
29030 return rtx_equal_p (r1, r2);
29033 /* These are the names given by the C++ front-end to vtables, and
29034 vtable-like objects. Ideally, this logic should not be here;
29035 instead, there should be some programmatic way of inquiring as
29036 to whether or not an object is a vtable. */
29038 #define VTABLE_NAME_P(NAME) \
29039 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29040 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29041 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29042 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29043 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
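/* For reference: "_vt." is the old GNU v2 vtable mangling, while the
   other prefixes come from the Itanium C++ ABI: _ZTV (vtable),
   _ZTT (VTT), _ZTI (typeinfo object) and _ZTC (construction vtable).  */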
29045 #ifdef NO_DOLLAR_IN_LABEL
29046 /* Return a GGC-allocated character string translating dollar signs in
29047 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29049 const char *
29050 rs6000_xcoff_strip_dollar (const char *name)
29052 char *strip, *p;
29053 const char *q;
29054 size_t len;
29056 q = (const char *) strchr (name, '$');
29058 if (q == 0 || q == name)
29059 return name;
29061 len = strlen (name);
29062 strip = XALLOCAVEC (char, len + 1);
29063 strcpy (strip, name);
29064 p = strip + (q - name);
29065 while (p)
29067 *p = '_';
29068 p = strchr (p + 1, '$');
29071 return ggc_alloc_string (strip, len);
29073 #endif
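/* For example (illustrative): rs6000_xcoff_strip_dollar ("a$b$c")
   yields "a_b_c", while a name whose first character is '$' is
   returned unchanged.  */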
29075 void
29076 rs6000_output_symbol_ref (FILE *file, rtx x)
29078 const char *name = XSTR (x, 0);
29080 /* Currently C++ toc references to vtables can be emitted before it
29081 is decided whether the vtable is public or private. If this is
29082 the case, then the linker will eventually complain that there is
29083 a reference to an unknown section. Thus, for vtables only,
29084 we emit the TOC reference to reference the identifier and not the
29085 symbol. */
29086 if (VTABLE_NAME_P (name))
29088 RS6000_OUTPUT_BASENAME (file, name);
29090 else
29091 assemble_name (file, name);
29094 /* Output a TOC entry. We derive the entry name from what is being
29095 written. */
29097 void
29098 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29100 char buf[256];
29101 const char *name = buf;
29102 rtx base = x;
29103 HOST_WIDE_INT offset = 0;
29105 gcc_assert (!TARGET_NO_TOC);
29107 /* When the linker won't eliminate them, don't output duplicate
29108 TOC entries (this happens on AIX if there is any kind of TOC,
29109 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29110 CODE_LABELs. */
29111 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29113 struct toc_hash_struct *h;
29115 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29116 time because GGC is not initialized at that point. */
29117 if (toc_hash_table == NULL)
29118 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29120 h = ggc_alloc<toc_hash_struct> ();
29121 h->key = x;
29122 h->key_mode = mode;
29123 h->labelno = labelno;
29125 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29126 if (*found == NULL)
29127 *found = h;
29128 else /* This is indeed a duplicate.
29129 Set this label equal to that label. */
29131 fputs ("\t.set ", file);
29132 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29133 fprintf (file, "%d,", labelno);
29134 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29135 fprintf (file, "%d\n", ((*found)->labelno));
29137 #ifdef HAVE_AS_TLS
29138 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29139 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29140 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29142 fputs ("\t.set ", file);
29143 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29144 fprintf (file, "%d,", labelno);
29145 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29146 fprintf (file, "%d\n", ((*found)->labelno));
29148 #endif
29149 return;
29153 /* If we're going to put a double constant in the TOC, make sure it's
29154 aligned properly when strict alignment is on. */
29155 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29156 && STRICT_ALIGNMENT
29157 && GET_MODE_BITSIZE (mode) >= 64
29158 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29159 ASM_OUTPUT_ALIGN (file, 3);
29162 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29164 /* Handle FP constants specially. Note that if we have a minimal
29165 TOC, things we put here aren't actually in the TOC, so we can allow
29166 FP constants. */
29167 if (GET_CODE (x) == CONST_DOUBLE
29168 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29169 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29171 long k[4];
29173 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29174 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29175 else
29176 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29178 if (TARGET_64BIT)
29180 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29181 fputs (DOUBLE_INT_ASM_OP, file);
29182 else
29183 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29184 k[0] & 0xffffffff, k[1] & 0xffffffff,
29185 k[2] & 0xffffffff, k[3] & 0xffffffff);
29186 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29187 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29188 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29189 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29190 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29191 return;
29193 else
29195 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29196 fputs ("\t.long ", file);
29197 else
29198 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29199 k[0] & 0xffffffff, k[1] & 0xffffffff,
29200 k[2] & 0xffffffff, k[3] & 0xffffffff);
29201 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29202 k[0] & 0xffffffff, k[1] & 0xffffffff,
29203 k[2] & 0xffffffff, k[3] & 0xffffffff);
29204 return;
29207 else if (GET_CODE (x) == CONST_DOUBLE
29208 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29210 long k[2];
29212 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29213 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29214 else
29215 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29217 if (TARGET_64BIT)
29219 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29220 fputs (DOUBLE_INT_ASM_OP, file);
29221 else
29222 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29223 k[0] & 0xffffffff, k[1] & 0xffffffff);
29224 fprintf (file, "0x%lx%08lx\n",
29225 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29226 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29227 return;
29229 else
29231 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29232 fputs ("\t.long ", file);
29233 else
29234 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29235 k[0] & 0xffffffff, k[1] & 0xffffffff);
29236 fprintf (file, "0x%lx,0x%lx\n",
29237 k[0] & 0xffffffff, k[1] & 0xffffffff);
29238 return;
29241 else if (GET_CODE (x) == CONST_DOUBLE
29242 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29244 long l;
29246 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29247 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29248 else
29249 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29251 if (TARGET_64BIT)
29253 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29254 fputs (DOUBLE_INT_ASM_OP, file);
29255 else
29256 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29257 if (WORDS_BIG_ENDIAN)
29258 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29259 else
29260 fprintf (file, "0x%lx\n", l & 0xffffffff);
29261 return;
29263 else
29265 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29266 fputs ("\t.long ", file);
29267 else
29268 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29269 fprintf (file, "0x%lx\n", l & 0xffffffff);
29270 return;
29273 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29275 unsigned HOST_WIDE_INT low;
29276 HOST_WIDE_INT high;
29278 low = INTVAL (x) & 0xffffffff;
29279 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29281 /* TOC entries are always Pmode-sized, so when big-endian
29282 smaller integer constants in the TOC need to be padded.
29283 (This is still a win over putting the constants in
29284 a separate constant pool, because then we'd have
29285 to have both a TOC entry _and_ the actual constant.)
29287 For a 32-bit target, CONST_INT values are loaded and shifted
29288 entirely within `low' and can be stored in one TOC entry. */
29290 /* Handling constants wider than a pointer on 64-bit targets would be easy, but it isn't done now. */
29291 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29293 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29295 low |= high << 32;
29296 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29297 high = (HOST_WIDE_INT) low >> 32;
29298 low &= 0xffffffff;
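/* For example (illustrative): an SImode constant 0x1234 in a 64-bit
   big-endian TOC is emitted as 0x0000123400000000 rather than
   0x0000000000001234, i.e. shifted into the most significant half of
   the Pmode-sized entry.  */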
29301 if (TARGET_64BIT)
29303 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29304 fputs (DOUBLE_INT_ASM_OP, file);
29305 else
29306 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29307 (long) high & 0xffffffff, (long) low & 0xffffffff);
29308 fprintf (file, "0x%lx%08lx\n",
29309 (long) high & 0xffffffff, (long) low & 0xffffffff);
29310 return;
29312 else
29314 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29316 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29317 fputs ("\t.long ", file);
29318 else
29319 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29320 (long) high & 0xffffffff, (long) low & 0xffffffff);
29321 fprintf (file, "0x%lx,0x%lx\n",
29322 (long) high & 0xffffffff, (long) low & 0xffffffff);
29324 else
29326 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29327 fputs ("\t.long ", file);
29328 else
29329 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29330 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29332 return;
29336 if (GET_CODE (x) == CONST)
29338 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29339 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29341 base = XEXP (XEXP (x, 0), 0);
29342 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29345 switch (GET_CODE (base))
29347 case SYMBOL_REF:
29348 name = XSTR (base, 0);
29349 break;
29351 case LABEL_REF:
29352 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29353 CODE_LABEL_NUMBER (XEXP (base, 0)));
29354 break;
29356 case CODE_LABEL:
29357 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29358 break;
29360 default:
29361 gcc_unreachable ();
29364 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29365 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29366 else
29368 fputs ("\t.tc ", file);
29369 RS6000_OUTPUT_BASENAME (file, name);
29371 if (offset < 0)
29372 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29373 else if (offset)
29374 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29376 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29377 after other TOC symbols, reducing overflow of small TOC access
29378 to [TC] symbols. */
29379 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29380 ? "[TE]," : "[TC],", file);
29383 /* Currently C++ toc references to vtables can be emitted before it
29384 is decided whether the vtable is public or private. If this is
29385 the case, then the linker will eventually complain that there is
29386 a TOC reference to an unknown section. Thus, for vtables only,
29387 we emit the TOC reference to reference the symbol and not the
29388 section. */
29389 if (VTABLE_NAME_P (name))
29391 RS6000_OUTPUT_BASENAME (file, name);
29392 if (offset < 0)
29393 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29394 else if (offset > 0)
29395 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29397 else
29398 output_addr_const (file, x);
29400 #if HAVE_AS_TLS
29401 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29403 switch (SYMBOL_REF_TLS_MODEL (base))
29405 case 0:
29406 break;
29407 case TLS_MODEL_LOCAL_EXEC:
29408 fputs ("@le", file);
29409 break;
29410 case TLS_MODEL_INITIAL_EXEC:
29411 fputs ("@ie", file);
29412 break;
29413 /* Use global-dynamic for local-dynamic. */
29414 case TLS_MODEL_GLOBAL_DYNAMIC:
29415 case TLS_MODEL_LOCAL_DYNAMIC:
29416 putc ('\n', file);
29417 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29418 fputs ("\t.tc .", file);
29419 RS6000_OUTPUT_BASENAME (file, name);
29420 fputs ("[TC],", file);
29421 output_addr_const (file, x);
29422 fputs ("@m", file);
29423 break;
29424 default:
29425 gcc_unreachable ();
29428 #endif
29430 putc ('\n', file);
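/* For example (illustrative): on 64-bit AIX without -mminimal-toc, the
   DFmode constant 1.0 (bit pattern 0x3ff0000000000000) produces

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   whereas 64-bit ELF targets emit DOUBLE_INT_ASM_OP (typically .quad)
   followed by the same value under the internal .LC<labelno> label.  */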
29433 /* Output an assembler pseudo-op to write an ASCII string of N characters
29434 starting at P to FILE.
29436 On the RS/6000, we have to do this using the .byte operation and
29437 write out special characters outside the quoted string.
29438 Also, the assembler is broken; very long strings are truncated,
29439 so we must artificially break them up early. */
29441 void
29442 output_ascii (FILE *file, const char *p, int n)
29444 char c;
29445 int i, count_string;
29446 const char *for_string = "\t.byte \"";
29447 const char *for_decimal = "\t.byte ";
29448 const char *to_close = NULL;
29450 count_string = 0;
29451 for (i = 0; i < n; i++)
29453 c = *p++;
29454 if (c >= ' ' && c < 0177)
29456 if (for_string)
29457 fputs (for_string, file);
29458 putc (c, file);
29460 /* Write two quotes to get one. */
29461 if (c == '"')
29463 putc (c, file);
29464 ++count_string;
29467 for_string = NULL;
29468 for_decimal = "\"\n\t.byte ";
29469 to_close = "\"\n";
29470 ++count_string;
29472 if (count_string >= 512)
29474 fputs (to_close, file);
29476 for_string = "\t.byte \"";
29477 for_decimal = "\t.byte ";
29478 to_close = NULL;
29479 count_string = 0;
29482 else
29484 if (for_decimal)
29485 fputs (for_decimal, file);
29486 fprintf (file, "%d", c);
29488 for_string = "\n\t.byte \"";
29489 for_decimal = ", ";
29490 to_close = "\n";
29491 count_string = 0;
29495 /* Now close the string if we have written one. Then end the line. */
29496 if (to_close)
29497 fputs (to_close, file);
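/* For example (illustrative): output_ascii (file, "Hi\n", 3) emits

	.byte "Hi"
	.byte 10

   printable runs stay inside one quoted .byte directive and all other
   bytes are written out in decimal.  */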
29500 /* Generate a unique section name for FILENAME for a section type
29501 represented by SECTION_DESC. Output goes into BUF.
29503 SECTION_DESC can be any string, as long as it is different for each
29504 possible section type.
29506 We name the section in the same manner as xlc. The name begins with an
29507 underscore followed by the filename (after stripping any leading directory
29508 names) with the last period replaced by the string SECTION_DESC. If
29509 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29510 the name. */
29512 void
29513 rs6000_gen_section_name (char **buf, const char *filename,
29514 const char *section_desc)
29516 const char *q, *after_last_slash, *last_period = 0;
29517 char *p;
29518 int len;
29520 after_last_slash = filename;
29521 for (q = filename; *q; q++)
29523 if (*q == '/')
29524 after_last_slash = q + 1;
29525 else if (*q == '.')
29526 last_period = q;
29529 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29530 *buf = (char *) xmalloc (len);
29532 p = *buf;
29533 *p++ = '_';
29535 for (q = after_last_slash; *q; q++)
29537 if (q == last_period)
29539 strcpy (p, section_desc);
29540 p += strlen (section_desc);
29541 break;
29544 else if (ISALNUM (*q))
29545 *p++ = *q;
29548 if (last_period == 0)
29549 strcpy (p, section_desc);
29550 else
29551 *p = '\0';
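/* Worked example (hypothetical inputs): for FILENAME "subdir/my-file.c"
   and SECTION_DESC "bss_", the directory prefix is stripped, the
   non-alphanumeric '-' is dropped, and the final period is replaced by
   the descriptor, producing "_myfilebss_".  */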
29554 /* Emit profile function. */
29556 void
29557 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29559 /* Non-standard profiling for kernels, which just saves LR then calls
29560 _mcount without worrying about arg saves. The idea is to change
29561 the function prologue as little as possible as it isn't easy to
29562 account for arg save/restore code added just for _mcount. */
29563 if (TARGET_PROFILE_KERNEL)
29564 return;
29566 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29568 #ifndef NO_PROFILE_COUNTERS
29569 # define NO_PROFILE_COUNTERS 0
29570 #endif
29571 if (NO_PROFILE_COUNTERS)
29572 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29573 LCT_NORMAL, VOIDmode);
29574 else
29576 char buf[30];
29577 const char *label_name;
29578 rtx fun;
29580 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29581 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29582 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29584 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29585 LCT_NORMAL, VOIDmode, fun, Pmode);
29588 else if (DEFAULT_ABI == ABI_DARWIN)
29590 const char *mcount_name = RS6000_MCOUNT;
29591 int caller_addr_regno = LR_REGNO;
29593 /* Be conservative and always set this, at least for now. */
29594 crtl->uses_pic_offset_table = 1;
29596 #if TARGET_MACHO
29597 /* For PIC code, set up a stub and collect the caller's address
29598 from r0, which is where the prologue puts it. */
29599 if (MACHOPIC_INDIRECT
29600 && crtl->uses_pic_offset_table)
29601 caller_addr_regno = 0;
29602 #endif
29603 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29604 LCT_NORMAL, VOIDmode,
29605 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29609 /* Write function profiler code. */
29611 void
29612 output_function_profiler (FILE *file, int labelno)
29614 char buf[100];
29616 switch (DEFAULT_ABI)
29618 default:
29619 gcc_unreachable ();
29621 case ABI_V4:
29622 if (!TARGET_32BIT)
29624 warning (0, "no profiling of 64-bit code for this ABI");
29625 return;
29627 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29628 fprintf (file, "\tmflr %s\n", reg_names[0]);
29629 if (NO_PROFILE_COUNTERS)
29631 asm_fprintf (file, "\tstw %s,4(%s)\n",
29632 reg_names[0], reg_names[1]);
29634 else if (TARGET_SECURE_PLT && flag_pic)
29636 if (TARGET_LINK_STACK)
29638 char name[32];
29639 get_ppc476_thunk_name (name);
29640 asm_fprintf (file, "\tbl %s\n", name);
29642 else
29643 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29644 asm_fprintf (file, "\tstw %s,4(%s)\n",
29645 reg_names[0], reg_names[1]);
29646 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29647 asm_fprintf (file, "\taddis %s,%s,",
29648 reg_names[12], reg_names[12]);
29649 assemble_name (file, buf);
29650 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29651 assemble_name (file, buf);
29652 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29654 else if (flag_pic == 1)
29656 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29657 asm_fprintf (file, "\tstw %s,4(%s)\n",
29658 reg_names[0], reg_names[1]);
29659 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29660 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29661 assemble_name (file, buf);
29662 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29664 else if (flag_pic > 1)
29666 asm_fprintf (file, "\tstw %s,4(%s)\n",
29667 reg_names[0], reg_names[1]);
29668 /* Now, we need to get the address of the label. */
29669 if (TARGET_LINK_STACK)
29671 char name[32];
29672 get_ppc476_thunk_name (name);
29673 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29674 assemble_name (file, buf);
29675 fputs ("-.\n1:", file);
29676 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29677 asm_fprintf (file, "\taddi %s,%s,4\n",
29678 reg_names[11], reg_names[11]);
29680 else
29682 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29683 assemble_name (file, buf);
29684 fputs ("-.\n1:", file);
29685 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29687 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29688 reg_names[0], reg_names[11]);
29689 asm_fprintf (file, "\tadd %s,%s,%s\n",
29690 reg_names[0], reg_names[0], reg_names[11]);
29692 else
29694 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29695 assemble_name (file, buf);
29696 fputs ("@ha\n", file);
29697 asm_fprintf (file, "\tstw %s,4(%s)\n",
29698 reg_names[0], reg_names[1]);
29699 asm_fprintf (file, "\tla %s,", reg_names[0]);
29700 assemble_name (file, buf);
29701 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29704 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29705 fprintf (file, "\tbl %s%s\n",
29706 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29707 break;
29709 case ABI_AIX:
29710 case ABI_ELFv2:
29711 case ABI_DARWIN:
29712 /* Don't do anything, done in output_profile_hook (). */
29713 break;
29719 /* The following variable value is the last issued insn. */
29721 static rtx_insn *last_scheduled_insn;
29723 /* The following variable helps to balance issuing of load and
29724 store instructions. */
29726 static int load_store_pendulum;
29728 /* The following variable helps pair divide insns during scheduling. */
29729 static int divide_cnt;
29730 /* The following variable helps pair and alternate vector and vector load
29731 insns during scheduling. */
29732 static int vec_pairing;
29735 /* Power4 load update and store update instructions are cracked into a
29736 load or store and an integer insn which are executed in the same cycle.
29737 Branches have their own dispatch slot which does not count against the
29738 GCC issue rate, but it changes the program flow so there are no other
29739 instructions to issue in this cycle. */
29741 static int
29742 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29744 last_scheduled_insn = insn;
29745 if (GET_CODE (PATTERN (insn)) == USE
29746 || GET_CODE (PATTERN (insn)) == CLOBBER)
29748 cached_can_issue_more = more;
29749 return cached_can_issue_more;
29752 if (insn_terminates_group_p (insn, current_group))
29754 cached_can_issue_more = 0;
29755 return cached_can_issue_more;
29758 /* If the insn has no reservation but we reach here, leave the issue count unchanged. */
29759 if (recog_memoized (insn) < 0)
29760 return more;
29762 if (rs6000_sched_groups)
29764 if (is_microcoded_insn (insn))
29765 cached_can_issue_more = 0;
29766 else if (is_cracked_insn (insn))
29767 cached_can_issue_more = more > 2 ? more - 2 : 0;
29768 else
29769 cached_can_issue_more = more - 1;
29771 return cached_can_issue_more;
29774 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
29775 return 0;
29777 cached_can_issue_more = more - 1;
29778 return cached_can_issue_more;
29781 static int
29782 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
29784 int r = rs6000_variable_issue_1 (insn, more);
29785 if (verbose)
29786 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
29787 return r;
29790 /* Adjust the cost of a scheduling dependency. Return the new cost of
29791 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
29793 static int
29794 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
29795 unsigned int)
29797 enum attr_type attr_type;
29799 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
29800 return cost;
29802 switch (dep_type)
29804 case REG_DEP_TRUE:
29806 /* Data dependency; DEP_INSN writes a register that INSN reads
29807 some cycles later. */
29809 /* Separate a load from a narrower, dependent store. */
29810 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
29811 && GET_CODE (PATTERN (insn)) == SET
29812 && GET_CODE (PATTERN (dep_insn)) == SET
29813 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
29814 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
29815 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
29816 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
29817 return cost + 14;
29819 attr_type = get_attr_type (insn);
29821 switch (attr_type)
29823 case TYPE_JMPREG:
29824 /* Tell the first scheduling pass about the latency between
29825 a mtctr and bctr (and mtlr and br/blr). The first
29826 scheduling pass will not know about this latency since
29827 the mtctr instruction, which has the latency associated
29828 to it, will be generated by reload. */
29829 return 4;
29830 case TYPE_BRANCH:
29831 /* Leave some extra cycles between a compare and its
29832 dependent branch, to inhibit expensive mispredicts. */
29833 if ((rs6000_tune == PROCESSOR_PPC603
29834 || rs6000_tune == PROCESSOR_PPC604
29835 || rs6000_tune == PROCESSOR_PPC604e
29836 || rs6000_tune == PROCESSOR_PPC620
29837 || rs6000_tune == PROCESSOR_PPC630
29838 || rs6000_tune == PROCESSOR_PPC750
29839 || rs6000_tune == PROCESSOR_PPC7400
29840 || rs6000_tune == PROCESSOR_PPC7450
29841 || rs6000_tune == PROCESSOR_PPCE5500
29842 || rs6000_tune == PROCESSOR_PPCE6500
29843 || rs6000_tune == PROCESSOR_POWER4
29844 || rs6000_tune == PROCESSOR_POWER5
29845 || rs6000_tune == PROCESSOR_POWER7
29846 || rs6000_tune == PROCESSOR_POWER8
29847 || rs6000_tune == PROCESSOR_POWER9
29848 || rs6000_tune == PROCESSOR_CELL)
29849 && recog_memoized (dep_insn)
29850 && (INSN_CODE (dep_insn) >= 0))
29852 switch (get_attr_type (dep_insn))
29854 case TYPE_CMP:
29855 case TYPE_FPCOMPARE:
29856 case TYPE_CR_LOGICAL:
29857 return cost + 2;
29858 case TYPE_EXTS:
29859 case TYPE_MUL:
29860 if (get_attr_dot (dep_insn) == DOT_YES)
29861 return cost + 2;
29862 else
29863 break;
29864 case TYPE_SHIFT:
29865 if (get_attr_dot (dep_insn) == DOT_YES
29866 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
29867 return cost + 2;
29868 else
29869 break;
29870 default:
29871 break;
29873 break;
29875 case TYPE_STORE:
29876 case TYPE_FPSTORE:
29877 if ((rs6000_tune == PROCESSOR_POWER6)
29878 && recog_memoized (dep_insn)
29879 && (INSN_CODE (dep_insn) >= 0))
29882 if (GET_CODE (PATTERN (insn)) != SET)
29883 /* If this happens, we have to extend this to schedule
29884 optimally. Return default for now. */
29885 return cost;
29887 /* Adjust the cost for the case where the value written
29888 by a fixed point operation is used as the address
29889 gen value on a store. */
29890 switch (get_attr_type (dep_insn))
29892 case TYPE_LOAD:
29893 case TYPE_CNTLZ:
29895 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29896 return get_attr_sign_extend (dep_insn)
29897 == SIGN_EXTEND_YES ? 6 : 4;
29898 break;
29900 case TYPE_SHIFT:
29902 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29903 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
29904 6 : 3;
29905 break;
29907 case TYPE_INTEGER:
29908 case TYPE_ADD:
29909 case TYPE_LOGICAL:
29910 case TYPE_EXTS:
29911 case TYPE_INSERT:
29913 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29914 return 3;
29915 break;
29917 case TYPE_STORE:
29918 case TYPE_FPLOAD:
29919 case TYPE_FPSTORE:
29921 if (get_attr_update (dep_insn) == UPDATE_YES
29922 && ! rs6000_store_data_bypass_p (dep_insn, insn))
29923 return 3;
29924 break;
29926 case TYPE_MUL:
29928 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29929 return 17;
29930 break;
29932 case TYPE_DIV:
29934 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29935 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
29936 break;
29938 default:
29939 break;
29942 break;
29944 case TYPE_LOAD:
29945 if ((rs6000_tune == PROCESSOR_POWER6)
29946 && recog_memoized (dep_insn)
29947 && (INSN_CODE (dep_insn) >= 0))
29950 /* Adjust the cost for the case where the value written
29951 by a fixed point instruction is used within the address
29952 gen portion of a subsequent load(u)(x) */
29953 switch (get_attr_type (dep_insn))
29955 case TYPE_LOAD:
29956 case TYPE_CNTLZ:
29958 if (set_to_load_agen (dep_insn, insn))
29959 return get_attr_sign_extend (dep_insn)
29960 == SIGN_EXTEND_YES ? 6 : 4;
29961 break;
29963 case TYPE_SHIFT:
29965 if (set_to_load_agen (dep_insn, insn))
29966 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
29967 6 : 3;
29968 break;
29970 case TYPE_INTEGER:
29971 case TYPE_ADD:
29972 case TYPE_LOGICAL:
29973 case TYPE_EXTS:
29974 case TYPE_INSERT:
29976 if (set_to_load_agen (dep_insn, insn))
29977 return 3;
29978 break;
29980 case TYPE_STORE:
29981 case TYPE_FPLOAD:
29982 case TYPE_FPSTORE:
29984 if (get_attr_update (dep_insn) == UPDATE_YES
29985 && set_to_load_agen (dep_insn, insn))
29986 return 3;
29987 break;
29989 case TYPE_MUL:
29991 if (set_to_load_agen (dep_insn, insn))
29992 return 17;
29993 break;
29995 case TYPE_DIV:
29997 if (set_to_load_agen (dep_insn, insn))
29998 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
29999 break;
30001 default:
30002 break;
30005 break;
30007 case TYPE_FPLOAD:
30008 if ((rs6000_tune == PROCESSOR_POWER6)
30009 && get_attr_update (insn) == UPDATE_NO
30010 && recog_memoized (dep_insn)
30011 && (INSN_CODE (dep_insn) >= 0)
30012 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30013 return 2;
30015 default:
30016 break;
30019 /* Fall out to return default cost. */
30021 break;
30023 case REG_DEP_OUTPUT:
30024 /* Output dependency; DEP_INSN writes a register that INSN writes some
30025 cycles later. */
30026 if ((rs6000_tune == PROCESSOR_POWER6)
30027 && recog_memoized (dep_insn)
30028 && (INSN_CODE (dep_insn) >= 0))
30030 attr_type = get_attr_type (insn);
30032 switch (attr_type)
30034 case TYPE_FP:
30035 case TYPE_FPSIMPLE:
30036 if (get_attr_type (dep_insn) == TYPE_FP
30037 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30038 return 1;
30039 break;
30040 case TYPE_FPLOAD:
30041 if (get_attr_update (insn) == UPDATE_NO
30042 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30043 return 2;
30044 break;
30045 default:
30046 break;
30049 /* Fall through, no cost for output dependency. */
30050 /* FALLTHRU */
30052 case REG_DEP_ANTI:
30053 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30054 cycles later. */
30055 return 0;
30057 default:
30058 gcc_unreachable ();
30061 return cost;
30064 /* Debug version of rs6000_adjust_cost. */
30066 static int
30067 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30068 int cost, unsigned int dw)
30070 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30072 if (ret != cost)
30074 const char *dep;
30076 switch (dep_type)
30078 default: dep = "unknown dependency"; break;
30079 case REG_DEP_TRUE: dep = "data dependency"; break;
30080 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30081 case REG_DEP_ANTI: dep = "anti dependency"; break;
30084 fprintf (stderr,
30085 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30086 "%s, insn:\n", ret, cost, dep);
30088 debug_rtx (insn);
30091 return ret;
30094 /* Return true if INSN is microcoded.
30095 Return false otherwise. */
30097 static bool
30098 is_microcoded_insn (rtx_insn *insn)
30100 if (!insn || !NONDEBUG_INSN_P (insn)
30101 || GET_CODE (PATTERN (insn)) == USE
30102 || GET_CODE (PATTERN (insn)) == CLOBBER)
30103 return false;
30105 if (rs6000_tune == PROCESSOR_CELL)
30106 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30108 if (rs6000_sched_groups
30109 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30111 enum attr_type type = get_attr_type (insn);
30112 if ((type == TYPE_LOAD
30113 && get_attr_update (insn) == UPDATE_YES
30114 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30115 || ((type == TYPE_LOAD || type == TYPE_STORE)
30116 && get_attr_update (insn) == UPDATE_YES
30117 && get_attr_indexed (insn) == INDEXED_YES)
30118 || type == TYPE_MFCR)
30119 return true;
30122 return false;
30125 /* The function returns true if INSN is cracked into 2 instructions
30126 by the processor (and therefore occupies 2 issue slots). */
30128 static bool
30129 is_cracked_insn (rtx_insn *insn)
30131 if (!insn || !NONDEBUG_INSN_P (insn)
30132 || GET_CODE (PATTERN (insn)) == USE
30133 || GET_CODE (PATTERN (insn)) == CLOBBER)
30134 return false;
30136 if (rs6000_sched_groups
30137 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30139 enum attr_type type = get_attr_type (insn);
30140 if ((type == TYPE_LOAD
30141 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30142 && get_attr_update (insn) == UPDATE_NO)
30143 || (type == TYPE_LOAD
30144 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30145 && get_attr_update (insn) == UPDATE_YES
30146 && get_attr_indexed (insn) == INDEXED_NO)
30147 || (type == TYPE_STORE
30148 && get_attr_update (insn) == UPDATE_YES
30149 && get_attr_indexed (insn) == INDEXED_NO)
30150 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30151 && get_attr_update (insn) == UPDATE_YES)
30152 || (type == TYPE_CR_LOGICAL
30153 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30154 || (type == TYPE_EXTS
30155 && get_attr_dot (insn) == DOT_YES)
30156 || (type == TYPE_SHIFT
30157 && get_attr_dot (insn) == DOT_YES
30158 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30159 || (type == TYPE_MUL
30160 && get_attr_dot (insn) == DOT_YES)
30161 || type == TYPE_DIV
30162 || (type == TYPE_INSERT
30163 && get_attr_size (insn) == SIZE_32))
30164 return true;
30167 return false;
30170 /* The function returns true if INSN can be issued only from
30171 the branch slot. */
30173 static bool
30174 is_branch_slot_insn (rtx_insn *insn)
30176 if (!insn || !NONDEBUG_INSN_P (insn)
30177 || GET_CODE (PATTERN (insn)) == USE
30178 || GET_CODE (PATTERN (insn)) == CLOBBER)
30179 return false;
30181 if (rs6000_sched_groups)
30183 enum attr_type type = get_attr_type (insn);
30184 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30185 return true;
30186 return false;
30189 return false;
30192 /* Return true if OUT_INSN sets a value that is
30193 used in the address generation computation of IN_INSN. */
30194 static bool
30195 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30197 rtx out_set, in_set;
30199 /* For performance reasons, only handle the simple case where
30200 both loads are a single_set. */
30201 out_set = single_set (out_insn);
30202 if (out_set)
30204 in_set = single_set (in_insn);
30205 if (in_set)
30206 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30209 return false;
30212 /* Try to determine base/offset/size parts of the given MEM.
30213 Return true if successful, false if all the values couldn't
30214 be determined.
30216 This function only looks for REG or REG+CONST address forms.
30217 REG+REG address form will return false. */
30219 static bool
30220 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30221 HOST_WIDE_INT *size)
30223 rtx addr_rtx;
30224 if (MEM_SIZE_KNOWN_P (mem))
30225 *size = MEM_SIZE (mem);
30226 else
30227 return false;
30229 addr_rtx = (XEXP (mem, 0));
30230 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30231 addr_rtx = XEXP (addr_rtx, 1);
30233 *offset = 0;
30234 while (GET_CODE (addr_rtx) == PLUS
30235 && CONST_INT_P (XEXP (addr_rtx, 1)))
30237 *offset += INTVAL (XEXP (addr_rtx, 1));
30238 addr_rtx = XEXP (addr_rtx, 0);
30240 if (!REG_P (addr_rtx))
30241 return false;
30243 *base = addr_rtx;
30244 return true;
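/* For example (illustrative): given a MEM such as
   (mem:DI (plus:DI (reg:DI 9) (const_int 16)) [... S8 ...]),
   this returns true with *base = (reg:DI 9), *offset = 16 and
   *size = 8, while a reg+reg (indexed) address yields false.  */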
30247 /* Return true if the target storage location of
30248 MEM1 is adjacent to the target storage location of MEM2. */
30251 static bool
30252 adjacent_mem_locations (rtx mem1, rtx mem2)
30254 rtx reg1, reg2;
30255 HOST_WIDE_INT off1, size1, off2, size2;
30257 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30258 && get_memref_parts (mem2, &reg2, &off2, &size2))
30259 return ((REGNO (reg1) == REGNO (reg2))
30260 && ((off1 + size1 == off2)
30261 || (off2 + size2 == off1)));
30263 return false;
30266 /* This function returns true if it can be determined that the two MEM
30267 locations overlap by at least 1 byte based on base reg/offset/size. */
30269 static bool
30270 mem_locations_overlap (rtx mem1, rtx mem2)
30272 rtx reg1, reg2;
30273 HOST_WIDE_INT off1, size1, off2, size2;
30275 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30276 && get_memref_parts (mem2, &reg2, &off2, &size2))
30277 return ((REGNO (reg1) == REGNO (reg2))
30278 && (((off1 <= off2) && (off1 + size1 > off2))
30279 || ((off2 <= off1) && (off2 + size2 > off1))));
30281 return false;
30284 /* A C statement (sans semicolon) to update the integer scheduling
30285 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30286 INSN earlier, reduce the priority to execute INSN later. Do not
30287 define this macro if you do not need to adjust the scheduling
30288 priorities of insns. */
30290 static int
30291 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30293 rtx load_mem, str_mem;
30294 /* On machines (like the 750) which have asymmetric integer units,
30295 where one integer unit can do multiply and divides and the other
30296 can't, reduce the priority of multiply/divide so it is scheduled
30297 before other integer operations. */
30299 #if 0
30300 if (! INSN_P (insn))
30301 return priority;
30303 if (GET_CODE (PATTERN (insn)) == USE)
30304 return priority;
30306 switch (rs6000_tune) {
30307 case PROCESSOR_PPC750:
30308 switch (get_attr_type (insn))
30310 default:
30311 break;
30313 case TYPE_MUL:
30314 case TYPE_DIV:
30315 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30316 priority, priority);
30317 if (priority >= 0 && priority < 0x01000000)
30318 priority >>= 3;
30319 break;
30322 #endif
30324 if (insn_must_be_first_in_group (insn)
30325 && reload_completed
30326 && current_sched_info->sched_max_insns_priority
30327 && rs6000_sched_restricted_insns_priority)
30330 /* Prioritize insns that can be dispatched only in the first
30331 dispatch slot. */
30332 if (rs6000_sched_restricted_insns_priority == 1)
30333 /* Attach highest priority to insn. This means that in
30334 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30335 precede 'priority' (critical path) considerations. */
30336 return current_sched_info->sched_max_insns_priority;
30337 else if (rs6000_sched_restricted_insns_priority == 2)
30338 /* Increase priority of insn by a minimal amount. This means that in
30339 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30340 considerations precede dispatch-slot restriction considerations. */
30341 return (priority + 1);
30344 if (rs6000_tune == PROCESSOR_POWER6
30345 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30346 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30347 /* Attach highest priority to insn if the scheduler has just issued two
30348 stores and this instruction is a load, or two loads and this instruction
30349 is a store. Power6 wants loads and stores scheduled alternately
30350 when possible */
30351 return current_sched_info->sched_max_insns_priority;
30353 return priority;
30356 /* Return true if the instruction is nonpipelined on the Cell. */
30357 static bool
30358 is_nonpipeline_insn (rtx_insn *insn)
30360 enum attr_type type;
30361 if (!insn || !NONDEBUG_INSN_P (insn)
30362 || GET_CODE (PATTERN (insn)) == USE
30363 || GET_CODE (PATTERN (insn)) == CLOBBER)
30364 return false;
30366 type = get_attr_type (insn);
30367 if (type == TYPE_MUL
30368 || type == TYPE_DIV
30369 || type == TYPE_SDIV
30370 || type == TYPE_DDIV
30371 || type == TYPE_SSQRT
30372 || type == TYPE_DSQRT
30373 || type == TYPE_MFCR
30374 || type == TYPE_MFCRF
30375 || type == TYPE_MFJMPR)
30377 return true;
30379 return false;
30383 /* Return how many instructions the machine can issue per cycle. */
30385 static int
30386 rs6000_issue_rate (void)
30388 /* Unless scheduling for register pressure, use an issue rate of 1 for
30389 the first scheduling pass, to avoid degrading the schedule. */
30390 if (!reload_completed && !flag_sched_pressure)
30391 return 1;
30393 switch (rs6000_tune) {
30394 case PROCESSOR_RS64A:
30395 case PROCESSOR_PPC601: /* ? */
30396 case PROCESSOR_PPC7450:
30397 return 3;
30398 case PROCESSOR_PPC440:
30399 case PROCESSOR_PPC603:
30400 case PROCESSOR_PPC750:
30401 case PROCESSOR_PPC7400:
30402 case PROCESSOR_PPC8540:
30403 case PROCESSOR_PPC8548:
30404 case PROCESSOR_CELL:
30405 case PROCESSOR_PPCE300C2:
30406 case PROCESSOR_PPCE300C3:
30407 case PROCESSOR_PPCE500MC:
30408 case PROCESSOR_PPCE500MC64:
30409 case PROCESSOR_PPCE5500:
30410 case PROCESSOR_PPCE6500:
30411 case PROCESSOR_TITAN:
30412 return 2;
30413 case PROCESSOR_PPC476:
30414 case PROCESSOR_PPC604:
30415 case PROCESSOR_PPC604e:
30416 case PROCESSOR_PPC620:
30417 case PROCESSOR_PPC630:
30418 return 4;
30419 case PROCESSOR_POWER4:
30420 case PROCESSOR_POWER5:
30421 case PROCESSOR_POWER6:
30422 case PROCESSOR_POWER7:
30423 return 5;
30424 case PROCESSOR_POWER8:
30425 return 7;
30426 case PROCESSOR_POWER9:
30427 return 6;
30428 default:
30429 return 1;
30433 /* Return how many instructions to look ahead for better insn
30434 scheduling. */
30436 static int
30437 rs6000_use_sched_lookahead (void)
30439 switch (rs6000_tune)
30441 case PROCESSOR_PPC8540:
30442 case PROCESSOR_PPC8548:
30443 return 4;
30445 case PROCESSOR_CELL:
30446 return (reload_completed ? 8 : 0);
30448 default:
30449 return 0;
30453 /* We are choosing insn from the ready queue. Return zero if INSN can be
30454 chosen. */
30455 static int
30456 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30458 if (ready_index == 0)
30459 return 0;
30461 if (rs6000_tune != PROCESSOR_CELL)
30462 return 0;
30464 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30466 if (!reload_completed
30467 || is_nonpipeline_insn (insn)
30468 || is_microcoded_insn (insn))
30469 return 1;
30471 return 0;
30474 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30475 and return true. */
30477 static bool
30478 find_mem_ref (rtx pat, rtx *mem_ref)
30480 const char * fmt;
30481 int i, j;
30483 /* stack_tie does not produce any real memory traffic. */
30484 if (tie_operand (pat, VOIDmode))
30485 return false;
30487 if (GET_CODE (pat) == MEM)
30489 *mem_ref = pat;
30490 return true;
30493 /* Recursively process the pattern. */
30494 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30496 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30498 if (fmt[i] == 'e')
30500 if (find_mem_ref (XEXP (pat, i), mem_ref))
30501 return true;
30503 else if (fmt[i] == 'E')
30504 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30506 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30507 return true;
30511 return false;
30514 /* Determine if PAT is a PATTERN of a load insn. */
30516 static bool
30517 is_load_insn1 (rtx pat, rtx *load_mem)
30519 if (!pat)
30520 return false;
30522 if (GET_CODE (pat) == SET)
30523 return find_mem_ref (SET_SRC (pat), load_mem);
30525 if (GET_CODE (pat) == PARALLEL)
30527 int i;
30529 for (i = 0; i < XVECLEN (pat, 0); i++)
30530 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30531 return true;
30534 return false;
30537 /* Determine if INSN loads from memory. */
30539 static bool
30540 is_load_insn (rtx insn, rtx *load_mem)
30542 if (!insn || !INSN_P (insn))
30543 return false;
30545 if (CALL_P (insn))
30546 return false;
30548 return is_load_insn1 (PATTERN (insn), load_mem);
30551 /* Determine if PAT is a PATTERN of a store insn. */
30553 static bool
30554 is_store_insn1 (rtx pat, rtx *str_mem)
30556 if (!pat)
30557 return false;
30559 if (GET_CODE (pat) == SET)
30560 return find_mem_ref (SET_DEST (pat), str_mem);
30562 if (GET_CODE (pat) == PARALLEL)
30564 int i;
30566 for (i = 0; i < XVECLEN (pat, 0); i++)
30567 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30568 return true;
30571 return false;
30574 /* Determine if INSN stores to memory. */
30576 static bool
30577 is_store_insn (rtx insn, rtx *str_mem)
30579 if (!insn || !INSN_P (insn))
30580 return false;
30582 return is_store_insn1 (PATTERN (insn), str_mem);
30585 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30587 static bool
30588 is_power9_pairable_vec_type (enum attr_type type)
30590 switch (type)
30592 case TYPE_VECSIMPLE:
30593 case TYPE_VECCOMPLEX:
30594 case TYPE_VECDIV:
30595 case TYPE_VECCMP:
30596 case TYPE_VECPERM:
30597 case TYPE_VECFLOAT:
30598 case TYPE_VECFDIV:
30599 case TYPE_VECDOUBLE:
30600 return true;
30601 default:
30602 break;
30604 return false;
30607 /* Returns whether the dependence between INSN and NEXT is considered
30608 costly by the given target. */
30610 static bool
30611 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30613 rtx insn;
30614 rtx next;
30615 rtx load_mem, str_mem;
30617 /* If the flag is not enabled - no dependence is considered costly;
30618 allow all dependent insns in the same group.
30619 This is the most aggressive option. */
30620 if (rs6000_sched_costly_dep == no_dep_costly)
30621 return false;
30623 /* If the flag is set to 1 - a dependence is always considered costly;
30624 do not allow dependent instructions in the same group.
30625 This is the most conservative option. */
30626 if (rs6000_sched_costly_dep == all_deps_costly)
30627 return true;
30629 insn = DEP_PRO (dep);
30630 next = DEP_CON (dep);
30632 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30633 && is_load_insn (next, &load_mem)
30634 && is_store_insn (insn, &str_mem))
30635 /* Prevent load after store in the same group. */
30636 return true;
30638 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30639 && is_load_insn (next, &load_mem)
30640 && is_store_insn (insn, &str_mem)
30641 && DEP_TYPE (dep) == REG_DEP_TRUE
30642 && mem_locations_overlap(str_mem, load_mem))
30643 /* Prevent load after store in the same group if it is a true
30644 dependence. */
30645 return true;
30647 /* The flag is set to X; dependences with latency >= X are considered costly,
30648 and will not be scheduled in the same group. */
30649 if (rs6000_sched_costly_dep <= max_dep_latency
30650 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30651 return true;
30653 return false;
30656 /* Return the next insn after INSN that is found before TAIL is reached,
30657 skipping any "non-active" insns - insns that will not actually occupy
30658 an issue slot. Return NULL if no such insn is found. */
30660 static rtx_insn *
30661 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30663 if (insn == NULL_RTX || insn == tail)
30664 return NULL;
30666 while (1)
30668 insn = NEXT_INSN (insn);
30669 if (insn == NULL_RTX || insn == tail)
30670 return NULL;
30672 if (CALL_P (insn)
30673 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30674 || (NONJUMP_INSN_P (insn)
30675 && GET_CODE (PATTERN (insn)) != USE
30676 && GET_CODE (PATTERN (insn)) != CLOBBER
30677 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30678 break;
30680 return insn;
30683 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30685 static int
30686 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30688 int pos;
30689 int i;
30690 rtx_insn *tmp;
30691 enum attr_type type, type2;
30693 type = get_attr_type (last_scheduled_insn);
30695 /* Try to issue fixed-point divides back-to-back in pairs so they will be
30696 routed to separate execution units and execute in parallel. */
30697 if (type == TYPE_DIV && divide_cnt == 0)
30699 /* First divide has been scheduled. */
30700 divide_cnt = 1;
30702 /* Scan the ready list looking for another divide, if found move it
30703 to the end of the list so it is chosen next. */
30704 pos = lastpos;
30705 while (pos >= 0)
30707 if (recog_memoized (ready[pos]) >= 0
30708 && get_attr_type (ready[pos]) == TYPE_DIV)
30710 tmp = ready[pos];
30711 for (i = pos; i < lastpos; i++)
30712 ready[i] = ready[i + 1];
30713 ready[lastpos] = tmp;
30714 break;
30716 pos--;
30719 else
30721 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30722 divide_cnt = 0;
30724 /* The best dispatch throughput for vector and vector load insns can be
30725 achieved by interleaving a vector and vector load such that they'll
30726 dispatch to the same superslice. If this pairing cannot be achieved
30727 then it is best to pair vector insns together and vector load insns
30728 together.
30730 To aid in this pairing, vec_pairing maintains the current state with
30731 the following values:
30733 0 : Initial state, no vecload/vector pairing has been started.
30735 1 : A vecload or vector insn has been issued and a candidate for
30736 pairing has been found and moved to the end of the ready
30737 list. */
30738 if (type == TYPE_VECLOAD)
30740 /* Issued a vecload. */
30741 if (vec_pairing == 0)
30743 int vecload_pos = -1;
30744 /* We issued a single vecload, look for a vector insn to pair it
30745 with. If one isn't found, try to pair another vecload. */
30746 pos = lastpos;
30747 while (pos >= 0)
30749 if (recog_memoized (ready[pos]) >= 0)
30751 type2 = get_attr_type (ready[pos]);
30752 if (is_power9_pairable_vec_type (type2))
30754 /* Found a vector insn to pair with, move it to the
30755 end of the ready list so it is scheduled next. */
30756 tmp = ready[pos];
30757 for (i = pos; i < lastpos; i++)
30758 ready[i] = ready[i + 1];
30759 ready[lastpos] = tmp;
30760 vec_pairing = 1;
30761 return cached_can_issue_more;
30763 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
30764 /* Remember position of first vecload seen. */
30765 vecload_pos = pos;
30767 pos--;
30769 if (vecload_pos >= 0)
30771 /* Didn't find a vector to pair with but did find a vecload,
30772 move it to the end of the ready list. */
30773 tmp = ready[vecload_pos];
30774 for (i = vecload_pos; i < lastpos; i++)
30775 ready[i] = ready[i + 1];
30776 ready[lastpos] = tmp;
30777 vec_pairing = 1;
30778 return cached_can_issue_more;
30782 else if (is_power9_pairable_vec_type (type))
30784 /* Issued a vector operation. */
30785 if (vec_pairing == 0)
30787 int vec_pos = -1;
30788 /* We issued a single vector insn, look for a vecload to pair it
30789 with. If one isn't found, try to pair another vector. */
30790 pos = lastpos;
30791 while (pos >= 0)
30793 if (recog_memoized (ready[pos]) >= 0)
30795 type2 = get_attr_type (ready[pos]);
30796 if (type2 == TYPE_VECLOAD)
30798 /* Found a vecload insn to pair with, move it to the
30799 end of the ready list so it is scheduled next. */
30800 tmp = ready[pos];
30801 for (i = pos; i < lastpos; i++)
30802 ready[i] = ready[i + 1];
30803 ready[lastpos] = tmp;
30804 vec_pairing = 1;
30805 return cached_can_issue_more;
30807 else if (is_power9_pairable_vec_type (type2)
30808 && vec_pos == -1)
30809 /* Remember position of first vector insn seen. */
30810 vec_pos = pos;
30812 pos--;
30814 if (vec_pos >= 0)
30816 /* Didn't find a vecload to pair with but did find a vector
30817 insn, move it to the end of the ready list. */
30818 tmp = ready[vec_pos];
30819 for (i = vec_pos; i < lastpos; i++)
30820 ready[i] = ready[i + 1];
30821 ready[lastpos] = tmp;
30822 vec_pairing = 1;
30823 return cached_can_issue_more;
30828 /* We've either finished a vec/vecload pair, couldn't find an insn to
30829 continue the current pair, or the last insn had nothing to do with
30830 pairing. In any case, reset the state. */
30831 vec_pairing = 0;
30834 return cached_can_issue_more;
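/* Editor's sketch: the "move to the end of the ready list" idiom repeated
   throughout power9_sched_reorder2 (and the POWER6 code below), written
   out once as a hypothetical helper; it is kept under #if 0 and is not
   part of the port.  The scheduler issues from the tail of READY, so
   rotating entry POS up to LASTPOS makes it the next insn chosen.  */
#if 0
static void
rotate_ready_entry_to_tail (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];		/* Candidate to issue next.  */
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];		/* Slide the others down a slot.  */
  ready[lastpos] = tmp;			/* The tail is issued first.  */
}
#endif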
30837 /* We are about to begin issuing insns for this clock cycle. */
30839 static int
30840 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
30841 rtx_insn **ready ATTRIBUTE_UNUSED,
30842 int *pn_ready ATTRIBUTE_UNUSED,
30843 int clock_var ATTRIBUTE_UNUSED)
30845 int n_ready = *pn_ready;
30847 if (sched_verbose)
30848 fprintf (dump, "// rs6000_sched_reorder :\n");
30850 /* Reorder the ready list, if the insn about to be issued (the last
30851 ready insn) is a nonpipelined insn. */
30852 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
30854 if (is_nonpipeline_insn (ready[n_ready - 1])
30855 && (recog_memoized (ready[n_ready - 2]) > 0))
30856 /* Simply swap first two insns. */
30857 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
30860 if (rs6000_tune == PROCESSOR_POWER6)
30861 load_store_pendulum = 0;
30863 return rs6000_issue_rate ();
30866 /* Like rs6000_sched_reorder, but called after issuing each insn. */
30868 static int
30869 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
30870 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
30872 if (sched_verbose)
30873 fprintf (dump, "// rs6000_sched_reorder2 :\n");
30875 /* For Power6, we need to handle some special cases to try and keep the
30876 store queue from overflowing and triggering expensive flushes.
30878 This code monitors how load and store instructions are being issued
30879 and skews the ready list one way or the other to increase the likelihood
30880 that a desired instruction is issued at the proper time.
30882 A couple of things are done. First, we maintain a "load_store_pendulum"
30883 to track the current state of load/store issue.
30885 - If the pendulum is at zero, then no loads or stores have been
30886 issued in the current cycle so we do nothing.
30888 - If the pendulum is 1, then a single load has been issued in this
30889 cycle and we attempt to locate another load in the ready list to
30890 issue with it.
30892 - If the pendulum is -2, then two stores have already been
30893 issued in this cycle, so we increase the priority of the first load
30894 in the ready list to increase its likelihood of being chosen first
30895 in the next cycle.
30897 - If the pendulum is -1, then a single store has been issued in this
30898 cycle and we attempt to locate another store in the ready list to
30899 issue with it, preferring a store to an adjacent memory location to
30900 facilitate store pairing in the store queue.
30902 - If the pendulum is 2, then two loads have already been
30903 issued in this cycle, so we increase the priority of the first store
30904 in the ready list to increase its likelihood of being chosen first
30905 in the next cycle.
30907 - If the pendulum < -2 or > 2, then do nothing.
30909 Note: This code covers the most common scenarios. There exist
30910 non-load/store instructions which make use of the LSU and which
30911 would need to be accounted for to strictly model the behavior
30912 of the machine. Those instructions are currently unaccounted
30913 for to help minimize compile time overhead of this code.
30915 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
30917 int pos;
30918 int i;
30919 rtx_insn *tmp;
30920 rtx load_mem, str_mem;
30922 if (is_store_insn (last_scheduled_insn, &str_mem))
30923 /* Issuing a store, swing the load_store_pendulum to the left */
30924 load_store_pendulum--;
30925 else if (is_load_insn (last_scheduled_insn, &load_mem))
30926 /* Issuing a load, swing the load_store_pendulum to the right */
30927 load_store_pendulum++;
30928 else
30929 return cached_can_issue_more;
30931 /* If the pendulum is balanced, or there is only one instruction on
30932 the ready list, then all is well, so return. */
30933 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
30934 return cached_can_issue_more;
30936 if (load_store_pendulum == 1)
30938 /* A load has been issued in this cycle. Scan the ready list
30939 for another load to issue with it */
30940 pos = *pn_ready - 1;
30942 while (pos >= 0)
30944 if (is_load_insn (ready[pos], &load_mem))
30946 /* Found a load. Move it to the head of the ready list,
30947 and adjust its priority so that it is more likely to
30948 stay there */
30949 tmp = ready[pos];
30950 for (i = pos; i < *pn_ready - 1; i++)
30951 ready[i] = ready[i + 1];
30952 ready[*pn_ready - 1] = tmp;
30954 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
30955 INSN_PRIORITY (tmp)++;
30956 break;
30958 pos--;
30961 else if (load_store_pendulum == -2)
30963 /* Two stores have been issued in this cycle. Increase the
30964 priority of the first load in the ready list to favor it for
30965 issuing in the next cycle. */
30966 pos = *pn_ready - 1;
30968 while (pos >= 0)
30970 if (is_load_insn (ready[pos], &load_mem)
30971 && !sel_sched_p ()
30972 && INSN_PRIORITY_KNOWN (ready[pos]))
30974 INSN_PRIORITY (ready[pos])++;
30976 /* Adjust the pendulum to account for the fact that a load
30977 was found and increased in priority. This is to prevent
30978 increasing the priority of multiple loads */
30979 load_store_pendulum--;
30981 break;
30983 pos--;
30986 else if (load_store_pendulum == -1)
30988 /* A store has been issued in this cycle. Scan the ready list for
30989 another store to issue with it, preferring a store to an adjacent
30990 memory location */
30991 int first_store_pos = -1;
30993 pos = *pn_ready - 1;
30995 while (pos >= 0)
30997 if (is_store_insn (ready[pos], &str_mem))
30999 rtx str_mem2;
31000 /* Maintain the index of the first store found on the
31001 list */
31002 if (first_store_pos == -1)
31003 first_store_pos = pos;
31005 if (is_store_insn (last_scheduled_insn, &str_mem2)
31006 && adjacent_mem_locations (str_mem, str_mem2))
31008 /* Found an adjacent store. Move it to the head of the
31009 ready list, and adjust its priority so that it is
31010 more likely to stay there */
31011 tmp = ready[pos];
31012 for (i = pos; i < *pn_ready - 1; i++)
31013 ready[i] = ready[i + 1];
31014 ready[*pn_ready - 1] = tmp;
31016 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31017 INSN_PRIORITY (tmp)++;
31019 first_store_pos = -1;
31021 break;
31024 pos--;
31027 if (first_store_pos >= 0)
31029 /* An adjacent store wasn't found, but a non-adjacent store was,
31030 so move the non-adjacent store to the front of the ready
31031 list, and adjust its priority so that it is more likely to
31032 stay there. */
31033 tmp = ready[first_store_pos];
31034 for (i = first_store_pos; i < *pn_ready - 1; i++)
31035 ready[i] = ready[i + 1];
31036 ready[*pn_ready - 1] = tmp;
31037 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31038 INSN_PRIORITY (tmp)++;
31041 else if (load_store_pendulum == 2)
31043 /* Two loads have been issued in this cycle. Increase the priority
31044 of the first store in the ready list to favor it for issuing in
31045 the next cycle. */
31046 pos = *pn_ready - 1;
31048 while (pos >= 0)
31050 if (is_store_insn (ready[pos], &str_mem)
31051 && !sel_sched_p ()
31052 && INSN_PRIORITY_KNOWN (ready[pos]))
31054 INSN_PRIORITY (ready[pos])++;
31056 /* Adjust the pendulum to account for the fact that a store
31057 was found and increased in priority. This is to prevent
31058 increasing the priority of multiple stores */
31059 load_store_pendulum++;
31061 break;
31063 pos--;
31068 /* Do Power9 dependent reordering if necessary. */
31069 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31070 && recog_memoized (last_scheduled_insn) >= 0)
31071 return power9_sched_reorder2 (ready, *pn_ready - 1);
31073 return cached_can_issue_more;
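/* Editor's note: a worked trace of the pendulum rules above for one
   hypothetical POWER6 cycle.  The pendulum is reset to 0 each cycle in
   rs6000_sched_reorder; issuing a store swings it to -1 and the ready
   list is scanned for a second, preferably adjacent, store; issuing that
   store gives -2, at which point the first ready load gets a priority
   bump and the pendulum steps once more to -3 so that only one load is
   boosted; past +/-2 nothing further happens.  Loads mirror this on the
   positive side.  */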
31076 /* Return whether the presence of INSN causes a dispatch group termination
31077 of group WHICH_GROUP.
31079 If WHICH_GROUP == current_group, this function will return true if INSN
31080 causes the termination of the current group (i.e., the dispatch group to
31081 which INSN belongs). This means that INSN will be the last insn in the
31082 group it belongs to.
31084 If WHICH_GROUP == previous_group, this function will return true if INSN
31085 causes the termination of the previous group (i.e., the dispatch group that
31086 precedes the group to which INSN belongs). This means that INSN will be
31087 the first insn in the group it belongs to. */
31089 static bool
31090 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31092 bool first, last;
31094 if (! insn)
31095 return false;
31097 first = insn_must_be_first_in_group (insn);
31098 last = insn_must_be_last_in_group (insn);
31100 if (first && last)
31101 return true;
31103 if (which_group == current_group)
31104 return last;
31105 else if (which_group == previous_group)
31106 return first;
31108 return false;
31112 static bool
31113 insn_must_be_first_in_group (rtx_insn *insn)
31115 enum attr_type type;
31117 if (!insn
31118 || NOTE_P (insn)
31119 || DEBUG_INSN_P (insn)
31120 || GET_CODE (PATTERN (insn)) == USE
31121 || GET_CODE (PATTERN (insn)) == CLOBBER)
31122 return false;
31124 switch (rs6000_tune)
31126 case PROCESSOR_POWER5:
31127 if (is_cracked_insn (insn))
31128 return true;
31129 /* FALLTHRU */
31130 case PROCESSOR_POWER4:
31131 if (is_microcoded_insn (insn))
31132 return true;
31134 if (!rs6000_sched_groups)
31135 return false;
31137 type = get_attr_type (insn);
31139 switch (type)
31141 case TYPE_MFCR:
31142 case TYPE_MFCRF:
31143 case TYPE_MTCR:
31144 case TYPE_CR_LOGICAL:
31145 case TYPE_MTJMPR:
31146 case TYPE_MFJMPR:
31147 case TYPE_DIV:
31148 case TYPE_LOAD_L:
31149 case TYPE_STORE_C:
31150 case TYPE_ISYNC:
31151 case TYPE_SYNC:
31152 return true;
31153 default:
31154 break;
31156 break;
31157 case PROCESSOR_POWER6:
31158 type = get_attr_type (insn);
31160 switch (type)
31162 case TYPE_EXTS:
31163 case TYPE_CNTLZ:
31164 case TYPE_TRAP:
31165 case TYPE_MUL:
31166 case TYPE_INSERT:
31167 case TYPE_FPCOMPARE:
31168 case TYPE_MFCR:
31169 case TYPE_MTCR:
31170 case TYPE_MFJMPR:
31171 case TYPE_MTJMPR:
31172 case TYPE_ISYNC:
31173 case TYPE_SYNC:
31174 case TYPE_LOAD_L:
31175 case TYPE_STORE_C:
31176 return true;
31177 case TYPE_SHIFT:
31178 if (get_attr_dot (insn) == DOT_NO
31179 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31180 return true;
31181 else
31182 break;
31183 case TYPE_DIV:
31184 if (get_attr_size (insn) == SIZE_32)
31185 return true;
31186 else
31187 break;
31188 case TYPE_LOAD:
31189 case TYPE_STORE:
31190 case TYPE_FPLOAD:
31191 case TYPE_FPSTORE:
31192 if (get_attr_update (insn) == UPDATE_YES)
31193 return true;
31194 else
31195 break;
31196 default:
31197 break;
31199 break;
31200 case PROCESSOR_POWER7:
31201 type = get_attr_type (insn);
31203 switch (type)
31205 case TYPE_CR_LOGICAL:
31206 case TYPE_MFCR:
31207 case TYPE_MFCRF:
31208 case TYPE_MTCR:
31209 case TYPE_DIV:
31210 case TYPE_ISYNC:
31211 case TYPE_LOAD_L:
31212 case TYPE_STORE_C:
31213 case TYPE_MFJMPR:
31214 case TYPE_MTJMPR:
31215 return true;
31216 case TYPE_MUL:
31217 case TYPE_SHIFT:
31218 case TYPE_EXTS:
31219 if (get_attr_dot (insn) == DOT_YES)
31220 return true;
31221 else
31222 break;
31223 case TYPE_LOAD:
31224 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31225 || get_attr_update (insn) == UPDATE_YES)
31226 return true;
31227 else
31228 break;
31229 case TYPE_STORE:
31230 case TYPE_FPLOAD:
31231 case TYPE_FPSTORE:
31232 if (get_attr_update (insn) == UPDATE_YES)
31233 return true;
31234 else
31235 break;
31236 default:
31237 break;
31239 break;
31240 case PROCESSOR_POWER8:
31241 type = get_attr_type (insn);
31243 switch (type)
31245 case TYPE_CR_LOGICAL:
31246 case TYPE_MFCR:
31247 case TYPE_MFCRF:
31248 case TYPE_MTCR:
31249 case TYPE_SYNC:
31250 case TYPE_ISYNC:
31251 case TYPE_LOAD_L:
31252 case TYPE_STORE_C:
31253 case TYPE_VECSTORE:
31254 case TYPE_MFJMPR:
31255 case TYPE_MTJMPR:
31256 return true;
31257 case TYPE_SHIFT:
31258 case TYPE_EXTS:
31259 case TYPE_MUL:
31260 if (get_attr_dot (insn) == DOT_YES)
31261 return true;
31262 else
31263 break;
31264 case TYPE_LOAD:
31265 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31266 || get_attr_update (insn) == UPDATE_YES)
31267 return true;
31268 else
31269 break;
31270 case TYPE_STORE:
31271 if (get_attr_update (insn) == UPDATE_YES
31272 && get_attr_indexed (insn) == INDEXED_YES)
31273 return true;
31274 else
31275 break;
31276 default:
31277 break;
31279 break;
31280 default:
31281 break;
31284 return false;
31287 static bool
31288 insn_must_be_last_in_group (rtx_insn *insn)
31290 enum attr_type type;
31292 if (!insn
31293 || NOTE_P (insn)
31294 || DEBUG_INSN_P (insn)
31295 || GET_CODE (PATTERN (insn)) == USE
31296 || GET_CODE (PATTERN (insn)) == CLOBBER)
31297 return false;
31299 switch (rs6000_tune)
31300 case PROCESSOR_POWER4:
31301 case PROCESSOR_POWER5:
31302 if (is_microcoded_insn (insn))
31303 return true;
31305 if (is_branch_slot_insn (insn))
31306 return true;
31308 break;
31309 case PROCESSOR_POWER6:
31310 type = get_attr_type (insn);
31312 switch (type)
31314 case TYPE_EXTS:
31315 case TYPE_CNTLZ:
31316 case TYPE_TRAP:
31317 case TYPE_MUL:
31318 case TYPE_FPCOMPARE:
31319 case TYPE_MFCR:
31320 case TYPE_MTCR:
31321 case TYPE_MFJMPR:
31322 case TYPE_MTJMPR:
31323 case TYPE_ISYNC:
31324 case TYPE_SYNC:
31325 case TYPE_LOAD_L:
31326 case TYPE_STORE_C:
31327 return true;
31328 case TYPE_SHIFT:
31329 if (get_attr_dot (insn) == DOT_NO
31330 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31331 return true;
31332 else
31333 break;
31334 case TYPE_DIV:
31335 if (get_attr_size (insn) == SIZE_32)
31336 return true;
31337 else
31338 break;
31339 default:
31340 break;
31342 break;
31343 case PROCESSOR_POWER7:
31344 type = get_attr_type (insn);
31346 switch (type)
31348 case TYPE_ISYNC:
31349 case TYPE_SYNC:
31350 case TYPE_LOAD_L:
31351 case TYPE_STORE_C:
31352 return true;
31353 case TYPE_LOAD:
31354 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31355 && get_attr_update (insn) == UPDATE_YES)
31356 return true;
31357 else
31358 break;
31359 case TYPE_STORE:
31360 if (get_attr_update (insn) == UPDATE_YES
31361 && get_attr_indexed (insn) == INDEXED_YES)
31362 return true;
31363 else
31364 break;
31365 default:
31366 break;
31368 break;
31369 case PROCESSOR_POWER8:
31370 type = get_attr_type (insn);
31372 switch (type)
31374 case TYPE_MFCR:
31375 case TYPE_MTCR:
31376 case TYPE_ISYNC:
31377 case TYPE_SYNC:
31378 case TYPE_LOAD_L:
31379 case TYPE_STORE_C:
31380 return true;
31381 case TYPE_LOAD:
31382 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31383 && get_attr_update (insn) == UPDATE_YES)
31384 return true;
31385 else
31386 break;
31387 case TYPE_STORE:
31388 if (get_attr_update (insn) == UPDATE_YES
31389 && get_attr_indexed (insn) == INDEXED_YES)
31390 return true;
31391 else
31392 break;
31393 default:
31394 break;
31396 break;
31397 default:
31398 break;
31401 return false;
31404 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31405 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31407 static bool
31408 is_costly_group (rtx *group_insns, rtx next_insn)
31410 int i;
31411 int issue_rate = rs6000_issue_rate ();
31413 for (i = 0; i < issue_rate; i++)
31415 sd_iterator_def sd_it;
31416 dep_t dep;
31417 rtx insn = group_insns[i];
31419 if (!insn)
31420 continue;
31422 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31424 rtx next = DEP_CON (dep);
31426 if (next == next_insn
31427 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31428 return true;
31432 return false;
31435 /* Utility function for redefine_groups.
31436 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31437 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31438 to keep it "far" (in a separate group) from GROUP_INSNS, following
31439 one of the following schemes, depending on the value of the flag
31440 -minsert-sched-nops = X:
31441 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31442 in order to force NEXT_INSN into a separate group.
31443 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31444 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31445 insertion (has a group just ended, how many vacant issue slots remain in the
31446 last group, and how many dispatch groups were encountered so far). */
31448 static int
31449 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31450 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31451 int *group_count)
31453 rtx nop;
31454 bool force;
31455 int issue_rate = rs6000_issue_rate ();
31456 bool end = *group_end;
31457 int i;
31459 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31460 return can_issue_more;
31462 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31463 return can_issue_more;
31465 force = is_costly_group (group_insns, next_insn);
31466 if (!force)
31467 return can_issue_more;
31469 if (sched_verbose > 6)
31470 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31471 *group_count, can_issue_more);
31473 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31475 if (*group_end)
31476 can_issue_more = 0;
31478 /* Since only a branch can be issued in the last issue_slot, it is
31479 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31480 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31481 in this case the last nop will start a new group and the branch
31482 will be forced to the new group. */
31483 if (can_issue_more && !is_branch_slot_insn (next_insn))
31484 can_issue_more--;
31486 /* Do we have a special group ending nop? */
31487 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31488 || rs6000_tune == PROCESSOR_POWER8)
31490 nop = gen_group_ending_nop ();
31491 emit_insn_before (nop, next_insn);
31492 can_issue_more = 0;
31494 else
31495 while (can_issue_more > 0)
31497 nop = gen_nop ();
31498 emit_insn_before (nop, next_insn);
31499 can_issue_more--;
31502 *group_end = true;
31503 return 0;
31506 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31508 int n_nops = rs6000_sched_insert_nops;
31510 /* Nops can't be issued from the branch slot, so the effective
31511 issue_rate for nops is 'issue_rate - 1'. */
31512 if (can_issue_more == 0)
31513 can_issue_more = issue_rate;
31514 can_issue_more--;
31515 if (can_issue_more == 0)
31517 can_issue_more = issue_rate - 1;
31518 (*group_count)++;
31519 end = true;
31520 for (i = 0; i < issue_rate; i++)
31522 group_insns[i] = 0;
31526 while (n_nops > 0)
31528 nop = gen_nop ();
31529 emit_insn_before (nop, next_insn);
31530 if (can_issue_more == issue_rate - 1) /* new group begins */
31531 end = false;
31532 can_issue_more--;
31533 if (can_issue_more == 0)
31535 can_issue_more = issue_rate - 1;
31536 (*group_count)++;
31537 end = true;
31538 for (i = 0; i < issue_rate; i++)
31540 group_insns[i] = 0;
31543 n_nops--;
31546 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31547 can_issue_more++;
31549 /* Is next_insn going to start a new group? */
31550 *group_end
31551 = (end
31552 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31553 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31554 || (can_issue_more < issue_rate
31555 && insn_terminates_group_p (next_insn, previous_group)));
31556 if (*group_end && end)
31557 (*group_count)--;
31559 if (sched_verbose > 6)
31560 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31561 *group_count, can_issue_more);
31562 return can_issue_more;
31565 return can_issue_more;
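/* Editor's example (hypothetical numbers): suppose is_costly_group
   fires with can_issue_more == 3.  Under
   -minsert-sched-nops=regroup_exact, POWER4/5 get can_issue_more - 1 == 2
   nops when NEXT_INSN is not a branch (the still-vacant branch slot ends
   the group by itself) and 3 when it is, while POWER6/7/8 emit a single
   special group-ending nop.  Under a numeric -minsert-sched-nops=N,
   exactly N ordinary nops are emitted, charged at 'issue_rate - 1' per
   group since a nop cannot occupy the branch slot.  */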
31568 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31569 with the dispatch groups that the processor dispatcher is expected to
31570 form in practice. It tries to achieve this synchronization by forcing the
31571 estimated processor grouping on the compiler (as opposed to the function
31572 'pad_groups' which tries to force the scheduler's grouping on the processor).
31574 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31575 examines the (estimated) dispatch groups that will be formed by the processor
31576 dispatcher. It marks these group boundaries to reflect the estimated
31577 processor grouping, overriding the grouping that the scheduler had marked.
31578 Depending on the value of the flag '-minsert-sched-nops' this function can
31579 force certain insns into separate groups or force a certain distance between
31580 them by inserting nops, for example, if there exists a "costly dependence"
31581 between the insns.
31583 The function estimates the group boundaries that the processor will form as
31584 follows: It keeps track of how many vacant issue slots are available after
31585 each insn. A subsequent insn will start a new group if one of the following
31586 4 cases applies:
31587 - no more vacant issue slots remain in the current dispatch group.
31588 - only the last issue slot, which is the branch slot, is vacant, but the next
31589 insn is not a branch.
31590 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31591 which means that a cracked insn (which occupies two issue slots) can't be
31592 issued in this group.
31593 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31594 start a new group. */
31596 static int
31597 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31598 rtx_insn *tail)
31600 rtx_insn *insn, *next_insn;
31601 int issue_rate;
31602 int can_issue_more;
31603 int slot, i;
31604 bool group_end;
31605 int group_count = 0;
31606 rtx *group_insns;
31608 /* Initialize. */
31609 issue_rate = rs6000_issue_rate ();
31610 group_insns = XALLOCAVEC (rtx, issue_rate);
31611 for (i = 0; i < issue_rate; i++)
31613 group_insns[i] = 0;
31615 can_issue_more = issue_rate;
31616 slot = 0;
31617 insn = get_next_active_insn (prev_head_insn, tail);
31618 group_end = false;
31620 while (insn != NULL_RTX)
31622 slot = (issue_rate - can_issue_more);
31623 group_insns[slot] = insn;
31624 can_issue_more =
31625 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31626 if (insn_terminates_group_p (insn, current_group))
31627 can_issue_more = 0;
31629 next_insn = get_next_active_insn (insn, tail);
31630 if (next_insn == NULL_RTX)
31631 return group_count + 1;
31633 /* Is next_insn going to start a new group? */
31634 group_end
31635 = (can_issue_more == 0
31636 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31637 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31638 || (can_issue_more < issue_rate
31639 && insn_terminates_group_p (next_insn, previous_group)));
31641 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31642 next_insn, &group_end, can_issue_more,
31643 &group_count);
31645 if (group_end)
31647 group_count++;
31648 can_issue_more = 0;
31649 for (i = 0; i < issue_rate; i++)
31651 group_insns[i] = 0;
31655 if (GET_MODE (next_insn) == TImode && can_issue_more)
31656 PUT_MODE (next_insn, VOIDmode);
31657 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31658 PUT_MODE (next_insn, TImode);
31660 insn = next_insn;
31661 if (can_issue_more == 0)
31662 can_issue_more = issue_rate;
31663 } /* while */
31665 return group_count;
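/* Editor's example: applying the four boundary tests above to a
   POWER4-style five-slot group whose last slot is the branch slot.  At
   can_issue_more == 2 the vacant slots include the branch slot, so a
   cracked insn (which needs two non-branch slots) must start a new group;
   at can_issue_more == 1 anything but a branch must; and the TImode
   marker placed on NEXT_INSN is what records the boundary for the rest
   of the backend.  */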
31668 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31669 dispatch group boundaries that the scheduler had marked. Pad with nops
31670 any dispatch groups which have vacant issue slots, in order to force the
31671 scheduler's grouping on the processor dispatcher. The function
31672 returns the number of dispatch groups found. */
31674 static int
31675 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31676 rtx_insn *tail)
31678 rtx_insn *insn, *next_insn;
31679 rtx nop;
31680 int issue_rate;
31681 int can_issue_more;
31682 int group_end;
31683 int group_count = 0;
31685 /* Initialize issue_rate. */
31686 issue_rate = rs6000_issue_rate ();
31687 can_issue_more = issue_rate;
31689 insn = get_next_active_insn (prev_head_insn, tail);
31690 next_insn = get_next_active_insn (insn, tail);
31692 while (insn != NULL_RTX)
31694 can_issue_more =
31695 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31697 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31699 if (next_insn == NULL_RTX)
31700 break;
31702 if (group_end)
31704 /* If the scheduler had marked group termination at this location
31705 (between insn and next_insn), and neither insn nor next_insn will
31706 force group termination, pad the group with nops to force group
31707 termination. */
31708 if (can_issue_more
31709 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31710 && !insn_terminates_group_p (insn, current_group)
31711 && !insn_terminates_group_p (next_insn, previous_group))
31713 if (!is_branch_slot_insn (next_insn))
31714 can_issue_more--;
31716 while (can_issue_more)
31718 nop = gen_nop ();
31719 emit_insn_before (nop, next_insn);
31720 can_issue_more--;
31724 can_issue_more = issue_rate;
31725 group_count++;
31728 insn = next_insn;
31729 next_insn = get_next_active_insn (insn, tail);
31732 return group_count;
31735 /* We're beginning a new block. Initialize data structures as necessary. */
31737 static void
31738 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31739 int sched_verbose ATTRIBUTE_UNUSED,
31740 int max_ready ATTRIBUTE_UNUSED)
31742 last_scheduled_insn = NULL;
31743 load_store_pendulum = 0;
31744 divide_cnt = 0;
31745 vec_pairing = 0;
31748 /* The following function is called at the end of scheduling BB.
31749 After reload, it inserts nops to enforce the insn group bundling. */
31751 static void
31752 rs6000_sched_finish (FILE *dump, int sched_verbose)
31754 int n_groups;
31756 if (sched_verbose)
31757 fprintf (dump, "=== Finishing schedule.\n");
31759 if (reload_completed && rs6000_sched_groups)
31761 /* Do not run sched_finish hook when selective scheduling enabled. */
31762 if (sel_sched_p ())
31763 return;
31765 if (rs6000_sched_insert_nops == sched_finish_none)
31766 return;
31768 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31769 n_groups = pad_groups (dump, sched_verbose,
31770 current_sched_info->prev_head,
31771 current_sched_info->next_tail);
31772 else
31773 n_groups = redefine_groups (dump, sched_verbose,
31774 current_sched_info->prev_head,
31775 current_sched_info->next_tail);
31777 if (sched_verbose >= 6)
31779 fprintf (dump, "ngroups = %d\n", n_groups);
31780 print_rtl (dump, current_sched_info->prev_head);
31781 fprintf (dump, "Done finish_sched\n");
31786 struct rs6000_sched_context
31788 short cached_can_issue_more;
31789 rtx_insn *last_scheduled_insn;
31790 int load_store_pendulum;
31791 int divide_cnt;
31792 int vec_pairing;
31795 typedef struct rs6000_sched_context rs6000_sched_context_def;
31796 typedef rs6000_sched_context_def *rs6000_sched_context_t;
31798 /* Allocate store for new scheduling context. */
31799 static void *
31800 rs6000_alloc_sched_context (void)
31802 return xmalloc (sizeof (rs6000_sched_context_def));
31805 /* If CLEAN_P is true, initialize _SC with clean data;
31806 otherwise initialize it from the global context. */
31807 static void
31808 rs6000_init_sched_context (void *_sc, bool clean_p)
31810 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31812 if (clean_p)
31814 sc->cached_can_issue_more = 0;
31815 sc->last_scheduled_insn = NULL;
31816 sc->load_store_pendulum = 0;
31817 sc->divide_cnt = 0;
31818 sc->vec_pairing = 0;
31820 else
31822 sc->cached_can_issue_more = cached_can_issue_more;
31823 sc->last_scheduled_insn = last_scheduled_insn;
31824 sc->load_store_pendulum = load_store_pendulum;
31825 sc->divide_cnt = divide_cnt;
31826 sc->vec_pairing = vec_pairing;
31830 /* Sets the global scheduling context to the one pointed to by _SC. */
31831 static void
31832 rs6000_set_sched_context (void *_sc)
31834 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31836 gcc_assert (sc != NULL);
31838 cached_can_issue_more = sc->cached_can_issue_more;
31839 last_scheduled_insn = sc->last_scheduled_insn;
31840 load_store_pendulum = sc->load_store_pendulum;
31841 divide_cnt = sc->divide_cnt;
31842 vec_pairing = sc->vec_pairing;
31845 /* Free _SC. */
31846 static void
31847 rs6000_free_sched_context (void *_sc)
31849 gcc_assert (_sc != NULL);
31851 free (_sc);
31854 static bool
31855 rs6000_sched_can_speculate_insn (rtx_insn *insn)
31857 switch (get_attr_type (insn))
31859 case TYPE_DIV:
31860 case TYPE_SDIV:
31861 case TYPE_DDIV:
31862 case TYPE_VECDIV:
31863 case TYPE_SSQRT:
31864 case TYPE_DSQRT:
31865 return false;
31867 default:
31868 return true;
31872 /* Length in units of the trampoline for entering a nested function. */
31875 rs6000_trampoline_size (void)
31877 int ret = 0;
31879 switch (DEFAULT_ABI)
31881 default:
31882 gcc_unreachable ();
31884 case ABI_AIX:
31885 ret = (TARGET_32BIT) ? 12 : 24;
31886 break;
31888 case ABI_ELFv2:
31889 gcc_assert (!TARGET_32BIT);
31890 ret = 32;
31891 break;
31893 case ABI_DARWIN:
31894 case ABI_V4:
31895 ret = (TARGET_32BIT) ? 40 : 48;
31896 break;
31899 return ret;
31902 /* Emit RTL insns to initialize the variable parts of a trampoline.
31903 FNADDR is an RTX for the address of the function's pure code.
31904 CXT is an RTX for the static chain value for the function. */
31906 static void
31907 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
31909 int regsize = (TARGET_32BIT) ? 4 : 8;
31910 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
31911 rtx ctx_reg = force_reg (Pmode, cxt);
31912 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
31914 switch (DEFAULT_ABI)
31916 default:
31917 gcc_unreachable ();
31919 /* Under AIX, just build the 3-word function descriptor. */
31920 case ABI_AIX:
31922 rtx fnmem, fn_reg, toc_reg;
31924 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
31925 error ("you cannot take the address of a nested function if you use "
31926 "the %qs option", "-mno-pointers-to-nested-functions");
31928 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
31929 fn_reg = gen_reg_rtx (Pmode);
31930 toc_reg = gen_reg_rtx (Pmode);
31932 /* Macro to shorten the code expansions below. */
31933 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
31935 m_tramp = replace_equiv_address (m_tramp, addr);
31937 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
31938 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
31939 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
31940 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
31941 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
31943 # undef MEM_PLUS
31945 break;
31947 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
31948 case ABI_ELFv2:
31949 case ABI_DARWIN:
31950 case ABI_V4:
31951 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
31952 LCT_NORMAL, VOIDmode,
31953 addr, Pmode,
31954 GEN_INT (rs6000_trampoline_size ()), SImode,
31955 fnaddr, Pmode,
31956 ctx_reg, Pmode);
31957 break;
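/* Editor's note: the AIX-style trampoline built above is just a copied
   3-word function descriptor (word size == regsize):

       offset 0*regsize:  code address  (from the target's descriptor)
       offset 1*regsize:  TOC pointer   (ditto)
       offset 2*regsize:  static chain  (CXT, the only variable part)

   so no instructions are materialized at run time, whereas the
   V.4/Darwin/ELFv2 paths defer to __trampoline_setup in libgcc.  */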
31962 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
31963 identifier as an argument, so the front end shouldn't look it up. */
31965 static bool
31966 rs6000_attribute_takes_identifier_p (const_tree attr_id)
31968 return is_attribute_p ("altivec", attr_id);
31971 /* Handle the "altivec" attribute. The attribute may have
31972 arguments as follows:
31974 __attribute__((altivec(vector__)))
31975 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
31976 __attribute__((altivec(bool__))) (always followed by 'unsigned')
31978 and may appear more than once (e.g., 'vector bool char') in a
31979 given declaration. */
31981 static tree
31982 rs6000_handle_altivec_attribute (tree *node,
31983 tree name ATTRIBUTE_UNUSED,
31984 tree args,
31985 int flags ATTRIBUTE_UNUSED,
31986 bool *no_add_attrs)
31988 tree type = *node, result = NULL_TREE;
31989 machine_mode mode;
31990 int unsigned_p;
31991 char altivec_type
31992 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
31993 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
31994 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
31995 : '?');
31997 while (POINTER_TYPE_P (type)
31998 || TREE_CODE (type) == FUNCTION_TYPE
31999 || TREE_CODE (type) == METHOD_TYPE
32000 || TREE_CODE (type) == ARRAY_TYPE)
32001 type = TREE_TYPE (type);
32003 mode = TYPE_MODE (type);
32005 /* Check for invalid AltiVec type qualifiers. */
32006 if (type == long_double_type_node)
32007 error ("use of %<long double%> in AltiVec types is invalid");
32008 else if (type == boolean_type_node)
32009 error ("use of boolean types in AltiVec types is invalid");
32010 else if (TREE_CODE (type) == COMPLEX_TYPE)
32011 error ("use of %<complex%> in AltiVec types is invalid");
32012 else if (DECIMAL_FLOAT_MODE_P (mode))
32013 error ("use of decimal floating point types in AltiVec types is invalid");
32014 else if (!TARGET_VSX)
32016 if (type == long_unsigned_type_node || type == long_integer_type_node)
32018 if (TARGET_64BIT)
32019 error ("use of %<long%> in AltiVec types is invalid for "
32020 "64-bit code without %qs", "-mvsx");
32021 else if (rs6000_warn_altivec_long)
32022 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32023 "use %<int%>");
32025 else if (type == long_long_unsigned_type_node
32026 || type == long_long_integer_type_node)
32027 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32028 "-mvsx");
32029 else if (type == double_type_node)
32030 error ("use of %<double%> in AltiVec types is invalid without %qs",
32031 "-mvsx");
32034 switch (altivec_type)
32036 case 'v':
32037 unsigned_p = TYPE_UNSIGNED (type);
32038 switch (mode)
32040 case E_TImode:
32041 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32042 break;
32043 case E_DImode:
32044 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32045 break;
32046 case E_SImode:
32047 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32048 break;
32049 case E_HImode:
32050 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32051 break;
32052 case E_QImode:
32053 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32054 break;
32055 case E_SFmode: result = V4SF_type_node; break;
32056 case E_DFmode: result = V2DF_type_node; break;
32057 /* If the user says 'vector int bool', we may be handed the 'bool'
32058 attribute _before_ the 'vector' attribute, and so select the
32059 proper type in the 'b' case below. */
32060 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32061 case E_V2DImode: case E_V2DFmode:
32062 result = type;
32063 default: break;
32065 break;
32066 case 'b':
32067 switch (mode)
32069 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32070 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32071 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32072 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32073 default: break;
32075 break;
32076 case 'p':
32077 switch (mode)
32079 case E_V8HImode: result = pixel_V8HI_type_node;
32080 default: break;
32082 default: break;
32085 /* Propagate qualifiers attached to the element type
32086 onto the vector type. */
32087 if (result && result != type && TYPE_QUALS (type))
32088 result = build_qualified_type (result, TYPE_QUALS (type));
32090 *no_add_attrs = true; /* No need to hang on to the attribute. */
32092 if (result)
32093 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32095 return NULL_TREE;
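/* Editor's example (illustrative; only compiles with AltiVec enabled):
   these attributes are the internal form behind the AltiVec keywords,
   e.g.

     typedef int __attribute__ ((altivec (vector__))) v4si;
     typedef unsigned int __attribute__ ((altivec (bool__))) vbi;

   yield V4SI_type_node and bool_V4SI_type_node respectively.  'vector
   pixel' stacks altivec(pixel__) on a vector of unsigned short, which is
   why the 'p' case above only needs to handle E_V8HImode.  */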
32098 /* AltiVec defines four built-in scalar types that serve as vector
32099 elements; we must teach the compiler how to mangle them. */
32101 static const char *
32102 rs6000_mangle_type (const_tree type)
32104 type = TYPE_MAIN_VARIANT (type);
32106 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32107 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32108 return NULL;
32110 if (type == bool_char_type_node) return "U6__boolc";
32111 if (type == bool_short_type_node) return "U6__bools";
32112 if (type == pixel_type_node) return "u7__pixel";
32113 if (type == bool_int_type_node) return "U6__booli";
32114 if (type == bool_long_long_type_node) return "U6__boolx";
32116 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32117 "g" for IBM extended double, no matter whether it is long double (using
32118 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32119 if (TARGET_FLOAT128_TYPE)
32121 if (type == ieee128_float_type_node)
32122 return "U10__float128";
32124 if (type == ibm128_float_type_node)
32125 return "u8__ibm128";
32127 if (TARGET_LONG_DOUBLE_128 && type == long_double_type_node)
32128 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32131 /* Mangle IBM extended float long double as `g' (__float128) on
32132 powerpc*-linux where long-double-64 previously was the default. */
32133 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32134 && TARGET_ELF
32135 && TARGET_LONG_DOUBLE_128
32136 && !TARGET_IEEEQUAD)
32137 return "g";
32139 /* For all other types, use normal C++ mangling. */
32140 return NULL;
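/* Editor's note, collecting the manglings above as they appear in
   symbols: 'vector pixel' contributes "u7__pixel", 'vector bool int'
   "U6__booli", __float128 "U10__float128", __ibm128 "u8__ibm128"; and
   with IBM extended long double the historical "g" is kept, so existing
   C++ ABIs on powerpc*-linux are unchanged.  */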
32143 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32144 struct attribute_spec.handler. */
32146 static tree
32147 rs6000_handle_longcall_attribute (tree *node, tree name,
32148 tree args ATTRIBUTE_UNUSED,
32149 int flags ATTRIBUTE_UNUSED,
32150 bool *no_add_attrs)
32152 if (TREE_CODE (*node) != FUNCTION_TYPE
32153 && TREE_CODE (*node) != FIELD_DECL
32154 && TREE_CODE (*node) != TYPE_DECL)
32156 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32157 name);
32158 *no_add_attrs = true;
32161 return NULL_TREE;
32164 /* Set longcall attributes on all functions declared when
32165 rs6000_default_long_calls is true. */
32166 static void
32167 rs6000_set_default_type_attributes (tree type)
32169 if (rs6000_default_long_calls
32170 && (TREE_CODE (type) == FUNCTION_TYPE
32171 || TREE_CODE (type) == METHOD_TYPE))
32172 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32173 NULL_TREE,
32174 TYPE_ATTRIBUTES (type));
32176 #if TARGET_MACHO
32177 darwin_set_default_type_attributes (type);
32178 #endif
32181 /* Return a reference suitable for calling a function with the
32182 longcall attribute. */
32185 rs6000_longcall_ref (rtx call_ref)
32187 const char *call_name;
32188 tree node;
32190 if (GET_CODE (call_ref) != SYMBOL_REF)
32191 return call_ref;
32193 /* System V adds '.' to the internal name, so skip any leading dots. */
32194 call_name = XSTR (call_ref, 0);
32195 if (*call_name == '.')
32197 while (*call_name == '.')
32198 call_name++;
32200 node = get_identifier (call_name);
32201 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32204 return force_reg (Pmode, call_ref);
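/* Editor's example (hypothetical declaration): a call reaches
   rs6000_longcall_ref from something like

     extern void far_away (void) __attribute__ ((longcall));

   or from -mlongcall; forcing the SYMBOL_REF into a register lets the
   call be made indirectly (e.g. mtctr/bctrl) instead of through a direct
   'bl', whose signed 26-bit displacement may not reach the callee.  */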
32207 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32208 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32209 #endif
32211 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32212 struct attribute_spec.handler. */
32213 static tree
32214 rs6000_handle_struct_attribute (tree *node, tree name,
32215 tree args ATTRIBUTE_UNUSED,
32216 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32218 tree *type = NULL;
32219 if (DECL_P (*node))
32221 if (TREE_CODE (*node) == TYPE_DECL)
32222 type = &TREE_TYPE (*node);
32224 else
32225 type = node;
32227 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32228 || TREE_CODE (*type) == UNION_TYPE)))
32230 warning (OPT_Wattributes, "%qE attribute ignored", name);
32231 *no_add_attrs = true;
32234 else if ((is_attribute_p ("ms_struct", name)
32235 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32236 || ((is_attribute_p ("gcc_struct", name)
32237 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32239 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32240 name);
32241 *no_add_attrs = true;
32244 return NULL_TREE;
32247 static bool
32248 rs6000_ms_bitfield_layout_p (const_tree record_type)
32250 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32251 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32252 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32255 #ifdef USING_ELFOS_H
32257 /* A get_unnamed_section callback, used for switching to toc_section. */
32259 static void
32260 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32262 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32263 && TARGET_MINIMAL_TOC)
32265 if (!toc_initialized)
32267 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32268 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32269 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32270 fprintf (asm_out_file, "\t.tc ");
32271 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32272 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32273 fprintf (asm_out_file, "\n");
32275 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32276 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32277 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32278 fprintf (asm_out_file, " = .+32768\n");
32279 toc_initialized = 1;
32281 else
32282 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32284 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32286 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32287 if (!toc_initialized)
32289 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32290 toc_initialized = 1;
32293 else
32295 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32296 if (!toc_initialized)
32298 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32299 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32300 fprintf (asm_out_file, " = .+32768\n");
32301 toc_initialized = 1;
32306 /* Implement TARGET_ASM_INIT_SECTIONS. */
32308 static void
32309 rs6000_elf_asm_init_sections (void)
32311 toc_section
32312 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32314 sdata2_section
32315 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32316 SDATA2_SECTION_ASM_OP);
32319 /* Implement TARGET_SELECT_RTX_SECTION. */
32321 static section *
32322 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32323 unsigned HOST_WIDE_INT align)
32325 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32326 return toc_section;
32327 else
32328 return default_elf_select_rtx_section (mode, x, align);
32331 /* For a SYMBOL_REF, set generic flags and then perform some
32332 target-specific processing.
32334 When the AIX ABI is requested on a non-AIX system, replace the
32335 function name with the real name (with a leading .) rather than the
32336 function descriptor name. This saves a lot of overriding code to
32337 read the prefixes. */
32339 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32340 static void
32341 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32343 default_encode_section_info (decl, rtl, first);
32345 if (first
32346 && TREE_CODE (decl) == FUNCTION_DECL
32347 && !TARGET_AIX
32348 && DEFAULT_ABI == ABI_AIX)
32350 rtx sym_ref = XEXP (rtl, 0);
32351 size_t len = strlen (XSTR (sym_ref, 0));
32352 char *str = XALLOCAVEC (char, len + 2);
32353 str[0] = '.';
32354 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32355 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32359 static inline bool
32360 compare_section_name (const char *section, const char *templ)
32362 int len;
32364 len = strlen (templ);
32365 return (strncmp (section, templ, len) == 0
32366 && (section[len] == 0 || section[len] == '.'));
32369 bool
32370 rs6000_elf_in_small_data_p (const_tree decl)
32372 if (rs6000_sdata == SDATA_NONE)
32373 return false;
32375 /* We want to merge strings, so we never consider them small data. */
32376 if (TREE_CODE (decl) == STRING_CST)
32377 return false;
32379 /* Functions are never in the small data area. */
32380 if (TREE_CODE (decl) == FUNCTION_DECL)
32381 return false;
32383 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32385 const char *section = DECL_SECTION_NAME (decl);
32386 if (compare_section_name (section, ".sdata")
32387 || compare_section_name (section, ".sdata2")
32388 || compare_section_name (section, ".gnu.linkonce.s")
32389 || compare_section_name (section, ".sbss")
32390 || compare_section_name (section, ".sbss2")
32391 || compare_section_name (section, ".gnu.linkonce.sb")
32392 || strcmp (section, ".PPC.EMB.sdata0") == 0
32393 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32394 return true;
32396 else
32398 /* If we are told not to put readonly data in sdata, then don't. */
32399 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32400 && !rs6000_readonly_in_sdata)
32401 return false;
32403 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32405 if (size > 0
32406 && size <= g_switch_value
32407 /* If it's not public, and we're not going to reference it there,
32408 there's no need to put it in the small data section. */
32409 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32410 return true;
32413 return false;
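/* Editor's example (hypothetical flags): with -msdata=eabi and -G 8, a
   4-byte 'int counter;' lands in small data, reachable with a single
   register-relative access, while a 64-byte array does not; an explicit
   __attribute__ ((section (".sdata2"))) matches the section-name checks
   above and forces small data regardless of size.  */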
32416 #endif /* USING_ELFOS_H */
32418 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32420 static bool
32421 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32423 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32426 /* Do not place thread-local symbols refs in the object blocks. */
32428 static bool
32429 rs6000_use_blocks_for_decl_p (const_tree decl)
32431 return !DECL_THREAD_LOCAL_P (decl);
32434 /* Return a REG that occurs in ADDR with coefficient 1.
32435 ADDR can be effectively incremented by incrementing REG.
32437 r0 is special and we must not select it as an address
32438 register by this routine since our caller will try to
32439 increment the returned register via an "la" instruction. */
32442 find_addr_reg (rtx addr)
32444 while (GET_CODE (addr) == PLUS)
32446 if (GET_CODE (XEXP (addr, 0)) == REG
32447 && REGNO (XEXP (addr, 0)) != 0)
32448 addr = XEXP (addr, 0);
32449 else if (GET_CODE (XEXP (addr, 1)) == REG
32450 && REGNO (XEXP (addr, 1)) != 0)
32451 addr = XEXP (addr, 1);
32452 else if (CONSTANT_P (XEXP (addr, 0)))
32453 addr = XEXP (addr, 1);
32454 else if (CONSTANT_P (XEXP (addr, 1)))
32455 addr = XEXP (addr, 0);
32456 else
32457 gcc_unreachable ();
32459 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32460 return addr;
32463 void
32464 rs6000_fatal_bad_address (rtx op)
32466 fatal_insn ("bad address", op);
32469 #if TARGET_MACHO
32471 typedef struct branch_island_d {
32472 tree function_name;
32473 tree label_name;
32474 int line_number;
32475 } branch_island;
32478 static vec<branch_island, va_gc> *branch_islands;
32480 /* Remember to generate a branch island for far calls to the given
32481 function. */
32483 static void
32484 add_compiler_branch_island (tree label_name, tree function_name,
32485 int line_number)
32487 branch_island bi = {function_name, label_name, line_number};
32488 vec_safe_push (branch_islands, bi);
32491 /* Generate far-jump branch islands for everything recorded in
32492 branch_islands. Invoked immediately after the last instruction of
32493 the epilogue has been emitted; the branch islands must be appended
32494 to, and contiguous with, the function body. Mach-O stubs are
32495 generated in machopic_output_stub(). */
32497 static void
32498 macho_branch_islands (void)
32500 char tmp_buf[512];
32502 while (!vec_safe_is_empty (branch_islands))
32504 branch_island *bi = &branch_islands->last ();
32505 const char *label = IDENTIFIER_POINTER (bi->label_name);
32506 const char *name = IDENTIFIER_POINTER (bi->function_name);
32507 char name_buf[512];
32508 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32509 if (name[0] == '*' || name[0] == '&')
32510 strcpy (name_buf, name+1);
32511 else
32513 name_buf[0] = '_';
32514 strcpy (name_buf+1, name);
32516 strcpy (tmp_buf, "\n");
32517 strcat (tmp_buf, label);
32518 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32519 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32520 dbxout_stabd (N_SLINE, bi->line_number);
32521 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32522 if (flag_pic)
32524 if (TARGET_LINK_STACK)
32526 char name[32];
32527 get_ppc476_thunk_name (name);
32528 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32529 strcat (tmp_buf, name);
32530 strcat (tmp_buf, "\n");
32531 strcat (tmp_buf, label);
32532 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32534 else
32536 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32537 strcat (tmp_buf, label);
32538 strcat (tmp_buf, "_pic\n");
32539 strcat (tmp_buf, label);
32540 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32543 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32544 strcat (tmp_buf, name_buf);
32545 strcat (tmp_buf, " - ");
32546 strcat (tmp_buf, label);
32547 strcat (tmp_buf, "_pic)\n");
32549 strcat (tmp_buf, "\tmtlr r0\n");
32551 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32552 strcat (tmp_buf, name_buf);
32553 strcat (tmp_buf, " - ");
32554 strcat (tmp_buf, label);
32555 strcat (tmp_buf, "_pic)\n");
32557 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32559 else
32561 strcat (tmp_buf, ":\nlis r12,hi16(");
32562 strcat (tmp_buf, name_buf);
32563 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32564 strcat (tmp_buf, name_buf);
32565 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32567 output_asm_insn (tmp_buf, 0);
32568 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32569 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32570 dbxout_stabd (N_SLINE, bi->line_number);
32571 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32572 branch_islands->pop ();
32576 /* NO_PREVIOUS_DEF checks whether the function name is already in the
32577 branch island list. */
32579 static int
32580 no_previous_def (tree function_name)
32582 branch_island *bi;
32583 unsigned ix;
32585 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32586 if (function_name == bi->function_name)
32587 return 0;
32588 return 1;
32591 /* GET_PREV_LABEL gets the label name from the previous definition of
32592 the function. */
32594 static tree
32595 get_prev_label (tree function_name)
32597 branch_island *bi;
32598 unsigned ix;
32600 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32601 if (function_name == bi->function_name)
32602 return bi->label_name;
32603 return NULL_TREE;
32606 /* INSN is either a function call or a millicode call. It may have an
32607 unconditional jump in its delay slot.
32609 CALL_DEST is the routine we are calling. */
32611 char *
32612 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32613 int cookie_operand_number)
32615 static char buf[256];
32616 if (darwin_emit_branch_islands
32617 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32618 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32620 tree labelname;
32621 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32623 if (no_previous_def (funname))
32625 rtx label_rtx = gen_label_rtx ();
32626 char *label_buf, temp_buf[256];
32627 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32628 CODE_LABEL_NUMBER (label_rtx));
32629 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32630 labelname = get_identifier (label_buf);
32631 add_compiler_branch_island (labelname, funname, insn_line (insn));
32633 else
32634 labelname = get_prev_label (funname);
32636 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32637 instruction will reach 'foo', otherwise link as 'bl L42'".
32638 "L42" should be a 'branch island', that will do a far jump to
32639 'foo'. Branch islands are generated in
32640 macho_branch_islands(). */
32641 sprintf (buf, "jbsr %%z%d,%.246s",
32642 dest_operand_number, IDENTIFIER_POINTER (labelname));
32644 else
32645 sprintf (buf, "bl %%z%d", dest_operand_number);
32646 return buf;
32649 /* Generate PIC and indirect symbol stubs. */
32651 void
32652 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32654 unsigned int length;
32655 char *symbol_name, *lazy_ptr_name;
32656 char *local_label_0;
32657 static int label = 0;
32659 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32660 symb = (*targetm.strip_name_encoding) (symb);
32663 length = strlen (symb);
32664 symbol_name = XALLOCAVEC (char, length + 32);
32665 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32667 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32668 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32670 if (flag_pic == 2)
32671 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32672 else
32673 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32675 if (flag_pic == 2)
32677 fprintf (file, "\t.align 5\n");
32679 fprintf (file, "%s:\n", stub);
32680 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32682 label++;
32683 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32684 sprintf (local_label_0, "\"L%011d$spb\"", label);
32686 fprintf (file, "\tmflr r0\n");
32687 if (TARGET_LINK_STACK)
32689 char name[32];
32690 get_ppc476_thunk_name (name);
32691 fprintf (file, "\tbl %s\n", name);
32692 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32694 else
32696 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32697 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32699 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32700 lazy_ptr_name, local_label_0);
32701 fprintf (file, "\tmtlr r0\n");
32702 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32703 (TARGET_64BIT ? "ldu" : "lwzu"),
32704 lazy_ptr_name, local_label_0);
32705 fprintf (file, "\tmtctr r12\n");
32706 fprintf (file, "\tbctr\n");
32708 else
32710 fprintf (file, "\t.align 4\n");
32712 fprintf (file, "%s:\n", stub);
32713 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32715 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32716 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32717 (TARGET_64BIT ? "ldu" : "lwzu"),
32718 lazy_ptr_name);
32719 fprintf (file, "\tmtctr r12\n");
32720 fprintf (file, "\tbctr\n");
32723 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32724 fprintf (file, "%s:\n", lazy_ptr_name);
32725 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32726 fprintf (file, "%sdyld_stub_binding_helper\n",
32727 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
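/* As an illustration (hypothetical symbol "_foo"; the exact stub and lazy
   pointer labels come from GEN_SYMBOL_NAME_FOR_SYMBOL and
   GEN_LAZY_PTR_NAME_FOR_SYMBOL, so the names below are assumptions), the
   non-PIC 32-bit path above emits a stub of the shape

	.align 4
   L_foo$stub:
	.indirect_symbol _foo
	lis r11,ha16(L_foo$lazy_ptr)
	lwzu r12,lo16(L_foo$lazy_ptr)(r11)
	mtctr r12
	bctr

   followed by the lazy pointer entry

   L_foo$lazy_ptr:
	.indirect_symbol _foo
	.long dyld_stub_binding_helper

   so the first call is resolved through dyld's lazy binding helper. */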
32730 /* Legitimize PIC addresses. If the address is already
32731 position-independent, we return ORIG. Newly generated
32732 position-independent addresses go into a reg. This is REG if
32733 nonzero, otherwise we allocate register(s) as necessary. */
32735 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
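/* Worked example of SMALL_INT (illustrative, not part of GCC): the 0x8000
   bias maps the signed 16-bit range [-0x8000, 0x7fff] exactly onto
   [0, 0xffff], so the single unsigned compare accepts -0x8000 and 0x7fff
   but rejects 0x8000.  A standalone model of the same test: */
#if 0 /* sketch only */
static int
small_int_model (unsigned long long x)	/* x plays the role of UINTVAL (X) */
{
  /* -0x8000 biases to 0, 0x7fff to 0xffff, 0x8000 to 0x10000 (rejected). */
  return (x + 0x8000) < 0x10000;
}
#endif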
32737 rtx
32738 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32739 rtx reg)
32741 rtx base, offset;
32743 if (reg == NULL && !reload_completed)
32744 reg = gen_reg_rtx (Pmode);
32746 if (GET_CODE (orig) == CONST)
32748 rtx reg_temp;
32750 if (GET_CODE (XEXP (orig, 0)) == PLUS
32751 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32752 return orig;
32754 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32756 /* Use a different reg for the intermediate value, as
32757 it will be marked UNCHANGING. */
32758 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32759 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32760 Pmode, reg_temp);
32761 offset =
32762 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32763 Pmode, reg);
32765 if (GET_CODE (offset) == CONST_INT)
32767 if (SMALL_INT (offset))
32768 return plus_constant (Pmode, base, INTVAL (offset));
32769 else if (!reload_completed)
32770 offset = force_reg (Pmode, offset);
32771 else
32773 rtx mem = force_const_mem (Pmode, orig);
32774 return machopic_legitimize_pic_address (mem, Pmode, reg);
32777 return gen_rtx_PLUS (Pmode, base, offset);
32780 /* Fall back on generic machopic code. */
32781 return machopic_legitimize_pic_address (orig, mode, reg);
32784 /* Output a .machine directive for the Darwin assembler, and call
32785 the generic start_file routine. */
32787 static void
32788 rs6000_darwin_file_start (void)
32790 static const struct
32792 const char *arg;
32793 const char *name;
32794 HOST_WIDE_INT if_set;
32795 } mapping[] = {
32796 { "ppc64", "ppc64", MASK_64BIT },
32797 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
32798 { "power4", "ppc970", 0 },
32799 { "G5", "ppc970", 0 },
32800 { "7450", "ppc7450", 0 },
32801 { "7400", "ppc7400", MASK_ALTIVEC },
32802 { "G4", "ppc7400", 0 },
32803 { "750", "ppc750", 0 },
32804 { "740", "ppc750", 0 },
32805 { "G3", "ppc750", 0 },
32806 { "604e", "ppc604e", 0 },
32807 { "604", "ppc604", 0 },
32808 { "603e", "ppc603", 0 },
32809 { "603", "ppc603", 0 },
32810 { "601", "ppc601", 0 },
32811 { NULL, "ppc", 0 } };
32812 const char *cpu_id = "";
32813 size_t i;
32815 rs6000_file_start ();
32816 darwin_file_start ();
32818 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
32820 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
32821 cpu_id = rs6000_default_cpu;
32823 if (global_options_set.x_rs6000_cpu_index)
32824 cpu_id = processor_target_table[rs6000_cpu_index].name;
32826 /* Look through the mapping array. Pick the first name that either
32827 matches the argument, has a bit set in IF_SET that is also set
32828 in the target flags, or has a NULL name. */
32830 i = 0;
32831 while (mapping[i].arg != NULL
32832 && strcmp (mapping[i].arg, cpu_id) != 0
32833 && (mapping[i].if_set & rs6000_isa_flags) == 0)
32834 i++;
32836 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
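/* A standalone model (not part of GCC) of the lookup above: the loop stops
   at the first row whose ARG matches CPU_ID, whose IF_SET bits intersect
   the ISA flags, or at the NULL sentinel.  For instance "G5" selects
   "ppc970", and an unrecognized CPU with MASK_ALTIVEC set stops at the
   "7400" row, emitting ".machine ppc7400". */
#if 0 /* sketch only */
#include <string.h>
struct cpu_map { const char *arg; const char *name; unsigned long if_set; };
static const char *
pick_machine_name (const struct cpu_map *mapping, const char *cpu_id,
		   unsigned long isa_flags)
{
  unsigned int i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & isa_flags) == 0)
    i++;
  return mapping[i].name;	/* the sentinel row returns the "ppc" default */
}
#endif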
32839 #endif /* TARGET_MACHO */
32841 #if TARGET_ELF
32842 static int
32843 rs6000_elf_reloc_rw_mask (void)
32845 if (flag_pic)
32846 return 3;
32847 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32848 return 2;
32849 else
32850 return 0;
32853 /* Record an element in the table of global constructors. SYMBOL is
32854 a SYMBOL_REF of the function to be called; PRIORITY is a number
32855 between 0 and MAX_INIT_PRIORITY.
32857 This differs from default_named_section_asm_out_constructor in
32858 that we have special handling for -mrelocatable. */
32860 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
32861 static void
32862 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
32864 const char *section = ".ctors";
32865 char buf[18];
32867 if (priority != DEFAULT_INIT_PRIORITY)
32869 sprintf (buf, ".ctors.%.5u",
32870 /* Invert the numbering so the linker puts us in the proper
32871 order; constructors are run from right to left, and the
32872 linker sorts in increasing order. */
32873 MAX_INIT_PRIORITY - priority);
32874 section = buf;
32877 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32878 assemble_align (POINTER_SIZE);
32880 if (DEFAULT_ABI == ABI_V4
32881 && (TARGET_RELOCATABLE || flag_pic > 1))
32883 fputs ("\t.long (", asm_out_file);
32884 output_addr_const (asm_out_file, symbol);
32885 fputs (")@fixup\n", asm_out_file);
32887 else
32888 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
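/* Worked example (assuming MAX_INIT_PRIORITY == 65535, its usual value):
   a constructor with priority 100 lands in ".ctors.65435" while one with
   priority 200 lands in ".ctors.65335".  The linker's ascending sort puts
   the latter further left, and the right-to-left execution order then runs
   priority 100 first, matching init_priority semantics.  A sketch of the
   name computation: */
#if 0 /* sketch only */
#include <stdio.h>
static void
ctor_section_name (char buf[18], int priority)
{
  const unsigned max_init_priority = 65535;	/* assumed value */
  sprintf (buf, ".ctors.%.5u", max_init_priority - (unsigned) priority);
}
#endif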
32891 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
32892 static void
32893 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
32895 const char *section = ".dtors";
32896 char buf[18];
32898 if (priority != DEFAULT_INIT_PRIORITY)
32900 sprintf (buf, ".dtors.%.5u",
32901 /* Invert the numbering so the linker puts us in the proper
32902 order; constructors are run from right to left, and the
32903 linker sorts in increasing order. */
32904 MAX_INIT_PRIORITY - priority);
32905 section = buf;
32908 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32909 assemble_align (POINTER_SIZE);
32911 if (DEFAULT_ABI == ABI_V4
32912 && (TARGET_RELOCATABLE || flag_pic > 1))
32914 fputs ("\t.long (", asm_out_file);
32915 output_addr_const (asm_out_file, symbol);
32916 fputs (")@fixup\n", asm_out_file);
32918 else
32919 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32922 void
32923 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
32925 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
32927 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
32928 ASM_OUTPUT_LABEL (file, name);
32929 fputs (DOUBLE_INT_ASM_OP, file);
32930 rs6000_output_function_entry (file, name);
32931 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
32932 if (DOT_SYMBOLS)
32934 fputs ("\t.size\t", file);
32935 assemble_name (file, name);
32936 fputs (",24\n\t.type\t.", file);
32937 assemble_name (file, name);
32938 fputs (",@function\n", file);
32939 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
32941 fputs ("\t.globl\t.", file);
32942 assemble_name (file, name);
32943 putc ('\n', file);
32946 else
32947 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32948 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32949 rs6000_output_function_entry (file, name);
32950 fputs (":\n", file);
32951 return;
32954 int uses_toc;
32955 if (DEFAULT_ABI == ABI_V4
32956 && (TARGET_RELOCATABLE || flag_pic > 1)
32957 && !TARGET_SECURE_PLT
32958 && (!constant_pool_empty_p () || crtl->profile)
32959 && (uses_toc = uses_TOC ()))
32961 char buf[256];
32963 if (uses_toc == 2)
32964 switch_to_other_text_partition ();
32965 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32967 fprintf (file, "\t.long ");
32968 assemble_name (file, toc_label_name);
32969 need_toc_init = 1;
32970 putc ('-', file);
32971 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32972 assemble_name (file, buf);
32973 putc ('\n', file);
32974 if (uses_toc == 2)
32975 switch_to_other_text_partition ();
32978 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32979 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32981 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
32983 char buf[256];
32985 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32987 fprintf (file, "\t.quad .TOC.-");
32988 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32989 assemble_name (file, buf);
32990 putc ('\n', file);
32993 if (DEFAULT_ABI == ABI_AIX)
32995 const char *desc_name, *orig_name;
32997 orig_name = (*targetm.strip_name_encoding) (name);
32998 desc_name = orig_name;
32999 while (*desc_name == '.')
33000 desc_name++;
33002 if (TREE_PUBLIC (decl))
33003 fprintf (file, "\t.globl %s\n", desc_name);
33005 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33006 fprintf (file, "%s:\n", desc_name);
33007 fprintf (file, "\t.long %s\n", orig_name);
33008 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33009 fputs ("\t.long 0\n", file);
33010 fprintf (file, "\t.previous\n");
33012 ASM_OUTPUT_LABEL (file, name);
33015 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33016 static void
33017 rs6000_elf_file_end (void)
33019 #ifdef HAVE_AS_GNU_ATTRIBUTE
33020 /* ??? The value emitted depends on options active at file end.
33021 Assume anyone using #pragma or attributes that might change
33022 options knows what they are doing. */
33023 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33024 && rs6000_passes_float)
33026 int fp;
33028 if (TARGET_HARD_FLOAT)
33029 fp = 1;
33030 else
33031 fp = 2;
33032 if (rs6000_passes_long_double)
33034 if (!TARGET_LONG_DOUBLE_128)
33035 fp |= 2 * 4;
33036 else if (TARGET_IEEEQUAD)
33037 fp |= 3 * 4;
33038 else
33039 fp |= 1 * 4;
33041 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
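/* Worked example of the encoding above: bits 0-1 hold the FP model
   (1 = hard float, 2 = soft float) and bits 2-3 the long double format
   (1 = 128-bit IBM double-double, 2 = 64-bit, 3 = 128-bit IEEE), so a
   hard-float compilation passing IBM extended long doubles emits
   ".gnu_attribute 4, 5" (1 | 1*4). */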
33043 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33045 if (rs6000_passes_vector)
33046 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33047 (TARGET_ALTIVEC_ABI ? 2 : 1));
33048 if (rs6000_returns_struct)
33049 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33050 aix_struct_return ? 2 : 1);
33052 #endif
33053 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33054 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33055 file_end_indicate_exec_stack ();
33056 #endif
33058 if (flag_split_stack)
33059 file_end_indicate_split_stack ();
33061 if (cpu_builtin_p)
33063 /* We have expanded a CPU builtin, so we need to emit a reference to
33064 the special symbol that LIBC uses to declare that it supports the
33065 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33066 switch_to_section (data_section);
33067 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33068 fprintf (asm_out_file, "\t%s %s\n",
33069 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33072 #endif
33074 #if TARGET_XCOFF
33076 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33077 #define HAVE_XCOFF_DWARF_EXTRAS 0
33078 #endif
33080 static enum unwind_info_type
33081 rs6000_xcoff_debug_unwind_info (void)
33083 return UI_NONE;
33086 static void
33087 rs6000_xcoff_asm_output_anchor (rtx symbol)
33089 char buffer[100];
33091 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33092 SYMBOL_REF_BLOCK_OFFSET (symbol));
33093 fprintf (asm_out_file, "%s", SET_ASM_OP);
33094 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33095 fprintf (asm_out_file, ",");
33096 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33097 fprintf (asm_out_file, "\n");
33100 static void
33101 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33103 fputs (GLOBAL_ASM_OP, stream);
33104 RS6000_OUTPUT_BASENAME (stream, name);
33105 putc ('\n', stream);
33108 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33109 points to the section string variable. */
33111 static void
33112 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33114 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33115 *(const char *const *) directive,
33116 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33119 /* Likewise for read-write sections. */
33121 static void
33122 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33124 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33125 *(const char *const *) directive,
33126 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33129 static void
33130 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33132 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33133 *(const char *const *) directive,
33134 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33137 /* A get_unnamed_section callback, used for switching to toc_section. */
33139 static void
33140 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33142 if (TARGET_MINIMAL_TOC)
33144 /* toc_section is always selected at least once from
33145 rs6000_xcoff_file_start, so this is guaranteed to
33146 always be defined once and only once in each file. */
33147 if (!toc_initialized)
33149 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33150 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33151 toc_initialized = 1;
33153 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33154 (TARGET_32BIT ? "" : ",3"));
33156 else
33157 fputs ("\t.toc\n", asm_out_file);
33160 /* Implement TARGET_ASM_INIT_SECTIONS. */
33162 static void
33163 rs6000_xcoff_asm_init_sections (void)
33165 read_only_data_section
33166 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33167 &xcoff_read_only_section_name);
33169 private_data_section
33170 = get_unnamed_section (SECTION_WRITE,
33171 rs6000_xcoff_output_readwrite_section_asm_op,
33172 &xcoff_private_data_section_name);
33174 tls_data_section
33175 = get_unnamed_section (SECTION_TLS,
33176 rs6000_xcoff_output_tls_section_asm_op,
33177 &xcoff_tls_data_section_name);
33179 tls_private_data_section
33180 = get_unnamed_section (SECTION_TLS,
33181 rs6000_xcoff_output_tls_section_asm_op,
33182 &xcoff_private_data_section_name);
33184 read_only_private_data_section
33185 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33186 &xcoff_private_data_section_name);
33188 toc_section
33189 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33191 readonly_data_section = read_only_data_section;
33194 static int
33195 rs6000_xcoff_reloc_rw_mask (void)
33197 return 3;
33200 static void
33201 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33202 tree decl ATTRIBUTE_UNUSED)
33204 int smclass;
33205 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33207 if (flags & SECTION_EXCLUDE)
33208 smclass = 4;
33209 else if (flags & SECTION_DEBUG)
33211 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33212 return;
33214 else if (flags & SECTION_CODE)
33215 smclass = 0;
33216 else if (flags & SECTION_TLS)
33217 smclass = 3;
33218 else if (flags & SECTION_WRITE)
33219 smclass = 2;
33220 else
33221 smclass = 1;
33223 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33224 (flags & SECTION_CODE) ? "." : "",
33225 name, suffix[smclass], flags & SECTION_ENTSIZE);
33228 #define IN_NAMED_SECTION(DECL) \
33229 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33230 && DECL_SECTION_NAME (DECL) != NULL)
33232 static section *
33233 rs6000_xcoff_select_section (tree decl, int reloc,
33234 unsigned HOST_WIDE_INT align)
33236 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33237 a named section. */
33238 if (align > BIGGEST_ALIGNMENT)
33240 resolve_unique_section (decl, reloc, true);
33241 if (IN_NAMED_SECTION (decl))
33242 return get_named_section (decl, NULL, reloc);
33245 if (decl_readonly_section (decl, reloc))
33247 if (TREE_PUBLIC (decl))
33248 return read_only_data_section;
33249 else
33250 return read_only_private_data_section;
33252 else
33254 #if HAVE_AS_TLS
33255 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33257 if (TREE_PUBLIC (decl))
33258 return tls_data_section;
33259 else if (bss_initializer_p (decl))
33261 /* Convert to COMMON to emit in BSS. */
33262 DECL_COMMON (decl) = 1;
33263 return tls_comm_section;
33265 else
33266 return tls_private_data_section;
33268 else
33269 #endif
33270 if (TREE_PUBLIC (decl))
33271 return data_section;
33272 else
33273 return private_data_section;
33277 static void
33278 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33280 const char *name;
33282 /* Use select_section for private data and uninitialized data with
33283 alignment <= BIGGEST_ALIGNMENT. */
33284 if (!TREE_PUBLIC (decl)
33285 || DECL_COMMON (decl)
33286 || (DECL_INITIAL (decl) == NULL_TREE
33287 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33288 || DECL_INITIAL (decl) == error_mark_node
33289 || (flag_zero_initialized_in_bss
33290 && initializer_zerop (DECL_INITIAL (decl))))
33291 return;
33293 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33294 name = (*targetm.strip_name_encoding) (name);
33295 set_decl_section_name (decl, name);
33298 /* Select section for constant in constant pool.
33300 On RS/6000, all constants are in the private read-only data area.
33301 However, if this is being placed in the TOC it must be output as a
33302 toc entry. */
33304 static section *
33305 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33306 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33308 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33309 return toc_section;
33310 else
33311 return read_only_private_data_section;
33314 /* Remove any trailing [DS] or the like from the symbol name. */
33316 static const char *
33317 rs6000_xcoff_strip_name_encoding (const char *name)
33319 size_t len;
33320 if (*name == '*')
33321 name++;
33322 len = strlen (name);
33323 if (name[len - 1] == ']')
33324 return ggc_alloc_string (name, len - 4);
33325 else
33326 return name;
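/* Worked example: "*_foo[DS]" first loses the '*' prefix; then, because
   the name ends in ']', the last four characters are dropped, yielding
   "_foo".  Note the len - 4 assumes every trailing mapping class is two
   letters wide ("[DS]", "[RO]", "[RW]", ...).  A standalone model: */
#if 0 /* sketch only */
#include <string.h>
static void
strip_mapping_class (char *name)
{
  size_t len = strlen (name);
  if (len >= 4 && name[len - 1] == ']')
    name[len - 4] = '\0';	/* chop "[XX]" */
}
#endif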
33329 /* Section attributes. AIX is always PIC. */
33331 static unsigned int
33332 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33334 unsigned int align;
33335 unsigned int flags = default_section_type_flags (decl, name, reloc);
33337 /* Align to at least UNIT size. */
33338 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33339 align = MIN_UNITS_PER_WORD;
33340 else
33341 /* Increase alignment of large objects if not already stricter. */
33342 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33343 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33344 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33346 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
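/* Worked example: a decl with 128-bit (16-byte) alignment gives
   exact_log2 (16) == 4; that 4 rides along in the SECTION_ENTSIZE bits of
   the flags until rs6000_xcoff_asm_named_section prints it as the trailing
   alignment operand of the .csect directive. */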
33349 /* Output at beginning of assembler file.
33351 Initialize the section names for the RS/6000 at this point.
33353 Specify filename, including full path, to assembler.
33355 We want to go into the TOC section so at least one .toc will be emitted.
33356 Also, in order to output proper .bs/.es pairs, we need at least one static
33357 [RW] section emitted.
33359 Finally, declare mcount when profiling to make the assembler happy. */
33361 static void
33362 rs6000_xcoff_file_start (void)
33364 rs6000_gen_section_name (&xcoff_bss_section_name,
33365 main_input_filename, ".bss_");
33366 rs6000_gen_section_name (&xcoff_private_data_section_name,
33367 main_input_filename, ".rw_");
33368 rs6000_gen_section_name (&xcoff_read_only_section_name,
33369 main_input_filename, ".ro_");
33370 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33371 main_input_filename, ".tls_");
33372 rs6000_gen_section_name (&xcoff_tbss_section_name,
33373 main_input_filename, ".tbss_[UL]");
33375 fputs ("\t.file\t", asm_out_file);
33376 output_quoted_string (asm_out_file, main_input_filename);
33377 fputc ('\n', asm_out_file);
33378 if (write_symbols != NO_DEBUG)
33379 switch_to_section (private_data_section);
33380 switch_to_section (toc_section);
33381 switch_to_section (text_section);
33382 if (profile_flag)
33383 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33384 rs6000_file_start ();
33387 /* Output at end of assembler file.
33388 On the RS/6000, referencing data should automatically pull in text. */
33390 static void
33391 rs6000_xcoff_file_end (void)
33393 switch_to_section (text_section);
33394 fputs ("_section_.text:\n", asm_out_file);
33395 switch_to_section (data_section);
33396 fputs (TARGET_32BIT
33397 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33398 asm_out_file);
33401 struct declare_alias_data
33403 FILE *file;
33404 bool function_descriptor;
33407 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33409 static bool
33410 rs6000_declare_alias (struct symtab_node *n, void *d)
33412 struct declare_alias_data *data = (struct declare_alias_data *)d;
33413 /* Main symbol is output specially, because varasm machinery does part of
33414 the job for us - we do not need to declare .globl/lglobs and such. */
33415 if (!n->alias || n->weakref)
33416 return false;
33418 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33419 return false;
33421 /* Prevent assemble_alias from trying to use the .set pseudo operation,
33422 which does not behave as the middle-end expects. */
33423 TREE_ASM_WRITTEN (n->decl) = true;
33425 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33426 char *buffer = (char *) alloca (strlen (name) + 2);
33427 char *p;
33428 int dollar_inside = 0;
33430 strcpy (buffer, name);
33431 p = strchr (buffer, '$');
33432 while (p) {
33433 *p = '_';
33434 dollar_inside++;
33435 p = strchr (p + 1, '$');
33437 if (TREE_PUBLIC (n->decl))
33439 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33441 if (dollar_inside) {
33442 if (data->function_descriptor)
33443 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33444 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33446 if (data->function_descriptor)
33448 fputs ("\t.globl .", data->file);
33449 RS6000_OUTPUT_BASENAME (data->file, buffer);
33450 putc ('\n', data->file);
33452 fputs ("\t.globl ", data->file);
33453 RS6000_OUTPUT_BASENAME (data->file, buffer);
33454 putc ('\n', data->file);
33456 #ifdef ASM_WEAKEN_DECL
33457 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33458 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33459 #endif
33461 else
33463 if (dollar_inside)
33465 if (data->function_descriptor)
33466 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33467 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33469 if (data->function_descriptor)
33471 fputs ("\t.lglobl .", data->file);
33472 RS6000_OUTPUT_BASENAME (data->file, buffer);
33473 putc ('\n', data->file);
33475 fputs ("\t.lglobl ", data->file);
33476 RS6000_OUTPUT_BASENAME (data->file, buffer);
33477 putc ('\n', data->file);
33479 if (data->function_descriptor)
33480 fputs (".", data->file);
33481 RS6000_OUTPUT_BASENAME (data->file, buffer);
33482 fputs (":\n", data->file);
33483 return false;
33487 #ifdef HAVE_GAS_HIDDEN
33488 /* Helper function to calculate visibility of a DECL
33489 and return the value as a const string. */
33491 static const char *
33492 rs6000_xcoff_visibility (tree decl)
33494 static const char * const visibility_types[] = {
33495 "", ",protected", ",hidden", ",internal"
33498 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33500 if (TREE_CODE (decl) == FUNCTION_DECL
33501 && cgraph_node::get (decl)
33502 && cgraph_node::get (decl)->instrumentation_clone
33503 && cgraph_node::get (decl)->instrumented_version)
33504 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33506 return visibility_types[vis];
33508 #endif
33511 /* This macro produces the initial definition of a function name.
33512 On the RS/6000, we need to place an extra '.' in the function name and
33513 output the function descriptor.
33514 Dollar signs are converted to underscores.
33516 The csect for the function will have already been created when
33517 text_section was selected. We do have to go back to that csect, however.
33519 The third and fourth parameters to the .function pseudo-op (16 and 044)
33520 are placeholders which no longer have any use.
33522 Because AIX assembler's .set command has unexpected semantics, we output
33523 all aliases as alternative labels in front of the definition. */
33525 void
33526 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33528 char *buffer = (char *) alloca (strlen (name) + 1);
33529 char *p;
33530 int dollar_inside = 0;
33531 struct declare_alias_data data = {file, false};
33533 strcpy (buffer, name);
33534 p = strchr (buffer, '$');
33535 while (p) {
33536 *p = '_';
33537 dollar_inside++;
33538 p = strchr (p + 1, '$');
33540 if (TREE_PUBLIC (decl))
33542 if (!RS6000_WEAK || !DECL_WEAK (decl))
33544 if (dollar_inside) {
33545 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33546 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33548 fputs ("\t.globl .", file);
33549 RS6000_OUTPUT_BASENAME (file, buffer);
33550 #ifdef HAVE_GAS_HIDDEN
33551 fputs (rs6000_xcoff_visibility (decl), file);
33552 #endif
33553 putc ('\n', file);
33556 else
33558 if (dollar_inside) {
33559 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33560 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33562 fputs ("\t.lglobl .", file);
33563 RS6000_OUTPUT_BASENAME (file, buffer);
33564 putc ('\n', file);
33566 fputs ("\t.csect ", file);
33567 RS6000_OUTPUT_BASENAME (file, buffer);
33568 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33569 RS6000_OUTPUT_BASENAME (file, buffer);
33570 fputs (":\n", file);
33571 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33572 &data, true);
33573 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33574 RS6000_OUTPUT_BASENAME (file, buffer);
33575 fputs (", TOC[tc0], 0\n", file);
33576 in_section = NULL;
33577 switch_to_section (function_section (decl));
33578 putc ('.', file);
33579 RS6000_OUTPUT_BASENAME (file, buffer);
33580 fputs (":\n", file);
33581 data.function_descriptor = true;
33582 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33583 &data, true);
33584 if (!DECL_IGNORED_P (decl))
33586 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33587 xcoffout_declare_function (file, decl, buffer);
33588 else if (write_symbols == DWARF2_DEBUG)
33590 name = (*targetm.strip_name_encoding) (name);
33591 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33594 return;
33598 /* Output assembly language to globalize a symbol from a DECL,
33599 possibly with visibility. */
33601 void
33602 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33604 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33605 fputs (GLOBAL_ASM_OP, stream);
33606 RS6000_OUTPUT_BASENAME (stream, name);
33607 #ifdef HAVE_GAS_HIDDEN
33608 fputs (rs6000_xcoff_visibility (decl), stream);
33609 #endif
33610 putc ('\n', stream);
33613 /* Output assembly language to define a symbol as COMMON from a DECL,
33614 possibly with visibility. */
33616 void
33617 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33618 tree decl ATTRIBUTE_UNUSED,
33619 const char *name,
33620 unsigned HOST_WIDE_INT size,
33621 unsigned HOST_WIDE_INT align)
33623 unsigned HOST_WIDE_INT align2 = 2;
33625 if (align > 32)
33626 align2 = floor_log2 (align / BITS_PER_UNIT);
33627 else if (size > 4)
33628 align2 = 3;
33630 fputs (COMMON_ASM_OP, stream);
33631 RS6000_OUTPUT_BASENAME (stream, name);
33633 fprintf (stream,
33634 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33635 size, align2);
33637 #ifdef HAVE_GAS_HIDDEN
33638 if (decl != NULL)
33639 fputs (rs6000_xcoff_visibility (decl), stream);
33640 #endif
33641 putc ('\n', stream);
33644 /* This macro produces the initial definition of an object (variable) name.
33645 Because AIX assembler's .set command has unexpected semantics, we output
33646 all aliases as alternative labels in front of the definition. */
33648 void
33649 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33651 struct declare_alias_data data = {file, false};
33652 RS6000_OUTPUT_BASENAME (file, name);
33653 fputs (":\n", file);
33654 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33655 &data, true);
33658 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33660 void
33661 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33663 fputs (integer_asm_op (size, FALSE), file);
33664 assemble_name (file, label);
33665 fputs ("-$", file);
33668 /* Output a symbol offset relative to the dbase for the current object.
33669 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33670 signed offsets.
33672 __gcc_unwind_dbase is embedded in all executables/libraries through
33673 libgcc/config/rs6000/crtdbase.S. */
33675 void
33676 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33678 fputs (integer_asm_op (size, FALSE), file);
33679 assemble_name (file, label);
33680 fputs("-__gcc_unwind_dbase", file);
33683 #ifdef HAVE_AS_TLS
33684 static void
33685 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33687 rtx symbol;
33688 int flags;
33689 const char *symname;
33691 default_encode_section_info (decl, rtl, first);
33693 /* Careful not to prod global register variables. */
33694 if (!MEM_P (rtl))
33695 return;
33696 symbol = XEXP (rtl, 0);
33697 if (GET_CODE (symbol) != SYMBOL_REF)
33698 return;
33700 flags = SYMBOL_REF_FLAGS (symbol);
33702 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33703 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33705 SYMBOL_REF_FLAGS (symbol) = flags;
33707 /* Append mapping class to extern decls. */
33708 symname = XSTR (symbol, 0);
33709 if (decl /* sync condition with assemble_external () */
33710 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33711 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33712 || TREE_CODE (decl) == FUNCTION_DECL)
33713 && symname[strlen (symname) - 1] != ']')
33715 char *newname = (char *) alloca (strlen (symname) + 5);
33716 strcpy (newname, symname);
33717 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33718 ? "[DS]" : "[UA]"));
33719 XSTR (symbol, 0) = ggc_strdup (newname);
33722 #endif /* HAVE_AS_TLS */
33723 #endif /* TARGET_XCOFF */
33725 void
33726 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33727 const char *name, const char *val)
33729 fputs ("\t.weak\t", stream);
33730 RS6000_OUTPUT_BASENAME (stream, name);
33731 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33732 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33734 if (TARGET_XCOFF)
33735 fputs ("[DS]", stream);
33736 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33737 if (TARGET_XCOFF)
33738 fputs (rs6000_xcoff_visibility (decl), stream);
33739 #endif
33740 fputs ("\n\t.weak\t.", stream);
33741 RS6000_OUTPUT_BASENAME (stream, name);
33743 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33744 if (TARGET_XCOFF)
33745 fputs (rs6000_xcoff_visibility (decl), stream);
33746 #endif
33747 fputc ('\n', stream);
33748 if (val)
33750 #ifdef ASM_OUTPUT_DEF
33751 ASM_OUTPUT_DEF (stream, name, val);
33752 #endif
33753 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33754 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33756 fputs ("\t.set\t.", stream);
33757 RS6000_OUTPUT_BASENAME (stream, name);
33758 fputs (",.", stream);
33759 RS6000_OUTPUT_BASENAME (stream, val);
33760 fputc ('\n', stream);
33766 /* Return true if INSN should not be copied. */
33768 static bool
33769 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33771 return recog_memoized (insn) >= 0
33772 && get_attr_cannot_copy (insn);
33775 /* Compute a (partial) cost for rtx X. Return true if the complete
33776 cost has been computed, and false if subexpressions should be
33777 scanned. In either case, *TOTAL contains the cost result. */
33779 static bool
33780 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33781 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33783 int code = GET_CODE (x);
33785 switch (code)
33787 /* On the RS/6000, if it is valid in the insn, it is free. */
33788 case CONST_INT:
33789 if (((outer_code == SET
33790 || outer_code == PLUS
33791 || outer_code == MINUS)
33792 && (satisfies_constraint_I (x)
33793 || satisfies_constraint_L (x)))
33794 || (outer_code == AND
33795 && (satisfies_constraint_K (x)
33796 || (mode == SImode
33797 ? satisfies_constraint_L (x)
33798 : satisfies_constraint_J (x))))
33799 || ((outer_code == IOR || outer_code == XOR)
33800 && (satisfies_constraint_K (x)
33801 || (mode == SImode
33802 ? satisfies_constraint_L (x)
33803 : satisfies_constraint_J (x))))
33804 || outer_code == ASHIFT
33805 || outer_code == ASHIFTRT
33806 || outer_code == LSHIFTRT
33807 || outer_code == ROTATE
33808 || outer_code == ROTATERT
33809 || outer_code == ZERO_EXTRACT
33810 || (outer_code == MULT
33811 && satisfies_constraint_I (x))
33812 || ((outer_code == DIV || outer_code == UDIV
33813 || outer_code == MOD || outer_code == UMOD)
33814 && exact_log2 (INTVAL (x)) >= 0)
33815 || (outer_code == COMPARE
33816 && (satisfies_constraint_I (x)
33817 || satisfies_constraint_K (x)))
33818 || ((outer_code == EQ || outer_code == NE)
33819 && (satisfies_constraint_I (x)
33820 || satisfies_constraint_K (x)
33821 || (mode == SImode
33822 ? satisfies_constraint_L (x)
33823 : satisfies_constraint_J (x))))
33824 || (outer_code == GTU
33825 && satisfies_constraint_I (x))
33826 || (outer_code == LTU
33827 && satisfies_constraint_P (x)))
33829 *total = 0;
33830 return true;
33832 else if ((outer_code == PLUS
33833 && reg_or_add_cint_operand (x, VOIDmode))
33834 || (outer_code == MINUS
33835 && reg_or_sub_cint_operand (x, VOIDmode))
33836 || ((outer_code == SET
33837 || outer_code == IOR
33838 || outer_code == XOR)
33839 && (INTVAL (x)
33840 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
33842 *total = COSTS_N_INSNS (1);
33843 return true;
33845 /* FALLTHRU */
33847 case CONST_DOUBLE:
33848 case CONST_WIDE_INT:
33849 case CONST:
33850 case HIGH:
33851 case SYMBOL_REF:
33852 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33853 return true;
33855 case MEM:
33856 /* When optimizing for size, MEM should be slightly more expensive
33857 than generating the address, e.g., (plus (reg) (const)).
33858 L1 cache latency is about two instructions. */
33859 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33860 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
33861 *total += COSTS_N_INSNS (100);
33862 return true;
33864 case LABEL_REF:
33865 *total = 0;
33866 return true;
33868 case PLUS:
33869 case MINUS:
33870 if (FLOAT_MODE_P (mode))
33871 *total = rs6000_cost->fp;
33872 else
33873 *total = COSTS_N_INSNS (1);
33874 return false;
33876 case MULT:
33877 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33878 && satisfies_constraint_I (XEXP (x, 1)))
33880 if (INTVAL (XEXP (x, 1)) >= -256
33881 && INTVAL (XEXP (x, 1)) <= 255)
33882 *total = rs6000_cost->mulsi_const9;
33883 else
33884 *total = rs6000_cost->mulsi_const;
33886 else if (mode == SFmode)
33887 *total = rs6000_cost->fp;
33888 else if (FLOAT_MODE_P (mode))
33889 *total = rs6000_cost->dmul;
33890 else if (mode == DImode)
33891 *total = rs6000_cost->muldi;
33892 else
33893 *total = rs6000_cost->mulsi;
33894 return false;
33896 case FMA:
33897 if (mode == SFmode)
33898 *total = rs6000_cost->fp;
33899 else
33900 *total = rs6000_cost->dmul;
33901 break;
33903 case DIV:
33904 case MOD:
33905 if (FLOAT_MODE_P (mode))
33907 *total = mode == DFmode ? rs6000_cost->ddiv
33908 : rs6000_cost->sdiv;
33909 return false;
33911 /* FALLTHRU */
33913 case UDIV:
33914 case UMOD:
33915 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33916 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
33918 if (code == DIV || code == MOD)
33919 /* Shift, addze */
33920 *total = COSTS_N_INSNS (2);
33921 else
33922 /* Shift */
33923 *total = COSTS_N_INSNS (1);
33925 else
33927 if (GET_MODE (XEXP (x, 1)) == DImode)
33928 *total = rs6000_cost->divdi;
33929 else
33930 *total = rs6000_cost->divsi;
33932 /* Add in shift and subtract for MOD unless we have a mod instruction. */
33933 if (!TARGET_MODULO && (code == MOD || code == UMOD))
33934 *total += COSTS_N_INSNS (2);
33935 return false;
33937 case CTZ:
33938 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
33939 return false;
33941 case FFS:
33942 *total = COSTS_N_INSNS (4);
33943 return false;
33945 case POPCOUNT:
33946 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
33947 return false;
33949 case PARITY:
33950 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
33951 return false;
33953 case NOT:
33954 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
33955 *total = 0;
33956 else
33957 *total = COSTS_N_INSNS (1);
33958 return false;
33960 case AND:
33961 if (CONST_INT_P (XEXP (x, 1)))
33963 rtx left = XEXP (x, 0);
33964 rtx_code left_code = GET_CODE (left);
33966 /* rotate-and-mask: 1 insn. */
33967 if ((left_code == ROTATE
33968 || left_code == ASHIFT
33969 || left_code == LSHIFTRT)
33970 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
33972 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
33973 if (!CONST_INT_P (XEXP (left, 1)))
33974 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
33975 *total += COSTS_N_INSNS (1);
33976 return true;
33979 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
33980 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
33981 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
33982 || (val & 0xffff) == val
33983 || (val & 0xffff0000) == val
33984 || ((val & 0xffff) == 0 && mode == SImode))
33986 *total = rtx_cost (left, mode, AND, 0, speed);
33987 *total += COSTS_N_INSNS (1);
33988 return true;
33991 /* 2 insns. */
33992 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
33994 *total = rtx_cost (left, mode, AND, 0, speed);
33995 *total += COSTS_N_INSNS (2);
33996 return true;
34000 *total = COSTS_N_INSNS (1);
34001 return false;
34003 case IOR:
34004 /* FIXME */
34005 *total = COSTS_N_INSNS (1);
34006 return true;
34008 case CLZ:
34009 case XOR:
34010 case ZERO_EXTRACT:
34011 *total = COSTS_N_INSNS (1);
34012 return false;
34014 case ASHIFT:
34015 /* EXTSWSLI is a combined sign-extend-and-shift instruction. Don't
34016 count the sign extend and the shift separately within the insn. */
34017 if (TARGET_EXTSWSLI && mode == DImode
34018 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34019 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34021 *total = 0;
34022 return false;
34024 /* fall through */
34026 case ASHIFTRT:
34027 case LSHIFTRT:
34028 case ROTATE:
34029 case ROTATERT:
34030 /* Handle mul_highpart. */
34031 if (outer_code == TRUNCATE
34032 && GET_CODE (XEXP (x, 0)) == MULT)
34034 if (mode == DImode)
34035 *total = rs6000_cost->muldi;
34036 else
34037 *total = rs6000_cost->mulsi;
34038 return true;
34040 else if (outer_code == AND)
34041 *total = 0;
34042 else
34043 *total = COSTS_N_INSNS (1);
34044 return false;
34046 case SIGN_EXTEND:
34047 case ZERO_EXTEND:
34048 if (GET_CODE (XEXP (x, 0)) == MEM)
34049 *total = 0;
34050 else
34051 *total = COSTS_N_INSNS (1);
34052 return false;
34054 case COMPARE:
34055 case NEG:
34056 case ABS:
34057 if (!FLOAT_MODE_P (mode))
34059 *total = COSTS_N_INSNS (1);
34060 return false;
34062 /* FALLTHRU */
34064 case FLOAT:
34065 case UNSIGNED_FLOAT:
34066 case FIX:
34067 case UNSIGNED_FIX:
34068 case FLOAT_TRUNCATE:
34069 *total = rs6000_cost->fp;
34070 return false;
34072 case FLOAT_EXTEND:
34073 if (mode == DFmode)
34074 *total = rs6000_cost->sfdf_convert;
34075 else
34076 *total = rs6000_cost->fp;
34077 return false;
34079 case UNSPEC:
34080 switch (XINT (x, 1))
34082 case UNSPEC_FRSP:
34083 *total = rs6000_cost->fp;
34084 return true;
34086 default:
34087 break;
34089 break;
34091 case CALL:
34092 case IF_THEN_ELSE:
34093 if (!speed)
34095 *total = COSTS_N_INSNS (1);
34096 return true;
34098 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34100 *total = rs6000_cost->fp;
34101 return false;
34103 break;
34105 case NE:
34106 case EQ:
34107 case GTU:
34108 case LTU:
34109 /* Carry bit requires mode == Pmode.
34110 NEG or PLUS already counted so only add one. */
34111 if (mode == Pmode
34112 && (outer_code == NEG || outer_code == PLUS))
34114 *total = COSTS_N_INSNS (1);
34115 return true;
34117 /* FALLTHRU */
34119 case GT:
34120 case LT:
34121 case UNORDERED:
34122 if (outer_code == SET)
34124 if (XEXP (x, 1) == const0_rtx)
34126 *total = COSTS_N_INSNS (2);
34127 return true;
34129 else
34131 *total = COSTS_N_INSNS (3);
34132 return false;
34135 /* CC COMPARE. */
34136 if (outer_code == COMPARE)
34138 *total = 0;
34139 return true;
34141 break;
34143 default:
34144 break;
34147 return false;
34150 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34152 static bool
34153 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34154 int opno, int *total, bool speed)
34156 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34158 fprintf (stderr,
34159 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34160 "opno = %d, total = %d, speed = %s, x:\n",
34161 ret ? "complete" : "scan inner",
34162 GET_MODE_NAME (mode),
34163 GET_RTX_NAME (outer_code),
34164 opno,
34165 *total,
34166 speed ? "true" : "false");
34168 debug_rtx (x);
34170 return ret;
34173 static int
34174 rs6000_insn_cost (rtx_insn *insn, bool speed)
34176 if (recog_memoized (insn) < 0)
34177 return 0;
34179 if (!speed)
34180 return get_attr_length (insn);
34182 int cost = get_attr_cost (insn);
34183 if (cost > 0)
34184 return cost;
34186 int n = get_attr_length (insn) / 4;
34187 enum attr_type type = get_attr_type (insn);
34189 switch (type)
34191 case TYPE_LOAD:
34192 case TYPE_FPLOAD:
34193 case TYPE_VECLOAD:
34194 cost = COSTS_N_INSNS (n + 1);
34195 break;
34197 case TYPE_MUL:
34198 switch (get_attr_size (insn))
34200 case SIZE_8:
34201 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34202 break;
34203 case SIZE_16:
34204 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34205 break;
34206 case SIZE_32:
34207 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34208 break;
34209 case SIZE_64:
34210 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34211 break;
34212 default:
34213 gcc_unreachable ();
34215 break;
34216 case TYPE_DIV:
34217 switch (get_attr_size (insn))
34219 case SIZE_32:
34220 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34221 break;
34222 case SIZE_64:
34223 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34224 break;
34225 default:
34226 gcc_unreachable ();
34228 break;
34230 case TYPE_FP:
34231 cost = n * rs6000_cost->fp;
34232 break;
34233 case TYPE_DMUL:
34234 cost = n * rs6000_cost->dmul;
34235 break;
34236 case TYPE_SDIV:
34237 cost = n * rs6000_cost->sdiv;
34238 break;
34239 case TYPE_DDIV:
34240 cost = n * rs6000_cost->ddiv;
34241 break;
34243 case TYPE_SYNC:
34244 case TYPE_LOAD_L:
34245 case TYPE_MFCR:
34246 case TYPE_MFCRF:
34247 cost = COSTS_N_INSNS (n + 2);
34248 break;
34250 default:
34251 cost = COSTS_N_INSNS (n);
34254 return cost;
34257 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34259 static int
34260 rs6000_debug_address_cost (rtx x, machine_mode mode,
34261 addr_space_t as, bool speed)
34263 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34265 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34266 ret, speed ? "true" : "false");
34267 debug_rtx (x);
34269 return ret;
34273 /* A C expression returning the cost of moving data from a register of class
34274 CLASS1 to one of CLASS2. */
34276 static int
34277 rs6000_register_move_cost (machine_mode mode,
34278 reg_class_t from, reg_class_t to)
34280 int ret;
34282 if (TARGET_DEBUG_COST)
34283 dbg_cost_ctrl++;
34285 /* Moves from/to GENERAL_REGS. */
34286 if (reg_classes_intersect_p (to, GENERAL_REGS)
34287 || reg_classes_intersect_p (from, GENERAL_REGS))
34289 reg_class_t rclass = from;
34291 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34292 rclass = to;
34294 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34295 ret = (rs6000_memory_move_cost (mode, rclass, false)
34296 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34298 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34299 shift. */
34300 else if (rclass == CR_REGS)
34301 ret = 4;
34303 /* For those processors that have slow LR/CTR moves, make them more
34304 expensive than memory in order to bias spills to memory. */
34305 else if ((rs6000_tune == PROCESSOR_POWER6
34306 || rs6000_tune == PROCESSOR_POWER7
34307 || rs6000_tune == PROCESSOR_POWER8
34308 || rs6000_tune == PROCESSOR_POWER9)
34309 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34310 ret = 6 * hard_regno_nregs (0, mode);
34312 else
34313 /* A move will cost one instruction per GPR moved. */
34314 ret = 2 * hard_regno_nregs (0, mode);
34317 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34318 else if (VECTOR_MEM_VSX_P (mode)
34319 && reg_classes_intersect_p (to, VSX_REGS)
34320 && reg_classes_intersect_p (from, VSX_REGS))
34321 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34323 /* Moving between two similar registers is just one instruction. */
34324 else if (reg_classes_intersect_p (to, from))
34325 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34327 /* Everything else has to go through GENERAL_REGS. */
34328 else
34329 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34330 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34332 if (TARGET_DEBUG_COST)
34334 if (dbg_cost_ctrl == 1)
34335 fprintf (stderr,
34336 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34337 ret, GET_MODE_NAME (mode), reg_class_names[from],
34338 reg_class_names[to]);
34339 dbg_cost_ctrl--;
34342 return ret;
34345 /* A C expression returning the cost of moving data of MODE from a register to
34346 or from memory. */
34348 static int
34349 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34350 bool in ATTRIBUTE_UNUSED)
34352 int ret;
34354 if (TARGET_DEBUG_COST)
34355 dbg_cost_ctrl++;
34357 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34358 ret = 4 * hard_regno_nregs (0, mode);
34359 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34360 || reg_classes_intersect_p (rclass, VSX_REGS)))
34361 ret = 4 * hard_regno_nregs (32, mode);
34362 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34363 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34364 else
34365 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34367 if (TARGET_DEBUG_COST)
34369 if (dbg_cost_ctrl == 1)
34370 fprintf (stderr,
34371 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34372 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34373 dbg_cost_ctrl--;
34376 return ret;
34379 /* Returns a code for a target-specific builtin that implements
34380 reciprocal of the function, or NULL_TREE if not available. */
34382 static tree
34383 rs6000_builtin_reciprocal (tree fndecl)
34385 switch (DECL_FUNCTION_CODE (fndecl))
34387 case VSX_BUILTIN_XVSQRTDP:
34388 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34389 return NULL_TREE;
34391 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34393 case VSX_BUILTIN_XVSQRTSP:
34394 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34395 return NULL_TREE;
34397 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34399 default:
34400 return NULL_TREE;
34404 /* Load up a constant. If the mode is a vector mode, splat the value across
34405 all of the vector elements. */
34407 static rtx
34408 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34410 rtx reg;
34412 if (mode == SFmode || mode == DFmode)
34414 rtx d = const_double_from_real_value (dconst, mode);
34415 reg = force_reg (mode, d);
34417 else if (mode == V4SFmode)
34419 rtx d = const_double_from_real_value (dconst, SFmode);
34420 rtvec v = gen_rtvec (4, d, d, d, d);
34421 reg = gen_reg_rtx (mode);
34422 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34424 else if (mode == V2DFmode)
34426 rtx d = const_double_from_real_value (dconst, DFmode);
34427 rtvec v = gen_rtvec (2, d, d);
34428 reg = gen_reg_rtx (mode);
34429 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34431 else
34432 gcc_unreachable ();
34434 return reg;
34437 /* Generate an FMA instruction. */
34439 static void
34440 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34442 machine_mode mode = GET_MODE (target);
34443 rtx dst;
34445 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34446 gcc_assert (dst != NULL);
34448 if (dst != target)
34449 emit_move_insn (target, dst);
34452 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34454 static void
34455 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34457 machine_mode mode = GET_MODE (dst);
34458 rtx r;
34460 /* This is a tad more complicated, since the fnma_optab is for
34461 a different expression: fma(-m1, m2, a), which is the same
34462 thing except in the case of signed zeros.
34464 Fortunately we know that if FMA is supported that FNMSUB is
34465 also supported in the ISA. Just expand it directly. */
34467 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34469 r = gen_rtx_NEG (mode, a);
34470 r = gen_rtx_FMA (mode, m1, m2, r);
34471 r = gen_rtx_NEG (mode, r);
34472 emit_insn (gen_rtx_SET (dst, r));
34475 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34476 add a reg_note saying that this was a division. Support both scalar and
34477 vector divide. Assumes no trapping math and finite arguments. */
34479 void
34480 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34482 machine_mode mode = GET_MODE (dst);
34483 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34484 int i;
34486 /* Low precision estimates guarantee 5 bits of accuracy. High
34487 precision estimates guarantee 14 bits of accuracy. SFmode
34488 requires 23 bits of accuracy. DFmode requires 52 bits of
34489 accuracy. Each pass at least doubles the accuracy, leading
34490 to the following. */
34491 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34492 if (mode == DFmode || mode == V2DFmode)
34493 passes++;
34495 enum insn_code code = optab_handler (smul_optab, mode);
34496 insn_gen_fn gen_mul = GEN_FCN (code);
34498 gcc_assert (code != CODE_FOR_nothing);
34500 one = rs6000_load_constant_and_splat (mode, dconst1);
34502 /* x0 = 1./d estimate */
34503 x0 = gen_reg_rtx (mode);
34504 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34505 UNSPEC_FRES)));
34507 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34508 if (passes > 1) {
34510 /* e0 = 1. - d * x0 */
34511 e0 = gen_reg_rtx (mode);
34512 rs6000_emit_nmsub (e0, d, x0, one);
34514 /* x1 = x0 + e0 * x0 */
34515 x1 = gen_reg_rtx (mode);
34516 rs6000_emit_madd (x1, e0, x0, x0);
34518 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34519 ++i, xprev = xnext, eprev = enext) {
34521 /* enext = eprev * eprev */
34522 enext = gen_reg_rtx (mode);
34523 emit_insn (gen_mul (enext, eprev, eprev));
34525 /* xnext = xprev + enext * xprev */
34526 xnext = gen_reg_rtx (mode);
34527 rs6000_emit_madd (xnext, enext, xprev, xprev);
34530 } else
34531 xprev = x0;
34533 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34535 /* u = n * xprev */
34536 u = gen_reg_rtx (mode);
34537 emit_insn (gen_mul (u, n, xprev));
34539 /* v = n - (d * u) */
34540 v = gen_reg_rtx (mode);
34541 rs6000_emit_nmsub (v, d, u, n);
34543 /* dst = (v * xprev) + u */
34544 rs6000_emit_madd (dst, v, xprev, u);
34546 if (note_p)
34547 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
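/* A scalar model of the sequence above (illustrative, not part of GCC;
   like the emitted code it assumes no trapping math and finite operands).
   The residual e = 1 - d*x squares on every pass, so each pass roughly
   doubles the number of correct bits in the reciprocal estimate before
   the numerator is folded in by the final correction: */
#if 0 /* sketch only */
static double
swdiv_model (double n, double d, double x0, int passes)
{
  double x = x0;			/* hardware 1/d estimate (fres/fre) */
  if (passes > 1)
    {
      double e = 1.0 - d * x0;		/* e0 = 1. - d*x0 */
      x = x0 + e * x0;			/* x1 = x0 + e0*x0 */
      for (int i = 0; i < passes - 2; i++)
	{
	  e = e * e;			/* enext = eprev*eprev */
	  x = x + e * x;		/* xnext = xprev + enext*xprev */
	}
    }
  double u = n * x;			/* u = n*xprev */
  double v = n - d * u;			/* v = n - (d*u) */
  return v * x + u;			/* dst = (v*xprev) + u */
}
#endif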
34550 /* Goldschmidt's Algorithm for single/double-precision floating point
34551 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34553 void
34554 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34556 machine_mode mode = GET_MODE (src);
34557 rtx e = gen_reg_rtx (mode);
34558 rtx g = gen_reg_rtx (mode);
34559 rtx h = gen_reg_rtx (mode);
34561 /* Low precision estimates guarantee 5 bits of accuracy. High
34562 precision estimates guarantee 14 bits of accuracy. SFmode
34563 requires 23 bits of accuracy. DFmode requires 52 bits of
34564 accuracy. Each pass at least doubles the accuracy, leading
34565 to the following. */
34566 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34567 if (mode == DFmode || mode == V2DFmode)
34568 passes++;
34570 int i;
34571 rtx mhalf;
34572 enum insn_code code = optab_handler (smul_optab, mode);
34573 insn_gen_fn gen_mul = GEN_FCN (code);
34575 gcc_assert (code != CODE_FOR_nothing);
34577 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34579 /* e = rsqrt estimate */
34580 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34581 UNSPEC_RSQRT)));
34583 /* If src == 0.0, filter out the infinite rsqrt estimate to prevent a NaN result for sqrt(0.0). */
34584 if (!recip)
34586 rtx zero = force_reg (mode, CONST0_RTX (mode));
34588 if (mode == SFmode)
34590 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34591 e, zero, mode, 0);
34592 if (target != e)
34593 emit_move_insn (e, target);
34595 else
34597 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34598 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34602 /* g = sqrt estimate. */
34603 emit_insn (gen_mul (g, e, src));
34604 /* h = 1/(2*sqrt) estimate. */
34605 emit_insn (gen_mul (h, e, mhalf));
34607 if (recip)
34609 if (passes == 1)
34611 rtx t = gen_reg_rtx (mode);
34612 rs6000_emit_nmsub (t, g, h, mhalf);
34613 /* Apply correction directly to 1/rsqrt estimate. */
34614 rs6000_emit_madd (dst, e, t, e);
34616 else
34618 for (i = 0; i < passes; i++)
34620 rtx t1 = gen_reg_rtx (mode);
34621 rtx g1 = gen_reg_rtx (mode);
34622 rtx h1 = gen_reg_rtx (mode);
34624 rs6000_emit_nmsub (t1, g, h, mhalf);
34625 rs6000_emit_madd (g1, g, t1, g);
34626 rs6000_emit_madd (h1, h, t1, h);
34628 g = g1;
34629 h = h1;
34631 /* Multiply by 2 for 1/rsqrt. */
34632 emit_insn (gen_add3_insn (dst, h, h));
34635 else
34637 rtx t = gen_reg_rtx (mode);
34638 rs6000_emit_nmsub (t, g, h, mhalf);
34639 rs6000_emit_madd (dst, g, t, g);
34642 return;
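/* A scalar model of the emitted sequence (illustrative, not part of GCC;
   the src == 0.0 filtering above is omitted).  From e ~ 1/sqrt(src) the
   code keeps g ~ sqrt(src) and h ~ 1/(2*sqrt(src)); each pass computes the
   residual t = 1/2 - g*h and applies it to both, so g*h converges
   quadratically to 1/2.  The sqrt path applies a single final correction
   to g, while the rsqrt path doubles the final h: */
#if 0 /* sketch only */
static double
swsqrt_model (double src, double e, int passes, int recip)
{
  double g = e * src;			/* sqrt estimate */
  double h = e * 0.5;			/* 1/(2*sqrt) estimate */
  if (recip)
    {
      if (passes == 1)
	{
	  double t = 0.5 - g * h;
	  return e + t * e;		/* correct the rsqrt estimate */
	}
      for (int i = 0; i < passes; i++)
	{
	  double t = 0.5 - g * h;
	  g = g + t * g;
	  h = h + t * h;
	}
      return h + h;			/* 2*h -> 1/sqrt */
    }
  double t = 0.5 - g * h;
  return g + t * g;			/* corrected sqrt estimate */
}
#endif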
34645 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34646 (Power7) targets. DST is the target, and SRC is the argument operand. */
34648 void
34649 rs6000_emit_popcount (rtx dst, rtx src)
34651 machine_mode mode = GET_MODE (dst);
34652 rtx tmp1, tmp2;
34654 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34655 if (TARGET_POPCNTD)
34657 if (mode == SImode)
34658 emit_insn (gen_popcntdsi2 (dst, src));
34659 else
34660 emit_insn (gen_popcntddi2 (dst, src));
34661 return;
34664 tmp1 = gen_reg_rtx (mode);
34666 if (mode == SImode)
34668 emit_insn (gen_popcntbsi2 (tmp1, src));
34669 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34670 NULL_RTX, 0);
34671 tmp2 = force_reg (SImode, tmp2);
34672 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34674 else
34676 emit_insn (gen_popcntbdi2 (tmp1, src));
34677 tmp2 = expand_mult (DImode, tmp1,
34678 GEN_INT ((HOST_WIDE_INT)
34679 0x01010101 << 32 | 0x01010101),
34680 NULL_RTX, 0);
34681 tmp2 = force_reg (DImode, tmp2);
34682 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
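/* A plain C model of the fallback path (illustrative, not part of GCC):
   popcntb leaves each byte's population count in that byte; multiplying by
   0x01010101 accumulates every byte into the most significant byte, which
   the final shift (24 for SImode, 56 for DImode) extracts.  For example
   popcntb (0x01020304) is 0x01010201, and 0x01010201 * 0x01010101 carries
   1+1+2+1 = 5 into the top byte: */
#if 0 /* sketch only */
static unsigned int
popcount_model (unsigned int x)
{
  unsigned int per_byte = 0;
  for (int b = 0; b < 4; b++)
    {
      unsigned int byte = (x >> (8 * b)) & 0xff;
      unsigned int cnt = 0;
      while (byte)			/* per-byte count, as popcntb does */
	{
	  cnt += byte & 1;
	  byte >>= 1;
	}
      per_byte |= cnt << (8 * b);
    }
  return (per_byte * 0x01010101u) >> 24;	/* sum bytes, take top byte */
}
#endif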
34687 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34688 target, and SRC is the argument operand. */
34690 void
34691 rs6000_emit_parity (rtx dst, rtx src)
34693 machine_mode mode = GET_MODE (dst);
34694 rtx tmp;
34696 tmp = gen_reg_rtx (mode);
34698 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34699 if (TARGET_CMPB)
34701 if (mode == SImode)
34703 emit_insn (gen_popcntbsi2 (tmp, src));
34704 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34706 else
34708 emit_insn (gen_popcntbdi2 (tmp, src));
34709 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34711 return;
34714 if (mode == SImode)
34716 /* Is mult+shift >= shift+xor+shift+xor? */
34717 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34719 rtx tmp1, tmp2, tmp3, tmp4;
34721 tmp1 = gen_reg_rtx (SImode);
34722 emit_insn (gen_popcntbsi2 (tmp1, src));
34724 tmp2 = gen_reg_rtx (SImode);
34725 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34726 tmp3 = gen_reg_rtx (SImode);
34727 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34729 tmp4 = gen_reg_rtx (SImode);
34730 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34731 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34733 else
34734 rs6000_emit_popcount (tmp, src);
34735 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34737 else
34739 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34740 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34742 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34744 tmp1 = gen_reg_rtx (DImode);
34745 emit_insn (gen_popcntbdi2 (tmp1, src));
34747 tmp2 = gen_reg_rtx (DImode);
34748 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34749 tmp3 = gen_reg_rtx (DImode);
34750 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34752 tmp4 = gen_reg_rtx (DImode);
34753 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34754 tmp5 = gen_reg_rtx (DImode);
34755 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34757 tmp6 = gen_reg_rtx (DImode);
34758 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34759 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34761 else
34762 rs6000_emit_popcount (tmp, src);
34763 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
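/* Illustrative sketch (not part of the build): the shift/xor fallback
   above for the 64-bit case, in scalar C, with byte_counts() again a
   hypothetical stand-in for popcntb.

     unsigned long long t = byte_counts (src);
     t ^= t >> 32;   fold the byte counts pairwise
     t ^= t >> 16;
     t ^= t >> 8;    the low byte now holds the xor of all byte counts
     return t & 1;

   Xor preserves the low bit of a sum, so the low bit of the folded byte
   counts equals the low bit of the total popcount, i.e. the parity.  */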
34767 /* Expand an Altivec constant permutation for little endian mode.
34768 OP0 and OP1 are the input vectors and TARGET is the output vector.
34769 SEL specifies the constant permutation vector.
34771 There are two issues: First, the two input operands must be
34772 swapped so that together they form a double-wide array in LE
34773 order. Second, the vperm instruction has surprising behavior
34774 in LE mode: it interprets the elements of the source vectors
34775 in BE mode ("left to right") and interprets the elements of
34776 the destination vector in LE mode ("right to left"). To
34777 correct for this, we must subtract each element of the permute
34778 control vector from 31.
34780 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34781 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34782 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34783 serve as the permute control vector. Then, in BE mode,
34785 vperm 9,10,11,12
34787 places the desired result in vr9. However, in LE mode the
34788 vector contents will be
34790 vr10 = 00000003 00000002 00000001 00000000
34791 vr11 = 00000007 00000006 00000005 00000004
34793 The result of the vperm using the same permute control vector is
34795 vr9 = 05000000 07000000 01000000 03000000
34797 That is, the leftmost 4 bytes of vr10 are interpreted as the
34798 source for the rightmost 4 bytes of vr9, and so on.
34800 If we change the permute control vector to
34802 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34804 and issue
34806 vperm 9,11,10,12
34808 we get the desired
34810 vr9 = 00000006 00000004 00000002 00000000. */
34812 static void
34813 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
34814 const vec_perm_indices &sel)
34816 unsigned int i;
34817 rtx perm[16];
34818 rtx constv, unspec;
34820 /* Unpack and adjust the constant selector. */
34821 for (i = 0; i < 16; ++i)
34823 unsigned int elt = 31 - (sel[i] & 31);
34824 perm[i] = GEN_INT (elt);
34827 /* Expand to a permute, swapping the inputs and using the
34828 adjusted selector. */
34829 if (!REG_P (op0))
34830 op0 = force_reg (V16QImode, op0);
34831 if (!REG_P (op1))
34832 op1 = force_reg (V16QImode, op1);
34834 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34835 constv = force_reg (V16QImode, constv);
34836 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34837 UNSPEC_VPERM);
34838 if (!REG_P (target))
34840 rtx tmp = gen_reg_rtx (V16QImode);
34841 emit_move_insn (tmp, unspec);
34842 unspec = tmp;
34845 emit_move_insn (target, unspec);
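/* Illustrative sketch (not part of the build): the adjustment performed
   above, in scalar terms.

     for (i = 0; i < 16; ++i)
       perm[i] = 31 - (sel[i] & 31);
     vperm (target, op1, op0, perm);   note the swapped inputs

   E.g. sel[i] == 0 becomes 31 and sel[i] == 31 becomes 0, compensating
   for the vperm unit numbering source bytes left-to-right (BE) while
   the destination is stored right-to-left (LE).  */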
34848 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34849 permute control vector. But here it's not a constant, so we must
34850 generate a vector NAND or NOR to do the adjustment. */
34852 void
34853 altivec_expand_vec_perm_le (rtx operands[4])
34855 rtx notx, iorx, unspec;
34856 rtx target = operands[0];
34857 rtx op0 = operands[1];
34858 rtx op1 = operands[2];
34859 rtx sel = operands[3];
34860 rtx tmp = target;
34861 rtx norreg = gen_reg_rtx (V16QImode);
34862 machine_mode mode = GET_MODE (target);
34864 /* Get everything in regs so the pattern matches. */
34865 if (!REG_P (op0))
34866 op0 = force_reg (mode, op0);
34867 if (!REG_P (op1))
34868 op1 = force_reg (mode, op1);
34869 if (!REG_P (sel))
34870 sel = force_reg (V16QImode, sel);
34871 if (!REG_P (target))
34872 tmp = gen_reg_rtx (mode);
34874 if (TARGET_P9_VECTOR)
34876 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
34877 UNSPEC_VPERMR);
34879 else
34881 /* Invert the selector with a VNAND if available, else a VNOR.
34882 The VNAND is preferred for future fusion opportunities. */
34883 notx = gen_rtx_NOT (V16QImode, sel);
34884 iorx = (TARGET_P8_VECTOR
34885 ? gen_rtx_IOR (V16QImode, notx, notx)
34886 : gen_rtx_AND (V16QImode, notx, notx));
34887 emit_insn (gen_rtx_SET (norreg, iorx));
34889 /* Permute with operands reversed and adjusted selector. */
34890 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34891 UNSPEC_VPERM);
34894 /* Copy into target, possibly by way of a register. */
34895 if (!REG_P (target))
34897 emit_move_insn (tmp, unspec);
34898 unspec = tmp;
34901 emit_move_insn (target, unspec);
34904 /* Expand an Altivec constant permutation. Return true if we match
34905 an efficient implementation; false to fall back to VPERM.
34907 OP0 and OP1 are the input vectors and TARGET is the output vector.
34908 SEL specifies the constant permutation vector. */
34910 static bool
34911 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
34912 const vec_perm_indices &sel)
34914 struct altivec_perm_insn {
34915 HOST_WIDE_INT mask;
34916 enum insn_code impl;
34917 unsigned char perm[16];
34919 static const struct altivec_perm_insn patterns[] = {
34920 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
34921 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
34922 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
34923 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
34924 { OPTION_MASK_ALTIVEC,
34925 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
34926 : CODE_FOR_altivec_vmrglb_direct),
34927 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
34928 { OPTION_MASK_ALTIVEC,
34929 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
34930 : CODE_FOR_altivec_vmrglh_direct),
34931 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
34932 { OPTION_MASK_ALTIVEC,
34933 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
34934 : CODE_FOR_altivec_vmrglw_direct),
34935 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
34936 { OPTION_MASK_ALTIVEC,
34937 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
34938 : CODE_FOR_altivec_vmrghb_direct),
34939 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
34940 { OPTION_MASK_ALTIVEC,
34941 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
34942 : CODE_FOR_altivec_vmrghh_direct),
34943 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
34944 { OPTION_MASK_ALTIVEC,
34945 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
34946 : CODE_FOR_altivec_vmrghw_direct),
34947 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
34948 { OPTION_MASK_P8_VECTOR,
34949 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
34950 : CODE_FOR_p8_vmrgow_v4sf_direct),
34951 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
34952 { OPTION_MASK_P8_VECTOR,
34953 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
34954 : CODE_FOR_p8_vmrgew_v4sf_direct),
34955 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
34958 unsigned int i, j, elt, which;
34959 unsigned char perm[16];
34960 rtx x;
34961 bool one_vec;
34963 /* Unpack the constant selector. */
34964 for (i = which = 0; i < 16; ++i)
34966 elt = sel[i] & 31;
34967 which |= (elt < 16 ? 1 : 2);
34968 perm[i] = elt;
34971 /* Simplify the constant selector based on operands. */
34972 switch (which)
34974 default:
34975 gcc_unreachable ();
34977 case 3:
34978 one_vec = false;
34979 if (!rtx_equal_p (op0, op1))
34980 break;
34981 /* FALLTHRU */
34983 case 2:
34984 for (i = 0; i < 16; ++i)
34985 perm[i] &= 15;
34986 op0 = op1;
34987 one_vec = true;
34988 break;
34990 case 1:
34991 op1 = op0;
34992 one_vec = true;
34993 break;
34996 /* Look for splat patterns. */
34997 if (one_vec)
34999 elt = perm[0];
35001 for (i = 0; i < 16; ++i)
35002 if (perm[i] != elt)
35003 break;
35004 if (i == 16)
35006 if (!BYTES_BIG_ENDIAN)
35007 elt = 15 - elt;
35008 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35009 return true;
35012 if (elt % 2 == 0)
35014 for (i = 0; i < 16; i += 2)
35015 if (perm[i] != elt || perm[i + 1] != elt + 1)
35016 break;
35017 if (i == 16)
35019 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35020 x = gen_reg_rtx (V8HImode);
35021 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35022 GEN_INT (field)));
35023 emit_move_insn (target, gen_lowpart (V16QImode, x));
35024 return true;
35028 if (elt % 4 == 0)
35030 for (i = 0; i < 16; i += 4)
35031 if (perm[i] != elt
35032 || perm[i + 1] != elt + 1
35033 || perm[i + 2] != elt + 2
35034 || perm[i + 3] != elt + 3)
35035 break;
35036 if (i == 16)
35038 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35039 x = gen_reg_rtx (V4SImode);
35040 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35041 GEN_INT (field)));
35042 emit_move_insn (target, gen_lowpart (V16QImode, x));
35043 return true;
35048 /* Look for merge and pack patterns. */
35049 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35051 bool swapped;
35053 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35054 continue;
35056 elt = patterns[j].perm[0];
35057 if (perm[0] == elt)
35058 swapped = false;
35059 else if (perm[0] == elt + 16)
35060 swapped = true;
35061 else
35062 continue;
35063 for (i = 1; i < 16; ++i)
35065 elt = patterns[j].perm[i];
35066 if (swapped)
35067 elt = (elt >= 16 ? elt - 16 : elt + 16);
35068 else if (one_vec && elt >= 16)
35069 elt -= 16;
35070 if (perm[i] != elt)
35071 break;
35073 if (i == 16)
35075 enum insn_code icode = patterns[j].impl;
35076 machine_mode omode = insn_data[icode].operand[0].mode;
35077 machine_mode imode = insn_data[icode].operand[1].mode;
35079 /* For little-endian, don't use vpkuwum and vpkuhum if the
35080 underlying vector type is not V4SI or V8HI, respectively.
35081 For example, using vpkuwum with a V8HI picks up the even
35082 halfwords (BE numbering) when the even halfwords (LE
35083 numbering) are what we need. */
35084 if (!BYTES_BIG_ENDIAN
35085 && icode == CODE_FOR_altivec_vpkuwum_direct
35086 && ((GET_CODE (op0) == REG
35087 && GET_MODE (op0) != V4SImode)
35088 || (GET_CODE (op0) == SUBREG
35089 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35090 continue;
35091 if (!BYTES_BIG_ENDIAN
35092 && icode == CODE_FOR_altivec_vpkuhum_direct
35093 && ((GET_CODE (op0) == REG
35094 && GET_MODE (op0) != V8HImode)
35095 || (GET_CODE (op0) == SUBREG
35096 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35097 continue;
35099 /* For little-endian, the two input operands must be swapped
35100 (or swapped back) to ensure proper right-to-left numbering
35101 from 0 to 2N-1. */
35102 if (swapped ^ !BYTES_BIG_ENDIAN)
35103 std::swap (op0, op1);
35104 if (imode != V16QImode)
35106 op0 = gen_lowpart (imode, op0);
35107 op1 = gen_lowpart (imode, op1);
35109 if (omode == V16QImode)
35110 x = target;
35111 else
35112 x = gen_reg_rtx (omode);
35113 emit_insn (GEN_FCN (icode) (x, op0, op1));
35114 if (omode != V16QImode)
35115 emit_move_insn (target, gen_lowpart (V16QImode, x));
35116 return true;
35120 if (!BYTES_BIG_ENDIAN)
35122 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35123 return true;
35126 return false;
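/* Illustrative sketch (not part of the build): the WHICH classification
   used above, in scalar C.

     unsigned int which = 0;
     for (i = 0; i < 16; ++i)
       which |= ((sel[i] & 31) < 16) ? 1 : 2;

   which == 1: only OP0 is referenced; which == 2: only OP1 (folded onto
   OP0 after masking the selector with 15); which == 3: a genuine
   two-operand permute, unless OP0 and OP1 are the same register.  */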
35129 /* Expand a VSX Permute Doubleword constant permutation.
35130 Return true if we match an efficient implementation. */
35132 static bool
35133 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35134 unsigned char perm0, unsigned char perm1)
35136 rtx x;
35138 /* If both selectors come from the same operand, fold to single op. */
35139 if ((perm0 & 2) == (perm1 & 2))
35141 if (perm0 & 2)
35142 op0 = op1;
35143 else
35144 op1 = op0;
35146 /* If both operands are equal, fold to simpler permutation. */
35147 if (rtx_equal_p (op0, op1))
35149 perm0 = perm0 & 1;
35150 perm1 = (perm1 & 1) + 2;
35152 /* If the first selector comes from the second operand, swap. */
35153 else if (perm0 & 2)
35155 if (perm1 & 2)
35156 return false;
35157 perm0 -= 2;
35158 perm1 += 2;
35159 std::swap (op0, op1);
35161 /* If the second selector does not come from the second operand, fail. */
35162 else if ((perm1 & 2) == 0)
35163 return false;
35165 /* Success! */
35166 if (target != NULL)
35168 machine_mode vmode, dmode;
35169 rtvec v;
35171 vmode = GET_MODE (target);
35172 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35173 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35174 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35175 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35176 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35177 emit_insn (gen_rtx_SET (target, x));
35179 return true;
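/* Illustrative sketch (not part of the build): PERM0 and PERM1 are
   two-bit doubleword selectors into the concatenation {OP0,OP1},
   matching the xxpermdi DM field.

     result[0] = (perm0 & 2) ? op1[perm0 & 1] : op0[perm0 & 1];
     result[1] = (perm1 & 2) ? op1[perm1 & 1] : op0[perm1 & 1];

   The canonicalizations above reduce every matchable case to a selector
   where PERM0 picks from OP0 and PERM1 picks from OP1, which is the
   shape the VEC_CONCAT/VEC_SELECT pattern emitted at the end expects.  */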
35182 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35184 static bool
35185 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35186 rtx op1, const vec_perm_indices &sel)
35188 bool testing_p = !target;
35190 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35191 if (TARGET_ALTIVEC && testing_p)
35192 return true;
35194 /* Check for ps_merge* or xxpermdi insns. */
35195 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35197 if (testing_p)
35199 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35200 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35202 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35203 return true;
35206 if (TARGET_ALTIVEC)
35208 /* Force the target-independent code to lower to V16QImode. */
35209 if (vmode != V16QImode)
35210 return false;
35211 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35212 return true;
35215 return false;
35218 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35219 OP0 and OP1 are the input vectors and TARGET is the output vector.
35220 PERM specifies the constant permutation vector. */
35222 static void
35223 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35224 machine_mode vmode, const vec_perm_builder &perm)
35226 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35227 if (x != target)
35228 emit_move_insn (target, x);
35231 /* Expand an extract even operation. */
35233 void
35234 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35236 machine_mode vmode = GET_MODE (target);
35237 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35238 vec_perm_builder perm (nelt, nelt, 1);
35240 for (i = 0; i < nelt; i++)
35241 perm.quick_push (i * 2);
35243 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35246 /* Expand a vector interleave operation. */
35248 void
35249 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35251 machine_mode vmode = GET_MODE (target);
35252 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35253 vec_perm_builder perm (nelt, nelt, 1);
35255 high = (highp ? 0 : nelt / 2);
35256 for (i = 0; i < nelt / 2; i++)
35258 perm.quick_push (i + high);
35259 perm.quick_push (i + nelt + high);
35262 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
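/* Illustrative sketch (not part of the build): the selectors built by
   the two expanders above, shown for a 4-element vector (nelt == 4).

     extract even:        { 0, 2, 4, 6 }
     interleave, highp:   { 0, 4, 1, 5 }
     interleave, !highp:  { 2, 6, 3, 7 }

   As usual for vec_perm selectors, indices 0..nelt-1 name elements of
   OP0 and indices nelt..2*nelt-1 name elements of OP1.  */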
35265 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35266 void
35267 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35269 HOST_WIDE_INT hwi_scale (scale);
35270 REAL_VALUE_TYPE r_pow;
35271 rtvec v = rtvec_alloc (2);
35272 rtx elt;
35273 rtx scale_vec = gen_reg_rtx (V2DFmode);
35274 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35275 elt = const_double_from_real_value (r_pow, DFmode);
35276 RTVEC_ELT (v, 0) = elt;
35277 RTVEC_ELT (v, 1) = elt;
35278 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35279 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35282 /* Return an RTX representing where to find the function value of a
35283 function returning MODE. */
35284 static rtx
35285 rs6000_complex_function_value (machine_mode mode)
35287 unsigned int regno;
35288 rtx r1, r2;
35289 machine_mode inner = GET_MODE_INNER (mode);
35290 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35292 if (TARGET_FLOAT128_TYPE
35293 && (mode == KCmode
35294 || (mode == TCmode && TARGET_IEEEQUAD)))
35295 regno = ALTIVEC_ARG_RETURN;
35297 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35298 regno = FP_ARG_RETURN;
35300 else
35302 regno = GP_ARG_RETURN;
35304 /* 32-bit is OK since it'll go in r3/r4. */
35305 if (TARGET_32BIT && inner_bytes >= 4)
35306 return gen_rtx_REG (mode, regno);
35309 if (inner_bytes >= 8)
35310 return gen_rtx_REG (mode, regno);
35312 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35313 const0_rtx);
35314 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35315 GEN_INT (inner_bytes));
35316 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35319 /* Return an rtx describing a return value of MODE as a PARALLEL
35320 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35321 stride REG_STRIDE. */
35323 static rtx
35324 rs6000_parallel_return (machine_mode mode,
35325 int n_elts, machine_mode elt_mode,
35326 unsigned int regno, unsigned int reg_stride)
35328 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35330 int i;
35331 for (i = 0; i < n_elts; i++)
35333 rtx r = gen_rtx_REG (elt_mode, regno);
35334 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35335 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35336 regno += reg_stride;
35339 return par;
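/* Illustrative sketch (not part of the build): for -m32 -mpowerpc64, a
   DImode return value is described by

     rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1);

   i.e. a PARALLEL of two SImode registers, r3 at byte offset 0 and r4
   at byte offset 4, which is how the 32-bit ABI splits a 64-bit value
   across the general-purpose return registers.  */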
35342 /* Target hook for TARGET_FUNCTION_VALUE.
35344 An integer value is in r3 and a floating-point value is in fp1,
35345 unless -msoft-float. */
35347 static rtx
35348 rs6000_function_value (const_tree valtype,
35349 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35350 bool outgoing ATTRIBUTE_UNUSED)
35352 machine_mode mode;
35353 unsigned int regno;
35354 machine_mode elt_mode;
35355 int n_elts;
35357 /* Special handling for structs in darwin64. */
35358 if (TARGET_MACHO
35359 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35361 CUMULATIVE_ARGS valcum;
35362 rtx valret;
35364 valcum.words = 0;
35365 valcum.fregno = FP_ARG_MIN_REG;
35366 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35367 /* Do a trial code generation as if this were going to be passed as
35368 an argument; if any part goes in memory, we return NULL. */
35369 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35370 if (valret)
35371 return valret;
35372 /* Otherwise fall through to standard ABI rules. */
35375 mode = TYPE_MODE (valtype);
35377 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35378 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35380 int first_reg, n_regs;
35382 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35384 /* _Decimal128 must use even/odd register pairs. */
35385 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35386 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35388 else
35390 first_reg = ALTIVEC_ARG_RETURN;
35391 n_regs = 1;
35394 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35397 /* Some return value types need to be split for the 32-bit ABI with -mpowerpc64. */
35398 if (TARGET_32BIT && TARGET_POWERPC64)
35399 switch (mode)
35401 default:
35402 break;
35403 case E_DImode:
35404 case E_SCmode:
35405 case E_DCmode:
35406 case E_TCmode:
35407 int count = GET_MODE_SIZE (mode) / 4;
35408 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35411 if ((INTEGRAL_TYPE_P (valtype)
35412 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35413 || POINTER_TYPE_P (valtype))
35414 mode = TARGET_32BIT ? SImode : DImode;
35416 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35417 /* _Decimal128 must use an even/odd register pair. */
35418 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35419 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35420 && !FLOAT128_VECTOR_P (mode))
35421 regno = FP_ARG_RETURN;
35422 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35423 && targetm.calls.split_complex_arg)
35424 return rs6000_complex_function_value (mode);
35425 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35426 return register is used in both cases, and we won't see V2DImode/V2DFmode
35427 for pure altivec, combine the two cases. */
35428 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35429 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35430 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35431 regno = ALTIVEC_ARG_RETURN;
35432 else
35433 regno = GP_ARG_RETURN;
35435 return gen_rtx_REG (mode, regno);
35438 /* Define how to find the value returned by a library function
35439 assuming the value has mode MODE. */
35440 rtx
35441 rs6000_libcall_value (machine_mode mode)
35443 unsigned int regno;
35445 /* A long long return value needs to be split for the 32-bit ABI with -mpowerpc64. */
35446 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35447 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35449 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35450 /* _Decimal128 must use an even/odd register pair. */
35451 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35452 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35453 regno = FP_ARG_RETURN;
35454 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35455 return register is used in both cases, and we won't see V2DImode/V2DFmode
35456 for pure altivec, combine the two cases. */
35457 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35458 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35459 regno = ALTIVEC_ARG_RETURN;
35460 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35461 return rs6000_complex_function_value (mode);
35462 else
35463 regno = GP_ARG_RETURN;
35465 return gen_rtx_REG (mode, regno);
35468 /* Compute register pressure classes. We implement the target hook to avoid
35469 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35470 lead to incorrect estimates of the number of available registers and
35471 therefore increased register pressure and spilling. */
35472 static int
35473 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35475 int n;
35477 n = 0;
35478 pressure_classes[n++] = GENERAL_REGS;
35479 if (TARGET_VSX)
35480 pressure_classes[n++] = VSX_REGS;
35481 else
35483 if (TARGET_ALTIVEC)
35484 pressure_classes[n++] = ALTIVEC_REGS;
35485 if (TARGET_HARD_FLOAT)
35486 pressure_classes[n++] = FLOAT_REGS;
35488 pressure_classes[n++] = CR_REGS;
35489 pressure_classes[n++] = SPECIAL_REGS;
35491 return n;
35494 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35495 Frame pointer elimination is automatically handled.
35497 For the RS/6000, if frame pointer elimination is being done, we would like
35498 to convert ap into fp, not sp.
35500 We need r30 if -mminimal-toc was specified and there are constant pool
35501 references. */
35503 static bool
35504 rs6000_can_eliminate (const int from, const int to)
35506 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35507 ? ! frame_pointer_needed
35508 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35509 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35510 || constant_pool_empty_p ()
35511 : true);
35514 /* Define the offset between two registers, FROM to be eliminated and its
35515 replacement TO, at the start of a routine. */
35516 HOST_WIDE_INT
35517 rs6000_initial_elimination_offset (int from, int to)
35519 rs6000_stack_t *info = rs6000_stack_info ();
35520 HOST_WIDE_INT offset;
35522 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35523 offset = info->push_p ? 0 : -info->total_size;
35524 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35526 offset = info->push_p ? 0 : -info->total_size;
35527 if (FRAME_GROWS_DOWNWARD)
35528 offset += info->fixed_size + info->vars_size + info->parm_size;
35530 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35531 offset = FRAME_GROWS_DOWNWARD
35532 ? info->fixed_size + info->vars_size + info->parm_size
35533 : 0;
35534 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35535 offset = info->total_size;
35536 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35537 offset = info->push_p ? info->total_size : 0;
35538 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35539 offset = 0;
35540 else
35541 gcc_unreachable ();
35543 return offset;
35546 /* Fill in sizes of registers used by unwinder. */
35548 static void
35549 rs6000_init_dwarf_reg_sizes_extra (tree address)
35551 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35553 int i;
35554 machine_mode mode = TYPE_MODE (char_type_node);
35555 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35556 rtx mem = gen_rtx_MEM (BLKmode, addr);
35557 rtx value = gen_int_mode (16, mode);
35559 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35560 The unwinder still needs to know the size of Altivec registers. */
35562 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35564 int column = DWARF_REG_TO_UNWIND_COLUMN
35565 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35566 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35568 emit_move_insn (adjust_address (mem, mode, offset), value);
35573 /* Map internal gcc register numbers to debug format register numbers.
35574 FORMAT specifies the type of debug register number to use:
35575 0 -- debug information, except for frame-related sections
35576 1 -- DWARF .debug_frame section
35577 2 -- DWARF .eh_frame section */
35579 unsigned int
35580 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35582 /* Except for the above, we use the internal number for non-DWARF
35583 debug information, and also for .eh_frame. */
35584 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35585 return regno;
35587 /* On some platforms, we use the standard DWARF register
35588 numbering for .debug_info and .debug_frame. */
35589 #ifdef RS6000_USE_DWARF_NUMBERING
35590 if (regno <= 63)
35591 return regno;
35592 if (regno == LR_REGNO)
35593 return 108;
35594 if (regno == CTR_REGNO)
35595 return 109;
35596 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35597 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35598 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35599 to the DWARF reg for CR. */
35600 if (format == 1 && regno == CR2_REGNO)
35601 return 64;
35602 if (CR_REGNO_P (regno))
35603 return regno - CR0_REGNO + 86;
35604 if (regno == CA_REGNO)
35605 return 101; /* XER */
35606 if (ALTIVEC_REGNO_P (regno))
35607 return regno - FIRST_ALTIVEC_REGNO + 1124;
35608 if (regno == VRSAVE_REGNO)
35609 return 356;
35610 if (regno == VSCR_REGNO)
35611 return 67;
35612 #endif
35613 return regno;
35616 /* Target hook for eh_return_filter_mode. */
35617 static scalar_int_mode
35618 rs6000_eh_return_filter_mode (void)
35620 return TARGET_32BIT ? SImode : word_mode;
35623 /* Target hook for scalar_mode_supported_p. */
35624 static bool
35625 rs6000_scalar_mode_supported_p (scalar_mode mode)
35627 /* -m32 does not support TImode. This is the default, from
35628 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35629 same ABI as for -m32. But default_scalar_mode_supported_p allows
35630 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35631 for -mpowerpc64. */
35632 if (TARGET_32BIT && mode == TImode)
35633 return false;
35635 if (DECIMAL_FLOAT_MODE_P (mode))
35636 return default_decimal_float_supported_p ();
35637 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35638 return true;
35639 else
35640 return default_scalar_mode_supported_p (mode);
35643 /* Target hook for vector_mode_supported_p. */
35644 static bool
35645 rs6000_vector_mode_supported_p (machine_mode mode)
35647 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35648 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35649 double-double. */
35650 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35651 return true;
35653 else
35654 return false;
35657 /* Target hook for floatn_mode. */
35658 static opt_scalar_float_mode
35659 rs6000_floatn_mode (int n, bool extended)
35661 if (extended)
35663 switch (n)
35665 case 32:
35666 return DFmode;
35668 case 64:
35669 if (TARGET_FLOAT128_TYPE)
35670 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35671 else
35672 return opt_scalar_float_mode ();
35674 case 128:
35675 return opt_scalar_float_mode ();
35677 default:
35678 /* Those are the only valid _FloatNx types. */
35679 gcc_unreachable ();
35682 else
35684 switch (n)
35686 case 32:
35687 return SFmode;
35689 case 64:
35690 return DFmode;
35692 case 128:
35693 if (TARGET_FLOAT128_TYPE)
35694 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35695 else
35696 return opt_scalar_float_mode ();
35698 default:
35699 return opt_scalar_float_mode ();
35705 /* Target hook for c_mode_for_suffix. */
35706 static machine_mode
35707 rs6000_c_mode_for_suffix (char suffix)
35709 if (TARGET_FLOAT128_TYPE)
35711 if (suffix == 'q' || suffix == 'Q')
35712 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35714 /* At the moment, we are not defining a suffix for IBM extended double.
35715 If/when the default for -mabi=ieeelongdouble is changed, and we want
35716 to support __ibm128 constants in legacy library code, we may need to
35717 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35718 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35719 __float80 constants. */
35722 return VOIDmode;
35725 /* Target hook for invalid_arg_for_unprototyped_fn. */
35726 static const char *
35727 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35729 return (!rs6000_darwin64_abi
35730 && typelist == 0
35731 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35732 && (funcdecl == NULL_TREE
35733 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35734 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35735 ? N_("AltiVec argument passed to unprototyped function")
35736 : NULL;
35739 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35740 setup by using the __stack_chk_fail_local hidden function instead of
35741 calling __stack_chk_fail directly. Otherwise it is better to call
35742 __stack_chk_fail directly. */
35744 static tree ATTRIBUTE_UNUSED
35745 rs6000_stack_protect_fail (void)
35747 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35748 ? default_hidden_stack_protect_fail ()
35749 : default_external_stack_protect_fail ();
35752 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35754 #if TARGET_ELF
35755 static unsigned HOST_WIDE_INT
35756 rs6000_asan_shadow_offset (void)
35758 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35760 #endif
35762 /* Mask options that we want to support inside attribute((target)) and
35763 #pragma GCC target operations. Note, we do not include things like
35764 64/32-bit, endianness, hard/soft floating point, etc. that would have
35765 different calling sequences. */
35767 struct rs6000_opt_mask {
35768 const char *name; /* option name */
35769 HOST_WIDE_INT mask; /* mask to set */
35770 bool invert; /* invert sense of mask */
35771 bool valid_target; /* option is a target option */
35774 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35776 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35777 { "cmpb", OPTION_MASK_CMPB, false, true },
35778 { "crypto", OPTION_MASK_CRYPTO, false, true },
35779 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35780 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35781 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35782 false, true },
35783 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
35784 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
35785 { "fprnd", OPTION_MASK_FPRND, false, true },
35786 { "hard-dfp", OPTION_MASK_DFP, false, true },
35787 { "htm", OPTION_MASK_HTM, false, true },
35788 { "isel", OPTION_MASK_ISEL, false, true },
35789 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35790 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35791 { "modulo", OPTION_MASK_MODULO, false, true },
35792 { "mulhw", OPTION_MASK_MULHW, false, true },
35793 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35794 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35795 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35796 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35797 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35798 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35799 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35800 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35801 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35802 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35803 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35804 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35805 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35806 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35807 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35808 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35809 { "string", 0, false, true },
35810 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35811 { "update", OPTION_MASK_NO_UPDATE, true , true },
35812 { "vsx", OPTION_MASK_VSX, false, true },
35813 #ifdef OPTION_MASK_64BIT
35814 #if TARGET_AIX_OS
35815 { "aix64", OPTION_MASK_64BIT, false, false },
35816 { "aix32", OPTION_MASK_64BIT, true, false },
35817 #else
35818 { "64", OPTION_MASK_64BIT, false, false },
35819 { "32", OPTION_MASK_64BIT, true, false },
35820 #endif
35821 #endif
35822 #ifdef OPTION_MASK_EABI
35823 { "eabi", OPTION_MASK_EABI, false, false },
35824 #endif
35825 #ifdef OPTION_MASK_LITTLE_ENDIAN
35826 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35827 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35828 #endif
35829 #ifdef OPTION_MASK_RELOCATABLE
35830 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35831 #endif
35832 #ifdef OPTION_MASK_STRICT_ALIGN
35833 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
35834 #endif
35835 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
35836 { "string", 0, false, false },
35839 /* Builtin mask mapping for printing the flags. */
35840 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
35842 { "altivec", RS6000_BTM_ALTIVEC, false, false },
35843 { "vsx", RS6000_BTM_VSX, false, false },
35844 { "fre", RS6000_BTM_FRE, false, false },
35845 { "fres", RS6000_BTM_FRES, false, false },
35846 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
35847 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
35848 { "popcntd", RS6000_BTM_POPCNTD, false, false },
35849 { "cell", RS6000_BTM_CELL, false, false },
35850 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
35851 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
35852 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
35853 { "crypto", RS6000_BTM_CRYPTO, false, false },
35854 { "htm", RS6000_BTM_HTM, false, false },
35855 { "hard-dfp", RS6000_BTM_DFP, false, false },
35856 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
35857 { "long-double-128", RS6000_BTM_LDBL128, false, false },
35858 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
35859 { "float128", RS6000_BTM_FLOAT128, false, false },
35860 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
35863 /* Option variables that we want to support inside attribute((target)) and
35864 #pragma GCC target operations. */
35866 struct rs6000_opt_var {
35867 const char *name; /* option name */
35868 size_t global_offset; /* offset of the option in global_options. */
35869 size_t target_offset; /* offset of the option in target options. */
35872 static struct rs6000_opt_var const rs6000_opt_vars[] =
35874 { "friz",
35875 offsetof (struct gcc_options, x_TARGET_FRIZ),
35876 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
35877 { "avoid-indexed-addresses",
35878 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
35879 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
35880 { "longcall",
35881 offsetof (struct gcc_options, x_rs6000_default_long_calls),
35882 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
35883 { "optimize-swaps",
35884 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
35885 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
35886 { "allow-movmisalign",
35887 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
35888 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
35889 { "sched-groups",
35890 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
35891 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
35892 { "always-hint",
35893 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
35894 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
35895 { "align-branch-targets",
35896 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
35897 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
35898 { "tls-markers",
35899 offsetof (struct gcc_options, x_tls_markers),
35900 offsetof (struct cl_target_option, x_tls_markers), },
35901 { "sched-prolog",
35902 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35903 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35904 { "sched-epilog",
35905 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35906 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35907 { "speculate-indirect-jumps",
35908 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
35909 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
35912 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
35913 parsing. Return true if there were no errors. */
35915 static bool
35916 rs6000_inner_target_options (tree args, bool attr_p)
35918 bool ret = true;
35920 if (args == NULL_TREE)
35923 else if (TREE_CODE (args) == STRING_CST)
35925 char *p = ASTRDUP (TREE_STRING_POINTER (args));
35926 char *q;
35928 while ((q = strtok (p, ",")) != NULL)
35930 bool error_p = false;
35931 bool not_valid_p = false;
35932 const char *cpu_opt = NULL;
35934 p = NULL;
35935 if (strncmp (q, "cpu=", 4) == 0)
35937 int cpu_index = rs6000_cpu_name_lookup (q+4);
35938 if (cpu_index >= 0)
35939 rs6000_cpu_index = cpu_index;
35940 else
35942 error_p = true;
35943 cpu_opt = q+4;
35946 else if (strncmp (q, "tune=", 5) == 0)
35948 int tune_index = rs6000_cpu_name_lookup (q+5);
35949 if (tune_index >= 0)
35950 rs6000_tune_index = tune_index;
35951 else
35953 error_p = true;
35954 cpu_opt = q+5;
35957 else
35959 size_t i;
35960 bool invert = false;
35961 char *r = q;
35963 error_p = true;
35964 if (strncmp (r, "no-", 3) == 0)
35966 invert = true;
35967 r += 3;
35970 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
35971 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
35973 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
35975 if (!rs6000_opt_masks[i].valid_target)
35976 not_valid_p = true;
35977 else
35979 error_p = false;
35980 rs6000_isa_flags_explicit |= mask;
35982 /* VSX needs altivec, so -mvsx automagically sets
35983 altivec and disables -mavoid-indexed-addresses. */
35984 if (!invert)
35986 if (mask == OPTION_MASK_VSX)
35988 mask |= OPTION_MASK_ALTIVEC;
35989 TARGET_AVOID_XFORM = 0;
35993 if (rs6000_opt_masks[i].invert)
35994 invert = !invert;
35996 if (invert)
35997 rs6000_isa_flags &= ~mask;
35998 else
35999 rs6000_isa_flags |= mask;
36001 break;
36004 if (error_p && !not_valid_p)
36006 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36007 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36009 size_t j = rs6000_opt_vars[i].global_offset;
36010 *((int *) ((char *)&global_options + j)) = !invert;
36011 error_p = false;
36012 not_valid_p = false;
36013 break;
36018 if (error_p)
36020 const char *eprefix, *esuffix;
36022 ret = false;
36023 if (attr_p)
36025 eprefix = "__attribute__((__target__(";
36026 esuffix = ")))";
36028 else
36030 eprefix = "#pragma GCC target ";
36031 esuffix = "";
36034 if (cpu_opt)
36035 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36036 q, esuffix);
36037 else if (not_valid_p)
36038 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36039 else
36040 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36045 else if (TREE_CODE (args) == TREE_LIST)
36049 tree value = TREE_VALUE (args);
36050 if (value)
36052 bool ret2 = rs6000_inner_target_options (value, attr_p);
36053 if (!ret2)
36054 ret = false;
36056 args = TREE_CHAIN (args);
36058 while (args != NULL_TREE);
36061 else
36063 error ("attribute %<target%> argument not a string");
36064 return false;
36067 return ret;
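/* Illustrative usage (not part of the build): strings accepted by the
   parser above, in either spelling.

     #pragma GCC target ("cpu=power8,vsx,no-crypto")

     __attribute__((__target__("power9-vector,optimize-swaps")))
     int foo (void);

   Each comma-separated token is either a cpu=/tune= setting, a mask
   name from rs6000_opt_masks (optionally prefixed with "no-"), or a
   variable name from rs6000_opt_vars.  */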
36070 /* Print out the target options as a list for -mdebug=target. */
36072 static void
36073 rs6000_debug_target_options (tree args, const char *prefix)
36075 if (args == NULL_TREE)
36076 fprintf (stderr, "%s<NULL>", prefix);
36078 else if (TREE_CODE (args) == STRING_CST)
36080 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36081 char *q;
36083 while ((q = strtok (p, ",")) != NULL)
36085 p = NULL;
36086 fprintf (stderr, "%s\"%s\"", prefix, q);
36087 prefix = ", ";
36091 else if (TREE_CODE (args) == TREE_LIST)
36095 tree value = TREE_VALUE (args);
36096 if (value)
36098 rs6000_debug_target_options (value, prefix);
36099 prefix = ", ";
36101 args = TREE_CHAIN (args);
36103 while (args != NULL_TREE);
36106 else
36107 gcc_unreachable ();
36109 return;
36113 /* Hook to validate attribute((target("..."))). */
36115 static bool
36116 rs6000_valid_attribute_p (tree fndecl,
36117 tree ARG_UNUSED (name),
36118 tree args,
36119 int flags)
36121 struct cl_target_option cur_target;
36122 bool ret;
36123 tree old_optimize;
36124 tree new_target, new_optimize;
36125 tree func_optimize;
36127 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36129 if (TARGET_DEBUG_TARGET)
36131 tree tname = DECL_NAME (fndecl);
36132 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36133 if (tname)
36134 fprintf (stderr, "function: %.*s\n",
36135 (int) IDENTIFIER_LENGTH (tname),
36136 IDENTIFIER_POINTER (tname));
36137 else
36138 fprintf (stderr, "function: unknown\n");
36140 fprintf (stderr, "args:");
36141 rs6000_debug_target_options (args, " ");
36142 fprintf (stderr, "\n");
36144 if (flags)
36145 fprintf (stderr, "flags: 0x%x\n", flags);
36147 fprintf (stderr, "--------------------\n");
36150 /* attribute((target("default"))) does nothing, beyond
36151 affecting multi-versioning. */
36152 if (TREE_VALUE (args)
36153 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36154 && TREE_CHAIN (args) == NULL_TREE
36155 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36156 return true;
36158 old_optimize = build_optimization_node (&global_options);
36159 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36161 /* If the function changed the optimization levels as well as setting target
36162 options, start with the optimizations specified. */
36163 if (func_optimize && func_optimize != old_optimize)
36164 cl_optimization_restore (&global_options,
36165 TREE_OPTIMIZATION (func_optimize));
36167 /* The target attributes may also change some optimization flags, so update
36168 the optimization options if necessary. */
36169 cl_target_option_save (&cur_target, &global_options);
36170 rs6000_cpu_index = rs6000_tune_index = -1;
36171 ret = rs6000_inner_target_options (args, true);
36173 /* Set up any additional state. */
36174 if (ret)
36176 ret = rs6000_option_override_internal (false);
36177 new_target = build_target_option_node (&global_options);
36179 else
36180 new_target = NULL;
36182 new_optimize = build_optimization_node (&global_options);
36184 if (!new_target)
36185 ret = false;
36187 else if (fndecl)
36189 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36191 if (old_optimize != new_optimize)
36192 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36195 cl_target_option_restore (&global_options, &cur_target);
36197 if (old_optimize != new_optimize)
36198 cl_optimization_restore (&global_options,
36199 TREE_OPTIMIZATION (old_optimize));
36201 return ret;
36205 /* Hook to validate the current #pragma GCC target and set the state, and
36206 update the macros based on what was changed. If ARGS is NULL, then
36207 POP_TARGET is used to reset the options. */
36209 bool
36210 rs6000_pragma_target_parse (tree args, tree pop_target)
36212 tree prev_tree = build_target_option_node (&global_options);
36213 tree cur_tree;
36214 struct cl_target_option *prev_opt, *cur_opt;
36215 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36216 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36218 if (TARGET_DEBUG_TARGET)
36220 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36221 fprintf (stderr, "args:");
36222 rs6000_debug_target_options (args, " ");
36223 fprintf (stderr, "\n");
36225 if (pop_target)
36227 fprintf (stderr, "pop_target:\n");
36228 debug_tree (pop_target);
36230 else
36231 fprintf (stderr, "pop_target: <NULL>\n");
36233 fprintf (stderr, "--------------------\n");
36236 if (! args)
36238 cur_tree = ((pop_target)
36239 ? pop_target
36240 : target_option_default_node);
36241 cl_target_option_restore (&global_options,
36242 TREE_TARGET_OPTION (cur_tree));
36244 else
36246 rs6000_cpu_index = rs6000_tune_index = -1;
36247 if (!rs6000_inner_target_options (args, false)
36248 || !rs6000_option_override_internal (false)
36249 || (cur_tree = build_target_option_node (&global_options))
36250 == NULL_TREE)
36252 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36253 fprintf (stderr, "invalid pragma\n");
36255 return false;
36259 target_option_current_node = cur_tree;
36260 rs6000_activate_target_options (target_option_current_node);
36262 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36263 change the macros that are defined. */
36264 if (rs6000_target_modify_macros_ptr)
36266 prev_opt = TREE_TARGET_OPTION (prev_tree);
36267 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36268 prev_flags = prev_opt->x_rs6000_isa_flags;
36270 cur_opt = TREE_TARGET_OPTION (cur_tree);
36271 cur_flags = cur_opt->x_rs6000_isa_flags;
36272 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36274 diff_bumask = (prev_bumask ^ cur_bumask);
36275 diff_flags = (prev_flags ^ cur_flags);
36277 if ((diff_flags != 0) || (diff_bumask != 0))
36279 /* Delete old macros. */
36280 rs6000_target_modify_macros_ptr (false,
36281 prev_flags & diff_flags,
36282 prev_bumask & diff_bumask);
36284 /* Define new macros. */
36285 rs6000_target_modify_macros_ptr (true,
36286 cur_flags & diff_flags,
36287 cur_bumask & diff_bumask);
36291 return true;
36295 /* Remember the last target of rs6000_set_current_function. */
36296 static GTY(()) tree rs6000_previous_fndecl;
36298 /* Restore target's globals from NEW_TREE and invalidate the
36299 rs6000_previous_fndecl cache. */
36301 void
36302 rs6000_activate_target_options (tree new_tree)
36304 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36305 if (TREE_TARGET_GLOBALS (new_tree))
36306 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36307 else if (new_tree == target_option_default_node)
36308 restore_target_globals (&default_target_globals);
36309 else
36310 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36311 rs6000_previous_fndecl = NULL_TREE;
36314 /* Establish appropriate back-end context for processing the function
36315 FNDECL. The argument might be NULL to indicate processing at top
36316 level, outside of any function scope. */
36317 static void
36318 rs6000_set_current_function (tree fndecl)
36320 if (TARGET_DEBUG_TARGET)
36322 fprintf (stderr, "\n==================== rs6000_set_current_function");
36324 if (fndecl)
36325 fprintf (stderr, ", fndecl %s (%p)",
36326 (DECL_NAME (fndecl)
36327 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36328 : "<unknown>"), (void *)fndecl);
36330 if (rs6000_previous_fndecl)
36331 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36333 fprintf (stderr, "\n");
36336 /* Only change the context if the function changes. This hook is called
36337 several times in the course of compiling a function, and we don't want to
36338 slow things down too much or call target_reinit when it isn't safe. */
36339 if (fndecl == rs6000_previous_fndecl)
36340 return;
36342 tree old_tree;
36343 if (rs6000_previous_fndecl == NULL_TREE)
36344 old_tree = target_option_current_node;
36345 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36346 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36347 else
36348 old_tree = target_option_default_node;
36350 tree new_tree;
36351 if (fndecl == NULL_TREE)
36353 if (old_tree != target_option_current_node)
36354 new_tree = target_option_current_node;
36355 else
36356 new_tree = NULL_TREE;
36358 else
36360 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36361 if (new_tree == NULL_TREE)
36362 new_tree = target_option_default_node;
36365 if (TARGET_DEBUG_TARGET)
36367 if (new_tree)
36369 fprintf (stderr, "\nnew fndecl target specific options:\n");
36370 debug_tree (new_tree);
36373 if (old_tree)
36375 fprintf (stderr, "\nold fndecl target specific options:\n");
36376 debug_tree (old_tree);
36379 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36380 fprintf (stderr, "--------------------\n");
36383 if (new_tree && old_tree != new_tree)
36384 rs6000_activate_target_options (new_tree);
36386 if (fndecl)
36387 rs6000_previous_fndecl = fndecl;
36391 /* Save the current options. */
36393 static void
36394 rs6000_function_specific_save (struct cl_target_option *ptr,
36395 struct gcc_options *opts)
36397 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36398 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36401 /* Restore the current options. */
36403 static void
36404 rs6000_function_specific_restore (struct gcc_options *opts,
36405 struct cl_target_option *ptr)
36408 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36409 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36410 (void) rs6000_option_override_internal (false);
36413 /* Print the current options. */
36415 static void
36416 rs6000_function_specific_print (FILE *file, int indent,
36417 struct cl_target_option *ptr)
36419 rs6000_print_isa_options (file, indent, "Isa options set",
36420 ptr->x_rs6000_isa_flags);
36422 rs6000_print_isa_options (file, indent, "Isa options explicit",
36423 ptr->x_rs6000_isa_flags_explicit);
36426 /* Helper function to print the current isa or misc options on a line. */
36428 static void
36429 rs6000_print_options_internal (FILE *file,
36430 int indent,
36431 const char *string,
36432 HOST_WIDE_INT flags,
36433 const char *prefix,
36434 const struct rs6000_opt_mask *opts,
36435 size_t num_elements)
36437 size_t i;
36438 size_t start_column = 0;
36439 size_t cur_column;
36440 size_t max_column = 120;
36441 size_t prefix_len = strlen (prefix);
36442 size_t comma_len = 0;
36443 const char *comma = "";
36445 if (indent)
36446 start_column += fprintf (file, "%*s", indent, "");
36448 if (!flags)
36450 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
36451 return;
36454 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
36456 /* Print the various mask options. */
36457 cur_column = start_column;
36458 for (i = 0; i < num_elements; i++)
36460 bool invert = opts[i].invert;
36461 const char *name = opts[i].name;
36462 const char *no_str = "";
36463 HOST_WIDE_INT mask = opts[i].mask;
36464 size_t len = comma_len + prefix_len + strlen (name);
36466 if (!invert)
36468 if ((flags & mask) == 0)
36470 no_str = "no-";
36471 len += sizeof ("no-") - 1;
36474 flags &= ~mask;
36477 else
36479 if ((flags & mask) != 0)
36481 no_str = "no-";
36482 len += sizeof ("no-") - 1;
36485 flags |= mask;
36488 cur_column += len;
36489 if (cur_column > max_column)
36491 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
36492 cur_column = start_column + len;
36493 comma = "";
36496 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36497 comma = ", ";
36498 comma_len = sizeof (", ") - 1;
36501 fputs ("\n", file);
36504 /* Helper function to print the current isa options on a line. */
36506 static void
36507 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36508 HOST_WIDE_INT flags)
36510 rs6000_print_options_internal (file, indent, string, flags, "-m",
36511 &rs6000_opt_masks[0],
36512 ARRAY_SIZE (rs6000_opt_masks));
36515 static void
36516 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36517 HOST_WIDE_INT flags)
36519 rs6000_print_options_internal (file, indent, string, flags, "",
36520 &rs6000_builtin_mask_names[0],
36521 ARRAY_SIZE (rs6000_builtin_mask_names));
36524 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36525 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36526 -mupper-regs-df, etc.).
36528 If the user used -mno-power8-vector, we need to turn off all of the implicit
36529 ISA 2.07 and 3.0 options that relate to the vector unit.
36531 If the user used -mno-power9-vector, we need to turn off all of the implicit
36532 ISA 3.0 options that relate to the vector unit.
36534 This function does not handle explicit options such as the user specifying
36535 -mdirect-move. These are handled in rs6000_option_override_internal, and
36536 the appropriate error is given if needed.
36538 We return a mask of all of the implicit options that should not be enabled
36539 by default. */
36541 static HOST_WIDE_INT
36542 rs6000_disable_incompatible_switches (void)
36544 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36545 size_t i, j;
36547 static const struct {
36548 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36549 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36550 const char *const name; /* name of the switch. */
36551 } flags[] = {
36552 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36553 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36554 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36557 for (i = 0; i < ARRAY_SIZE (flags); i++)
36559 HOST_WIDE_INT no_flag = flags[i].no_flag;
36561 if ((rs6000_isa_flags & no_flag) == 0
36562 && (rs6000_isa_flags_explicit & no_flag) != 0)
36564 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36565 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36566 & rs6000_isa_flags
36567 & dep_flags);
36569 if (set_flags)
36571 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36572 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36574 set_flags &= ~rs6000_opt_masks[j].mask;
36575 error ("%<-mno-%s%> turns off %<-m%s%>",
36576 flags[i].name,
36577 rs6000_opt_masks[j].name);
36580 gcc_assert (!set_flags);
36583 rs6000_isa_flags &= ~dep_flags;
36584 ignore_masks |= no_flag | dep_flags;
36588 return ignore_masks;
36592 /* Helper function for printing the function name when debugging. */
36594 static const char *
36595 get_decl_name (tree fn)
36597 tree name;
36599 if (!fn)
36600 return "<null>";
36602 name = DECL_NAME (fn);
36603 if (!name)
36604 return "<no-name>";
36606 return IDENTIFIER_POINTER (name);
36609 /* Return the clone id of the target we are compiling code for in a target
36610 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36611 the priority list for the target clones (ordered from lowest to
36612 highest). */
36614 static int
36615 rs6000_clone_priority (tree fndecl)
36617 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36618 HOST_WIDE_INT isa_masks;
36619 int ret = CLONE_DEFAULT;
36620 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36621 const char *attrs_str = NULL;
36623 attrs = TREE_VALUE (TREE_VALUE (attrs));
36624 attrs_str = TREE_STRING_POINTER (attrs);
36626 /* Return priority zero for the default function. Return the ISA needed for the
36627 function if it is not the default. */
36628 if (strcmp (attrs_str, "default") != 0)
36630 if (fn_opts == NULL_TREE)
36631 fn_opts = target_option_default_node;
36633 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36634 isa_masks = rs6000_isa_flags;
36635 else
36636 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36638 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36639 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36640 break;
36643 if (TARGET_DEBUG_TARGET)
36644 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
36645 get_decl_name (fndecl), ret);
36647 return ret;
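/* Illustrative usage (attribute spelling as documented for PowerPC
   target_clones; the function body is hypothetical):

         __attribute__ ((target_clones ("cpu=power9,default")))
         long mod3 (long a) { return a % 3; }

   rs6000_clone_priority returns CLONE_DEFAULT (0) for the "default"
   clone and the highest CLONE_* index whose isa_mask matches for the
   "cpu=power9" clone.  */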
36650 /* This compares the priority of target features in function DECL1 and DECL2.
36651 It returns positive value if DECL1 is higher priority, negative value if
36652 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36653 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36655 static int
36656 rs6000_compare_version_priority (tree decl1, tree decl2)
36658 int priority1 = rs6000_clone_priority (decl1);
36659 int priority2 = rs6000_clone_priority (decl2);
36660 int ret = priority1 - priority2;
36662 if (TARGET_DEBUG_TARGET)
36663 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36664 get_decl_name (decl1), get_decl_name (decl2), ret);
36666 return ret;
36669 /* Make a dispatcher declaration for the multi-versioned function DECL.
36670 Calls to DECL function will be replaced with calls to the dispatcher
36671 by the front-end. Returns the decl of the dispatcher function. */
36673 static tree
36674 rs6000_get_function_versions_dispatcher (void *decl)
36676 tree fn = (tree) decl;
36677 struct cgraph_node *node = NULL;
36678 struct cgraph_node *default_node = NULL;
36679 struct cgraph_function_version_info *node_v = NULL;
36680 struct cgraph_function_version_info *first_v = NULL;
36682 tree dispatch_decl = NULL;
36684 struct cgraph_function_version_info *default_version_info = NULL;
36685 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36687 if (TARGET_DEBUG_TARGET)
36688 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36689 get_decl_name (fn));
36691 node = cgraph_node::get (fn);
36692 gcc_assert (node != NULL);
36694 node_v = node->function_version ();
36695 gcc_assert (node_v != NULL);
36697 if (node_v->dispatcher_resolver != NULL)
36698 return node_v->dispatcher_resolver;
36700 /* Find the default version and make it the first node. */
36701 first_v = node_v;
36702 /* Go to the beginning of the chain. */
36703 while (first_v->prev != NULL)
36704 first_v = first_v->prev;
36706 default_version_info = first_v;
36707 while (default_version_info != NULL)
36709 const tree decl2 = default_version_info->this_node->decl;
36710 if (is_function_default_version (decl2))
36711 break;
36712 default_version_info = default_version_info->next;
36715 /* If there is no default node, just return NULL. */
36716 if (default_version_info == NULL)
36717 return NULL;
36719 /* Make default info the first node. */
36720 if (first_v != default_version_info)
36722 default_version_info->prev->next = default_version_info->next;
36723 if (default_version_info->next)
36724 default_version_info->next->prev = default_version_info->prev;
36725 first_v->prev = default_version_info;
36726 default_version_info->next = first_v;
36727 default_version_info->prev = NULL;
36730 default_node = default_version_info->this_node;
36732 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36733 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36734 "target_clones attribute needs GLIBC (2.23 and newer) that "
36735 "exports hardware capability bits");
36736 #else
36738 if (targetm.has_ifunc_p ())
36740 struct cgraph_function_version_info *it_v = NULL;
36741 struct cgraph_node *dispatcher_node = NULL;
36742 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36744 /* Right now, the dispatching is done via ifunc. */
36745 dispatch_decl = make_dispatcher_decl (default_node->decl);
36747 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36748 gcc_assert (dispatcher_node != NULL);
36749 dispatcher_node->dispatcher_function = 1;
36750 dispatcher_version_info
36751 = dispatcher_node->insert_new_function_version ();
36752 dispatcher_version_info->next = default_version_info;
36753 dispatcher_node->definition = 1;
36755 /* Set the dispatcher for all the versions. */
36756 it_v = default_version_info;
36757 while (it_v != NULL)
36759 it_v->dispatcher_resolver = dispatch_decl;
36760 it_v = it_v->next;
36763 else
36765 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36766 "multiversioning needs ifunc which is not supported "
36767 "on this target");
36769 #endif
36771 return dispatch_decl;
36774 /* Make the resolver function decl to dispatch the versions of a multi-
36775 versioned function, DEFAULT_DECL. Create an empty basic block in the
36776 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36777 function. */
36779 static tree
36780 make_resolver_func (const tree default_decl,
36781 const tree dispatch_decl,
36782 basic_block *empty_bb)
36784 /* Make the resolver function static. The resolver function returns
36785 void *. */
36786 tree decl_name = clone_function_name (default_decl, "resolver");
36787 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
36788 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
36789 tree decl = build_fn_decl (resolver_name, type);
36790 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
36792 DECL_NAME (decl) = decl_name;
36793 TREE_USED (decl) = 1;
36794 DECL_ARTIFICIAL (decl) = 1;
36795 DECL_IGNORED_P (decl) = 0;
36796 TREE_PUBLIC (decl) = 0;
36797 DECL_UNINLINABLE (decl) = 1;
36799 /* Resolver is not external, body is generated. */
36800 DECL_EXTERNAL (decl) = 0;
36801 DECL_EXTERNAL (dispatch_decl) = 0;
36803 DECL_CONTEXT (decl) = NULL_TREE;
36804 DECL_INITIAL (decl) = make_node (BLOCK);
36805 DECL_STATIC_CONSTRUCTOR (decl) = 0;
36807 /* Build result decl and add to function_decl. */
36808 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
36809 DECL_ARTIFICIAL (t) = 1;
36810 DECL_IGNORED_P (t) = 1;
36811 DECL_RESULT (decl) = t;
36813 gimplify_function_tree (decl);
36814 push_cfun (DECL_STRUCT_FUNCTION (decl));
36815 *empty_bb = init_lowered_empty_function (decl, false,
36816 profile_count::uninitialized ());
36818 cgraph_node::add_new_function (decl, true);
36819 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
36821 pop_cfun ();
36823 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
36824 DECL_ATTRIBUTES (dispatch_decl)
36825 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
36827 cgraph_node::create_same_body_alias (dispatch_decl, decl);
36829 return decl;
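/* Net effect, as a sketch (the resolver name is mangling-dependent): for
   a default decl "foo", the dispatcher ends up roughly equivalent to

         void *foo_resolver (void);
         int foo (int) __attribute__ ((ifunc ("foo_resolver")));

   where "foo_resolver" stands for the name produced by
   clone_function_name (default_decl, "resolver").  */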
36832 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
36833 return a pointer to VERSION_DECL if we are running on a machine that
36834 supports the index CLONE_ISA hardware architecture bits. This function will
36835 be called during version dispatch to decide which function version to
36836 execute. It returns the basic block at the end, to which more conditions
36837 can be added. */
36839 static basic_block
36840 add_condition_to_bb (tree function_decl, tree version_decl,
36841 int clone_isa, basic_block new_bb)
36843 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
36845 gcc_assert (new_bb != NULL);
36846 gimple_seq gseq = bb_seq (new_bb);
36849 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
36850 build_fold_addr_expr (version_decl));
36851 tree result_var = create_tmp_var (ptr_type_node);
36852 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
36853 gimple *return_stmt = gimple_build_return (result_var);
36855 if (clone_isa == CLONE_DEFAULT)
36857 gimple_seq_add_stmt (&gseq, convert_stmt);
36858 gimple_seq_add_stmt (&gseq, return_stmt);
36859 set_bb_seq (new_bb, gseq);
36860 gimple_set_bb (convert_stmt, new_bb);
36861 gimple_set_bb (return_stmt, new_bb);
36862 pop_cfun ();
36863 return new_bb;
36866 tree bool_zero = build_int_cst (bool_int_type_node, 0);
36867 tree cond_var = create_tmp_var (bool_int_type_node);
36868 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
36869 const char *arg_str = rs6000_clone_map[clone_isa].name;
36870 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
36871 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
36872 gimple_call_set_lhs (call_cond_stmt, cond_var);
36874 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
36875 gimple_set_bb (call_cond_stmt, new_bb);
36876 gimple_seq_add_stmt (&gseq, call_cond_stmt);
36878 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
36879 NULL_TREE, NULL_TREE);
36880 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
36881 gimple_set_bb (if_else_stmt, new_bb);
36882 gimple_seq_add_stmt (&gseq, if_else_stmt);
36884 gimple_seq_add_stmt (&gseq, convert_stmt);
36885 gimple_seq_add_stmt (&gseq, return_stmt);
36886 set_bb_seq (new_bb, gseq);
36888 basic_block bb1 = new_bb;
36889 edge e12 = split_block (bb1, if_else_stmt);
36890 basic_block bb2 = e12->dest;
36891 e12->flags &= ~EDGE_FALLTHRU;
36892 e12->flags |= EDGE_TRUE_VALUE;
36894 edge e23 = split_block (bb2, return_stmt);
36895 gimple_set_bb (convert_stmt, bb2);
36896 gimple_set_bb (return_stmt, bb2);
36898 basic_block bb3 = e23->dest;
36899 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
36901 remove_edge (e23);
36902 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
36904 pop_cfun ();
36905 return bb3;
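/* C-level sketch of the condition built above for one non-default clone
   (the feature string comes from rs6000_clone_map[clone_isa].name;
   "arch_3_00" is only an illustrative guess):

         if (__builtin_cpu_supports ("arch_3_00"))
           return (void *) version_decl;
         // control falls through to the next condition in bb3

   The default clone gets just the unconditional return.  */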
36908 /* This function generates the dispatch function for multi-versioned functions.
36909 DISPATCH_DECL is the function which will contain the dispatch logic.
36910 FNDECLS is the set of function choices for dispatch, passed as a vector.
36911 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
36912 code is generated. */
36914 static int
36915 dispatch_function_versions (tree dispatch_decl,
36916 void *fndecls_p,
36917 basic_block *empty_bb)
36919 int ix;
36920 tree ele;
36921 vec<tree> *fndecls;
36922 tree clones[CLONE_MAX];
36924 if (TARGET_DEBUG_TARGET)
36925 fputs ("dispatch_function_versions, top\n", stderr);
36927 gcc_assert (dispatch_decl != NULL
36928 && fndecls_p != NULL
36929 && empty_bb != NULL);
36931 /* fndecls_p is actually a vector. */
36932 fndecls = static_cast<vec<tree> *> (fndecls_p);
36934 /* At least one more version other than the default. */
36935 gcc_assert (fndecls->length () >= 2);
36937 /* The first version in the vector is the default decl. */
36938 memset ((void *) clones, '\0', sizeof (clones));
36939 clones[CLONE_DEFAULT] = (*fndecls)[0];
36941 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
36942 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
36943 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
36944 recent glibc. If we ever need to call __builtin_cpu_init, we would need
36945 to insert the code here to do the call. */
36947 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
36949 int priority = rs6000_clone_priority (ele);
36950 if (!clones[priority])
36951 clones[priority] = ele;
36954 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
36955 if (clones[ix])
36957 if (TARGET_DEBUG_TARGET)
36958 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
36959 ix, get_decl_name (clones[ix]));
36961 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
36962 *empty_bb);
36965 return 0;
36968 /* Generate the dispatching code body to dispatch multi-versioned function
36969 DECL. The target hook is called to process the "target" attributes and
36970 provide the code to dispatch the right function at run-time. NODE points
36971 to the dispatcher decl whose body will be created. */
36973 static tree
36974 rs6000_generate_version_dispatcher_body (void *node_p)
36976 tree resolver;
36977 basic_block empty_bb;
36978 struct cgraph_node *node = (cgraph_node *) node_p;
36979 struct cgraph_function_version_info *ninfo = node->function_version ();
36981 if (ninfo->dispatcher_resolver)
36982 return ninfo->dispatcher_resolver;
36984 /* node is going to be an alias, so remove the finalized bit. */
36985 node->definition = false;
36987 /* The first version in the chain corresponds to the default version. */
36988 ninfo->dispatcher_resolver = resolver
36989 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
36991 if (TARGET_DEBUG_TARGET)
36992 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
36993 get_decl_name (resolver));
36995 push_cfun (DECL_STRUCT_FUNCTION (resolver));
36996 auto_vec<tree, 2> fn_ver_vec;
36998 for (struct cgraph_function_version_info *vinfo = ninfo->next;
36999 vinfo;
37000 vinfo = vinfo->next)
37002 struct cgraph_node *version = vinfo->this_node;
37003 /* Check for virtual functions here again, as by this time it should
37004 have been determined if this function needs a vtable index or
37005 not. This happens for methods in derived classes that override
37006 virtual methods in base classes but are not explicitly marked as
37007 virtual. */
37008 if (DECL_VINDEX (version->decl))
37009 sorry ("Virtual function multiversioning not supported");
37011 fn_ver_vec.safe_push (version->decl);
37014 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37015 cgraph_edge::rebuild_edges ();
37016 pop_cfun ();
37017 return resolver;
37021 /* Hook to determine if one function can safely inline another. */
37023 static bool
37024 rs6000_can_inline_p (tree caller, tree callee)
37026 bool ret = false;
37027 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37028 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37030 /* If callee has no option attributes, then it is ok to inline. */
37031 if (!callee_tree)
37032 ret = true;
37034 /* If caller has no option attributes, but callee does then it is not ok to
37035 inline. */
37036 else if (!caller_tree)
37037 ret = false;
37039 else
37041 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37042 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37044 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37045 can inline an altivec function but a non-vsx function can't inline a
37046 vsx function. */
37047 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37048 == callee_opts->x_rs6000_isa_flags)
37049 ret = true;
37052 if (TARGET_DEBUG_TARGET)
37053 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37054 get_decl_name (caller), get_decl_name (callee),
37055 (ret ? "can" : "cannot"));
37057 return ret;
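/* Illustrative example of the subset rule above (hypothetical functions;
   on rs6000, -mvsx implies -maltivec):

         __attribute__ ((target ("altivec")))
         static inline int callee (int x) { return x + 1; }

         __attribute__ ((target ("vsx")))
         int caller (int x) { return callee (x); }

   Here the callee's ISA flags are a subset of the caller's, so the call
   may be inlined; with the attributes swapped it would be rejected.  */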
37060 /* Allocate a stack temp and fixup the address so it meets the particular
37061 memory requirements (either offsettable or REG+REG addressing). */
37063 rtx
37064 rs6000_allocate_stack_temp (machine_mode mode,
37065 bool offsettable_p,
37066 bool reg_reg_p)
37068 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37069 rtx addr = XEXP (stack, 0);
37070 int strict_p = reload_completed;
37072 if (!legitimate_indirect_address_p (addr, strict_p))
37074 if (offsettable_p
37075 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37076 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37078 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37079 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37082 return stack;
37085 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37086 to such a form to deal with memory reference instructions like STFIWX that
37087 only take reg+reg addressing. */
37089 rtx
37090 rs6000_address_for_fpconvert (rtx x)
37092 rtx addr;
37094 gcc_assert (MEM_P (x));
37095 addr = XEXP (x, 0);
37096 if (can_create_pseudo_p ()
37097 && ! legitimate_indirect_address_p (addr, reload_completed)
37098 && ! legitimate_indexed_address_p (addr, reload_completed))
37100 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37102 rtx reg = XEXP (addr, 0);
37103 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37104 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37105 gcc_assert (REG_P (reg));
37106 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37107 addr = reg;
37109 else if (GET_CODE (addr) == PRE_MODIFY)
37111 rtx reg = XEXP (addr, 0);
37112 rtx expr = XEXP (addr, 1);
37113 gcc_assert (REG_P (reg));
37114 gcc_assert (GET_CODE (expr) == PLUS);
37115 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37116 addr = reg;
37119 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37122 return x;
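/* For example (register and mode illustrative): an SImode access through
   "(mem (pre_inc (reg r9)))" is rewritten by emitting "addi r9,r9,4"
   first and then using the plain "(mem (reg r9))" address, which
   reg+reg-only insns like STFIWX can accept after the copy_addr_to_reg
   above.  */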
37125 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37127 On the RS/6000, all integer constants are acceptable, though most will not
37128 be valid for particular insns. Only easy FP constants are acceptable. */
37130 static bool
37131 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37133 if (TARGET_ELF && tls_referenced_p (x))
37134 return false;
37136 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37137 || GET_MODE (x) == VOIDmode
37138 || (TARGET_POWERPC64 && mode == DImode)
37139 || easy_fp_constant (x, mode)
37140 || easy_vector_constant (x, mode));
37144 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37146 static bool
37147 chain_already_loaded (rtx_insn *last)
37149 for (; last != NULL; last = PREV_INSN (last))
37151 if (NONJUMP_INSN_P (last))
37153 rtx patt = PATTERN (last);
37155 if (GET_CODE (patt) == SET)
37157 rtx lhs = XEXP (patt, 0);
37159 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37160 return true;
37164 return false;
37167 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37169 void
37170 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37172 const bool direct_call_p
37173 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37174 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37175 rtx toc_load = NULL_RTX;
37176 rtx toc_restore = NULL_RTX;
37177 rtx func_addr;
37178 rtx abi_reg = NULL_RTX;
37179 rtx call[4];
37180 int n_call;
37181 rtx insn;
37183 /* Handle longcall attributes. */
37184 if (INTVAL (cookie) & CALL_LONG)
37185 func_desc = rs6000_longcall_ref (func_desc);
37187 /* Handle indirect calls. */
37188 if (GET_CODE (func_desc) != SYMBOL_REF
37189 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37191 /* Save the TOC into its reserved slot before the call,
37192 and prepare to restore it after the call. */
37193 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37194 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37195 rtx stack_toc_mem = gen_frame_mem (Pmode,
37196 gen_rtx_PLUS (Pmode, stack_ptr,
37197 stack_toc_offset));
37198 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37199 gen_rtvec (1, stack_toc_offset),
37200 UNSPEC_TOCSLOT);
37201 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37203 /* Can we optimize saving the TOC in the prologue or
37204 do we need to do it at every call? */
37205 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37206 cfun->machine->save_toc_in_prologue = true;
37207 else
37209 MEM_VOLATILE_P (stack_toc_mem) = 1;
37210 emit_move_insn (stack_toc_mem, toc_reg);
37213 if (DEFAULT_ABI == ABI_ELFv2)
37215 /* A function pointer in the ELFv2 ABI is just a plain address, but
37216 the ABI requires it to be loaded into r12 before the call. */
37217 func_addr = gen_rtx_REG (Pmode, 12);
37218 emit_move_insn (func_addr, func_desc);
37219 abi_reg = func_addr;
37221 else
37223 /* A function pointer under AIX is a pointer to a data area whose
37224 first word contains the actual address of the function, whose
37225 second word contains a pointer to its TOC, and whose third word
37226 contains a value to place in the static chain register (r11).
37227 Note that if we load the static chain, our "trampoline" need
37228 not have any executable code. */
37230 /* Load up address of the actual function. */
37231 func_desc = force_reg (Pmode, func_desc);
37232 func_addr = gen_reg_rtx (Pmode);
37233 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37235 /* Prepare to load the TOC of the called function. Note that the
37236 TOC load must happen immediately before the actual call so
37237 that unwinding the TOC registers works correctly. See the
37238 comment in frob_update_context. */
37239 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37240 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37241 gen_rtx_PLUS (Pmode, func_desc,
37242 func_toc_offset));
37243 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37245 /* If we have a static chain, load it up. But, if the call was
37246 originally direct, the 3rd word has not been written since no
37247 trampoline has been built, so we ought not to load it, lest we
37248 override a static chain value. */
37249 if (!direct_call_p
37250 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37251 && !chain_already_loaded (get_current_sequence ()->next->last))
37253 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37254 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37255 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37256 gen_rtx_PLUS (Pmode, func_desc,
37257 func_sc_offset));
37258 emit_move_insn (sc_reg, func_sc_mem);
37259 abi_reg = sc_reg;
37263 else
37265 /* Direct calls use the TOC: for local calls, the callee will
37266 assume the TOC register is set; for non-local calls, the
37267 PLT stub needs the TOC register. */
37268 abi_reg = toc_reg;
37269 func_addr = func_desc;
37272 /* Create the call. */
37273 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37274 if (value != NULL_RTX)
37275 call[0] = gen_rtx_SET (value, call[0]);
37276 n_call = 1;
37278 if (toc_load)
37279 call[n_call++] = toc_load;
37280 if (toc_restore)
37281 call[n_call++] = toc_restore;
37283 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37285 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37286 insn = emit_call_insn (insn);
37288 /* Mention all registers defined by the ABI to hold information
37289 as uses in CALL_INSN_FUNCTION_USAGE. */
37290 if (abi_reg)
37291 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
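/* Sketch of the AIX function descriptor dereferenced by the indirect-call
   path above; each field is one Pmode-sized word (the struct is only
   illustrative, nothing in the compiler uses it):

         struct aix_func_desc
         {
           void *entry;          -- word 0: address of the actual function
           void *toc;            -- word 1: pointer to the callee's TOC
           void *static_chain;   -- word 2: value for r11, if needed
         };  */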
37294 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37296 void
37297 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37299 rtx call[2];
37300 rtx insn;
37302 gcc_assert (INTVAL (cookie) == 0);
37304 /* Create the call. */
37305 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37306 if (value != NULL_RTX)
37307 call[0] = gen_rtx_SET (value, call[0]);
37309 call[1] = simple_return_rtx;
37311 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37312 insn = emit_call_insn (insn);
37314 /* Note use of the TOC register. */
37315 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37318 /* Return whether we need to always update the saved TOC pointer when we update
37319 the stack pointer. */
37321 static bool
37322 rs6000_save_toc_in_prologue_p (void)
37324 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37327 #ifdef HAVE_GAS_HIDDEN
37328 # define USE_HIDDEN_LINKONCE 1
37329 #else
37330 # define USE_HIDDEN_LINKONCE 0
37331 #endif
37333 /* Fills in the label name that should be used for a 476 link stack thunk. */
37335 void
37336 get_ppc476_thunk_name (char name[32])
37338 gcc_assert (TARGET_LINK_STACK);
37340 if (USE_HIDDEN_LINKONCE)
37341 sprintf (name, "__ppc476.get_thunk");
37342 else
37343 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37346 /* This function emits the simple thunk routine that is used to preserve
37347 the link stack on the 476 cpu. */
37349 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37350 static void
37351 rs6000_code_end (void)
37353 char name[32];
37354 tree decl;
37356 if (!TARGET_LINK_STACK)
37357 return;
37359 get_ppc476_thunk_name (name);
37361 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37362 build_function_type_list (void_type_node, NULL_TREE));
37363 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37364 NULL_TREE, void_type_node);
37365 TREE_PUBLIC (decl) = 1;
37366 TREE_STATIC (decl) = 1;
37368 #if RS6000_WEAK
37369 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37371 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37372 targetm.asm_out.unique_section (decl, 0);
37373 switch_to_section (get_named_section (decl, NULL, 0));
37374 DECL_WEAK (decl) = 1;
37375 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37376 targetm.asm_out.globalize_label (asm_out_file, name);
37377 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37378 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37380 else
37381 #endif
37383 switch_to_section (text_section);
37384 ASM_OUTPUT_LABEL (asm_out_file, name);
37387 DECL_INITIAL (decl) = make_node (BLOCK);
37388 current_function_decl = decl;
37389 allocate_struct_function (decl, false);
37390 init_function_start (decl);
37391 first_function_block_is_cold = false;
37392 /* Make sure unwind info is emitted for the thunk if needed. */
37393 final_start_function (emit_barrier (), asm_out_file, 1);
37395 fputs ("\tblr\n", asm_out_file);
37397 final_end_function ();
37398 init_insn_lengths ();
37399 free_after_compilation (cfun);
37400 set_cfun (NULL);
37401 current_function_decl = NULL;
37404 /* Add r30 to hard reg set if the prologue sets it up and it is not
37405 pic_offset_table_rtx. */
37407 static void
37408 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37410 if (!TARGET_SINGLE_PIC_BASE
37411 && TARGET_TOC
37412 && TARGET_MINIMAL_TOC
37413 && !constant_pool_empty_p ())
37414 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37415 if (cfun->machine->split_stack_argp_used)
37416 add_to_hard_reg_set (&set->set, Pmode, 12);
37418 /* Make sure the hard reg set doesn't include r2, which was possibly added
37419 via PIC_OFFSET_TABLE_REGNUM. */
37420 if (TARGET_TOC)
37421 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37425 /* Helper function for rs6000_split_logical to emit a logical instruction after
37426 splitting the operation into single GPR registers.
37428 DEST is the destination register.
37429 OP1 and OP2 are the input source registers.
37430 CODE is the base operation (AND, IOR, XOR, NOT).
37431 MODE is the machine mode.
37432 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37433 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37434 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37436 static void
37437 rs6000_split_logical_inner (rtx dest,
37438 rtx op1,
37439 rtx op2,
37440 enum rtx_code code,
37441 machine_mode mode,
37442 bool complement_final_p,
37443 bool complement_op1_p,
37444 bool complement_op2_p)
37446 rtx bool_rtx;
37448 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37449 if (op2 && GET_CODE (op2) == CONST_INT
37450 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37451 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37453 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37454 HOST_WIDE_INT value = INTVAL (op2) & mask;
37456 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37457 if (code == AND)
37459 if (value == 0)
37461 emit_insn (gen_rtx_SET (dest, const0_rtx));
37462 return;
37465 else if (value == mask)
37467 if (!rtx_equal_p (dest, op1))
37468 emit_insn (gen_rtx_SET (dest, op1));
37469 return;
37473 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37474 into separate ORI/ORIS or XORI/XORIS instructions. */
37475 else if (code == IOR || code == XOR)
37477 if (value == 0)
37479 if (!rtx_equal_p (dest, op1))
37480 emit_insn (gen_rtx_SET (dest, op1));
37481 return;
37486 if (code == AND && mode == SImode
37487 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37489 emit_insn (gen_andsi3 (dest, op1, op2));
37490 return;
37493 if (complement_op1_p)
37494 op1 = gen_rtx_NOT (mode, op1);
37496 if (complement_op2_p)
37497 op2 = gen_rtx_NOT (mode, op2);
37499 /* For canonical RTL, if only one arm is inverted it is the first. */
37500 if (!complement_op1_p && complement_op2_p)
37501 std::swap (op1, op2);
37503 bool_rtx = ((code == NOT)
37504 ? gen_rtx_NOT (mode, op1)
37505 : gen_rtx_fmt_ee (code, mode, op1, op2));
37507 if (complement_final_p)
37508 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37510 emit_insn (gen_rtx_SET (dest, bool_rtx));
37513 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37514 operations are split immediately during RTL generation to allow for more
37515 optimizations of the AND/IOR/XOR.
37517 OPERANDS is an array containing the destination and two input operands.
37518 CODE is the base operation (AND, IOR, XOR, NOT).
37519 MODE is the machine mode.
37520 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37521 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37522 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37526 static void
37527 rs6000_split_logical_di (rtx operands[3],
37528 enum rtx_code code,
37529 bool complement_final_p,
37530 bool complement_op1_p,
37531 bool complement_op2_p)
37533 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37534 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37535 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37536 enum hi_lo { hi = 0, lo = 1 };
37537 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37538 size_t i;
37540 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37541 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37542 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37543 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37545 if (code == NOT)
37546 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37547 else
37549 if (GET_CODE (operands[2]) != CONST_INT)
37551 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37552 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37554 else
37556 HOST_WIDE_INT value = INTVAL (operands[2]);
37557 HOST_WIDE_INT value_hi_lo[2];
37559 gcc_assert (!complement_final_p);
37560 gcc_assert (!complement_op1_p);
37561 gcc_assert (!complement_op2_p);
37563 value_hi_lo[hi] = value >> 32;
37564 value_hi_lo[lo] = value & lower_32bits;
37566 for (i = 0; i < 2; i++)
37568 HOST_WIDE_INT sub_value = value_hi_lo[i];
37570 if (sub_value & sign_bit)
37571 sub_value |= upper_32bits;
37573 op2_hi_lo[i] = GEN_INT (sub_value);
37575 /* If this is an AND instruction, check to see if we need to load
37576 the value in a register. */
37577 if (code == AND && sub_value != -1 && sub_value != 0
37578 && !and_operand (op2_hi_lo[i], SImode))
37579 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37584 for (i = 0; i < 2; i++)
37586 /* Split large IOR/XOR operations. */
37587 if ((code == IOR || code == XOR)
37588 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37589 && !complement_final_p
37590 && !complement_op1_p
37591 && !complement_op2_p
37592 && !logical_const_operand (op2_hi_lo[i], SImode))
37594 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37595 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37596 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37597 rtx tmp = gen_reg_rtx (SImode);
37599 /* Make sure the constant is sign extended. */
37600 if ((hi_16bits & sign_bit) != 0)
37601 hi_16bits |= upper_32bits;
37603 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37604 code, SImode, false, false, false);
37606 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37607 code, SImode, false, false, false);
37609 else
37610 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37611 code, SImode, complement_final_p,
37612 complement_op1_p, complement_op2_p);
37615 return;
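/* Illustrative example of the IOR/XOR split above: on a 32-bit target,
   "x | 0x12345678" is not a single logical_const_operand, so the word is
   handled as two 16-bit pieces, roughly

         oris tmp,src,0x1234    (IOR with the high 16 bits)
         ori  dst,tmp,0x5678    (IOR with the low 16 bits)

   (assembly is only a sketch of the two rs6000_split_logical_inner
   calls).  */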
37618 /* Split the insns that make up boolean operations operating on multiple GPR
37619 registers. The boolean MD patterns ensure that the inputs either are
37620 exactly the same as the output registers, or there is no overlap.
37622 OPERANDS is an array containing the destination and two input operands.
37623 CODE is the base operation (AND, IOR, XOR, NOT).
37624 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37625 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37626 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37628 void
37629 rs6000_split_logical (rtx operands[3],
37630 enum rtx_code code,
37631 bool complement_final_p,
37632 bool complement_op1_p,
37633 bool complement_op2_p)
37635 machine_mode mode = GET_MODE (operands[0]);
37636 machine_mode sub_mode;
37637 rtx op0, op1, op2;
37638 int sub_size, regno0, regno1, nregs, i;
37640 /* If this is DImode, use the specialized version that can run before
37641 register allocation. */
37642 if (mode == DImode && !TARGET_POWERPC64)
37644 rs6000_split_logical_di (operands, code, complement_final_p,
37645 complement_op1_p, complement_op2_p);
37646 return;
37649 op0 = operands[0];
37650 op1 = operands[1];
37651 op2 = (code == NOT) ? NULL_RTX : operands[2];
37652 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37653 sub_size = GET_MODE_SIZE (sub_mode);
37654 regno0 = REGNO (op0);
37655 regno1 = REGNO (op1);
37657 gcc_assert (reload_completed);
37658 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37659 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37661 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37662 gcc_assert (nregs > 1);
37664 if (op2 && REG_P (op2))
37665 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37667 for (i = 0; i < nregs; i++)
37669 int offset = i * sub_size;
37670 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37671 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37672 rtx sub_op2 = ((code == NOT)
37673 ? NULL_RTX
37674 : simplify_subreg (sub_mode, op2, mode, offset));
37676 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37677 complement_final_p, complement_op1_p,
37678 complement_op2_p);
37681 return;
37685 /* Return true if the peephole2 can combine an addis instruction with a
37686 D-form load using an offset, so that the pair can be fused together on
37687 a power8. */
37689 bool
37690 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37691 rtx addis_value, /* addis value. */
37692 rtx target, /* target register that is loaded. */
37693 rtx mem) /* bottom part of the memory addr. */
37695 rtx addr;
37696 rtx base_reg;
37698 /* Validate arguments. */
37699 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37700 return false;
37702 if (!base_reg_operand (target, GET_MODE (target)))
37703 return false;
37705 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37706 return false;
37708 /* Allow sign/zero extension. */
37709 if (GET_CODE (mem) == ZERO_EXTEND
37710 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37711 mem = XEXP (mem, 0);
37713 if (!MEM_P (mem))
37714 return false;
37716 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37717 return false;
37719 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37720 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37721 return false;
37723 /* Validate that the register used to load the high value is either the
37724 register being loaded, or we can safely replace its use.
37726 This function is only called from the peephole2 pass and we assume that
37727 there are 2 instructions in the peephole (addis and load), so we want to
37728 check if the target register was not used in the memory address and the
37729 register to hold the addis result is dead after the peephole. */
37730 if (REGNO (addis_reg) != REGNO (target))
37732 if (reg_mentioned_p (target, mem))
37733 return false;
37735 if (!peep2_reg_dead_p (2, addis_reg))
37736 return false;
37738 /* If the target register being loaded is the stack pointer, we must
37739 avoid loading any other value into it, even temporarily. */
37740 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37741 return false;
37744 base_reg = XEXP (addr, 0);
37745 return REGNO (addis_reg) == REGNO (base_reg);
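/* Sketch of an insn pair this predicate accepts (registers and symbol are
   illustrative):

         addis 9,2,.LC0@toc@ha
         lwz   9,.LC0@toc@l(9)

   The addis result is the base of the D-form load, and either both insns
   use the same register (as here) or the addis register must be dead
   after the load.  */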
37748 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37749 sequence. We adjust the addis register to use the target register. If the
37750 load sign extends, we adjust the code to do the zero extending load, and an
37751 explicit sign extension later since the fusion only covers zero extending
37752 loads.
37754 The operands are:
37755 operands[0] register set with addis (to be replaced with target)
37756 operands[1] value set via addis
37757 operands[2] target register being loaded
37758 operands[3] D-form memory reference using operands[0]. */
37760 void
37761 expand_fusion_gpr_load (rtx *operands)
37763 rtx addis_value = operands[1];
37764 rtx target = operands[2];
37765 rtx orig_mem = operands[3];
37766 rtx new_addr, new_mem, orig_addr, offset;
37767 enum rtx_code plus_or_lo_sum;
37768 machine_mode target_mode = GET_MODE (target);
37769 machine_mode extend_mode = target_mode;
37770 machine_mode ptr_mode = Pmode;
37771 enum rtx_code extend = UNKNOWN;
37773 if (GET_CODE (orig_mem) == ZERO_EXTEND
37774 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37776 extend = GET_CODE (orig_mem);
37777 orig_mem = XEXP (orig_mem, 0);
37778 target_mode = GET_MODE (orig_mem);
37781 gcc_assert (MEM_P (orig_mem));
37783 orig_addr = XEXP (orig_mem, 0);
37784 plus_or_lo_sum = GET_CODE (orig_addr);
37785 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37787 offset = XEXP (orig_addr, 1);
37788 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37789 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37791 if (extend != UNKNOWN)
37792 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37794 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37795 UNSPEC_FUSION_GPR);
37796 emit_insn (gen_rtx_SET (target, new_mem));
37798 if (extend == SIGN_EXTEND)
37800 int sub_off = ((BYTES_BIG_ENDIAN)
37801 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37802 : 0);
37803 rtx sign_reg
37804 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37806 emit_insn (gen_rtx_SET (target,
37807 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37810 return;
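/* For a sign-extending load, the expansion above emits a zero-extending
   fused load and then an explicit sign extension, roughly (RTL sketch):

         (set (reg:DI target)
              (unspec:DI [(zero_extend:DI (mem:SI addr))]
                         UNSPEC_FUSION_GPR))
         (set (reg:DI target)
              (sign_extend:DI (subreg:SI (reg:DI target) off)))  */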
37813 /* Emit the addis instruction that will be part of a fused instruction
37814 sequence. */
37816 void
37817 emit_fusion_addis (rtx target, rtx addis_value)
37819 rtx fuse_ops[10];
37820 const char *addis_str = NULL;
37822 /* Emit the addis instruction. */
37823 fuse_ops[0] = target;
37824 if (satisfies_constraint_L (addis_value))
37826 fuse_ops[1] = addis_value;
37827 addis_str = "lis %0,%v1";
37830 else if (GET_CODE (addis_value) == PLUS)
37832 rtx op0 = XEXP (addis_value, 0);
37833 rtx op1 = XEXP (addis_value, 1);
37835 if (REG_P (op0) && CONST_INT_P (op1)
37836 && satisfies_constraint_L (op1))
37838 fuse_ops[1] = op0;
37839 fuse_ops[2] = op1;
37840 addis_str = "addis %0,%1,%v2";
37844 else if (GET_CODE (addis_value) == HIGH)
37846 rtx value = XEXP (addis_value, 0);
37847 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37849 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37850 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37851 if (TARGET_ELF)
37852 addis_str = "addis %0,%2,%1@toc@ha";
37854 else if (TARGET_XCOFF)
37855 addis_str = "addis %0,%1@u(%2)";
37857 else
37858 gcc_unreachable ();
37861 else if (GET_CODE (value) == PLUS)
37863 rtx op0 = XEXP (value, 0);
37864 rtx op1 = XEXP (value, 1);
37866 if (GET_CODE (op0) == UNSPEC
37867 && XINT (op0, 1) == UNSPEC_TOCREL
37868 && CONST_INT_P (op1))
37870 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37871 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37872 fuse_ops[3] = op1;
37873 if (TARGET_ELF)
37874 addis_str = "addis %0,%2,%1+%3@toc@ha";
37876 else if (TARGET_XCOFF)
37877 addis_str = "addis %0,%1+%3@u(%2)";
37879 else
37880 gcc_unreachable ();
37884 else if (satisfies_constraint_L (value))
37886 fuse_ops[1] = value;
37887 addis_str = "lis %0,%v1";
37890 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37892 fuse_ops[1] = value;
37893 addis_str = "lis %0,%1@ha";
37897 if (!addis_str)
37898 fatal_insn ("Could not generate addis value for fusion", addis_value);
37900 output_asm_insn (addis_str, fuse_ops);
37903 /* Emit a D-form load or store instruction that is the second instruction
37904 of a fusion sequence. */
37906 void
37907 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37908 const char *insn_str)
37910 rtx fuse_ops[10];
37911 char insn_template[80];
37913 fuse_ops[0] = load_store_reg;
37914 fuse_ops[1] = addis_reg;
37916 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37918 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37919 fuse_ops[2] = offset;
37920 output_asm_insn (insn_template, fuse_ops);
37923 else if (GET_CODE (offset) == UNSPEC
37924 && XINT (offset, 1) == UNSPEC_TOCREL)
37926 if (TARGET_ELF)
37927 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37929 else if (TARGET_XCOFF)
37930 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37932 else
37933 gcc_unreachable ();
37935 fuse_ops[2] = XVECEXP (offset, 0, 0);
37936 output_asm_insn (insn_template, fuse_ops);
37939 else if (GET_CODE (offset) == PLUS
37940 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37941 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37942 && CONST_INT_P (XEXP (offset, 1)))
37944 rtx tocrel_unspec = XEXP (offset, 0);
37945 if (TARGET_ELF)
37946 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37948 else if (TARGET_XCOFF)
37949 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37951 else
37952 gcc_unreachable ();
37954 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37955 fuse_ops[3] = XEXP (offset, 1);
37956 output_asm_insn (insn_template, fuse_ops);
37959 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37961 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37963 fuse_ops[2] = offset;
37964 output_asm_insn (insn_template, fuse_ops);
37967 else
37968 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37970 return;
37973 /* Wrap a TOC address that can be fused to indicate that special fusion
37974 processing is needed. */
37976 rtx
37977 fusion_wrap_memory_address (rtx old_mem)
37979 rtx old_addr = XEXP (old_mem, 0);
37980 rtvec v = gen_rtvec (1, old_addr);
37981 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
37982 return replace_equiv_address_nv (old_mem, new_addr, false);
37985 /* Given an address, convert it into the addis and load offset parts. Addresses
37986 created during the peephole2 process look like:
37987 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
37988 (unspec [(...)] UNSPEC_TOCREL))
37990 Addresses created via toc fusion look like:
37991 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
37993 static void
37994 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
37996 rtx hi, lo;
37998 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38000 lo = XVECEXP (addr, 0, 0);
38001 hi = gen_rtx_HIGH (Pmode, lo);
38003 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38005 hi = XEXP (addr, 0);
38006 lo = XEXP (addr, 1);
38008 else
38009 gcc_unreachable ();
38011 *p_hi = hi;
38012 *p_lo = lo;
38015 /* Return a string to fuse an addis instruction with a GPR load into the same
38016 register that the addis instruction set. The address that is used
38017 is the logical address that was formed during peephole2:
38018 (lo_sum (high) (low-part))
38020 Or the address is the TOC address that is wrapped before register allocation:
38021 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38023 The code is complicated, so we call output_asm_insn directly, and just
38024 return "". */
38026 const char *
38027 emit_fusion_gpr_load (rtx target, rtx mem)
38029 rtx addis_value;
38030 rtx addr;
38031 rtx load_offset;
38032 const char *load_str = NULL;
38033 machine_mode mode;
38035 if (GET_CODE (mem) == ZERO_EXTEND)
38036 mem = XEXP (mem, 0);
38038 gcc_assert (REG_P (target) && MEM_P (mem));
38040 addr = XEXP (mem, 0);
38041 fusion_split_address (addr, &addis_value, &load_offset);
38043 /* Now emit the load instruction to the same register. */
38044 mode = GET_MODE (mem);
38045 switch (mode)
38047 case E_QImode:
38048 load_str = "lbz";
38049 break;
38051 case E_HImode:
38052 load_str = "lhz";
38053 break;
38055 case E_SImode:
38056 case E_SFmode:
38057 load_str = "lwz";
38058 break;
38060 case E_DImode:
38061 case E_DFmode:
38062 gcc_assert (TARGET_POWERPC64);
38063 load_str = "ld";
38064 break;
38066 default:
38067 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38070 /* Emit the addis instruction. */
38071 emit_fusion_addis (target, addis_value);
38073 /* Emit the D-form load instruction. */
38074 emit_fusion_load_store (target, target, load_offset, load_str);
38076 return "";
38080 /* Return true if the peephole2 can combine a load/store involving a
38081 combination of an addis instruction and the memory operation. This was
38082 added to the ISA 3.0 (power9) hardware. */
38084 bool
38085 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38086 rtx addis_value, /* addis value. */
38087 rtx dest, /* destination (memory or register). */
38088 rtx src) /* source (register or memory). */
38090 rtx addr, mem, offset;
38091 machine_mode mode = GET_MODE (src);
38093 /* Validate arguments. */
38094 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38095 return false;
38097 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38098 return false;
38100 /* Ignore extend operations that are part of the load. */
38101 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38102 src = XEXP (src, 0);
38104 /* Test for memory<-register or register<-memory. */
38105 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38107 if (!MEM_P (dest))
38108 return false;
38110 mem = dest;
38113 else if (MEM_P (src))
38115 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38116 return false;
38118 mem = src;
38121 else
38122 return false;
38124 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38125 if (GET_CODE (addr) == PLUS)
38127 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38128 return false;
38130 return satisfies_constraint_I (XEXP (addr, 1));
38133 else if (GET_CODE (addr) == LO_SUM)
38135 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38136 return false;
38138 offset = XEXP (addr, 1);
38139 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38140 return small_toc_ref (offset, GET_MODE (offset));
38142 else if (TARGET_ELF && !TARGET_POWERPC64)
38143 return CONSTANT_P (offset);
38146 return false;
38149 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38150 load sequence.
38152 The operands are:
38153 operands[0] register set with addis
38154 operands[1] value set via addis
38155 operands[2] target register being loaded
38156 operands[3] D-form memory reference using operands[0].
38158 This is similar to the fusion introduced with power8, except it scales to
38159 both loads/stores and does not require the result register to be the same as
38160 the base register. At the moment, we only do this if the register set with
38161 addis is dead. */
38163 void
38164 expand_fusion_p9_load (rtx *operands)
38166 rtx tmp_reg = operands[0];
38167 rtx addis_value = operands[1];
38168 rtx target = operands[2];
38169 rtx orig_mem = operands[3];
38170 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38171 enum rtx_code plus_or_lo_sum;
38172 machine_mode target_mode = GET_MODE (target);
38173 machine_mode extend_mode = target_mode;
38174 machine_mode ptr_mode = Pmode;
38175 enum rtx_code extend = UNKNOWN;
38177 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38179 extend = GET_CODE (orig_mem);
38180 orig_mem = XEXP (orig_mem, 0);
38181 target_mode = GET_MODE (orig_mem);
38184 gcc_assert (MEM_P (orig_mem));
38186 orig_addr = XEXP (orig_mem, 0);
38187 plus_or_lo_sum = GET_CODE (orig_addr);
38188 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38190 offset = XEXP (orig_addr, 1);
38191 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38192 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38194 if (extend != UNKNOWN)
38195 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38197 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38198 UNSPEC_FUSION_P9);
38200 set = gen_rtx_SET (target, new_mem);
38201 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38202 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38203 emit_insn (insn);
38205 return;
38208 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38209 store sequence.
38211 The operands are:
38212 operands[0] register set with addis
38213 operands[1] value set via addis
38214 operands[2] target D-form memory being stored to
38215 operands[3] register being stored
38217 This is similar to the fusion introduced with power8, except it scales to
38218 both loads/stores and does not require the result register to be the same as
38219 the base register. At the moment, we only do this if the register set with
38220 addis is dead. */
38222 void
38223 expand_fusion_p9_store (rtx *operands)
38225 rtx tmp_reg = operands[0];
38226 rtx addis_value = operands[1];
38227 rtx orig_mem = operands[2];
38228 rtx src = operands[3];
38229 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38230 enum rtx_code plus_or_lo_sum;
38231 machine_mode target_mode = GET_MODE (orig_mem);
38232 machine_mode ptr_mode = Pmode;
38234 gcc_assert (MEM_P (orig_mem));
38236 orig_addr = XEXP (orig_mem, 0);
38237 plus_or_lo_sum = GET_CODE (orig_addr);
38238 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38240 offset = XEXP (orig_addr, 1);
38241 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38242 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38244 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38245 UNSPEC_FUSION_P9);
38247 set = gen_rtx_SET (new_mem, new_src);
38248 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38249 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38250 emit_insn (insn);
38252 return;
38255 /* Return a string to fuse an addis instruction with a load using extended
38256 fusion. The address that is used is the logical address that was formed
38257 during peephole2: (lo_sum (high) (low-part))
38259 The code is complicated, so we call output_asm_insn directly, and just
38260 return "". */
38262 const char *
38263 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38265 machine_mode mode = GET_MODE (reg);
38266 rtx hi;
38267 rtx lo;
38268 rtx addr;
38269 const char *load_string;
38270 int r;
38272 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38274 mem = XEXP (mem, 0);
38275 mode = GET_MODE (mem);
38278 if (GET_CODE (reg) == SUBREG)
38280 gcc_assert (SUBREG_BYTE (reg) == 0);
38281 reg = SUBREG_REG (reg);
38284 if (!REG_P (reg))
38285 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38287 r = REGNO (reg);
38288 if (FP_REGNO_P (r))
38290 if (mode == SFmode)
38291 load_string = "lfs";
38292 else if (mode == DFmode || mode == DImode)
38293 load_string = "lfd";
38294 else
38295 gcc_unreachable ();
38297 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38299 if (mode == SFmode)
38300 load_string = "lxssp";
38301 else if (mode == DFmode || mode == DImode)
38302 load_string = "lxsd";
38303 else
38304 gcc_unreachable ();
38306 else if (INT_REGNO_P (r))
38308 switch (mode)
38310 case E_QImode:
38311 load_string = "lbz";
38312 break;
38313 case E_HImode:
38314 load_string = "lhz";
38315 break;
38316 case E_SImode:
38317 case E_SFmode:
38318 load_string = "lwz";
38319 break;
38320 case E_DImode:
38321 case E_DFmode:
38322 if (!TARGET_POWERPC64)
38323 gcc_unreachable ();
38324 load_string = "ld";
38325 break;
38326 default:
38327 gcc_unreachable ();
38330 else
38331 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38333 if (!MEM_P (mem))
38334 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38336 addr = XEXP (mem, 0);
38337 fusion_split_address (addr, &hi, &lo);
38339 /* Emit the addis instruction. */
38340 emit_fusion_addis (tmp_reg, hi);
38342 /* Emit the D-form load instruction. */
38343 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38345 return "";
38348 /* Return a string to fuse an addis instruction with a store using extended
38349 fusion. The address that is used is the logical address that was formed
38350 during peephole2: (lo_sum (high) (low-part))
38352 The code is complicated, so we call output_asm_insn directly, and just
38353 return "". */
38355 const char *
38356 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38358 machine_mode mode = GET_MODE (reg);
38359 rtx hi;
38360 rtx lo;
38361 rtx addr;
38362 const char *store_string;
38363 int r;
38365 if (GET_CODE (reg) == SUBREG)
38367 gcc_assert (SUBREG_BYTE (reg) == 0);
38368 reg = SUBREG_REG (reg);
38371 if (!REG_P (reg))
38372 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38374 r = REGNO (reg);
38375 if (FP_REGNO_P (r))
38377 if (mode == SFmode)
38378 store_string = "stfs";
38379 else if (mode == DFmode)
38380 store_string = "stfd";
38381 else
38382 gcc_unreachable ();
38384 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38386 if (mode == SFmode)
38387 store_string = "stxssp";
38388 else if (mode == DFmode || mode == DImode)
38389 store_string = "stxsd";
38390 else
38391 gcc_unreachable ();
38393 else if (INT_REGNO_P (r))
38395 switch (mode)
38397 case E_QImode:
38398 store_string = "stb";
38399 break;
38400 case E_HImode:
38401 store_string = "sth";
38402 break;
38403 case E_SImode:
38404 case E_SFmode:
38405 store_string = "stw";
38406 break;
38407 case E_DImode:
38408 case E_DFmode:
38409 if (!TARGET_POWERPC64)
38410 gcc_unreachable ();
38411 store_string = "std";
38412 break;
38413 default:
38414 gcc_unreachable ();
38417 else
38418 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38420 if (!MEM_P (mem))
38421 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38423 addr = XEXP (mem, 0);
38424 fusion_split_address (addr, &hi, &lo);
38426 /* Emit the addis instruction. */
38427 emit_fusion_addis (tmp_reg, hi);
38429 /* Emit the D-form store instruction. */
38430 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38432 return "";
38435 #ifdef RS6000_GLIBC_ATOMIC_FENV
38436 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38437 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38438 #endif
38440 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38442 static void
38443 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);
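
  /* A note on the FPSCR image used below (orientation only; the ISA
     defines the authoritative layout): __builtin_mffs returns the FPSCR
     in the low 32 bits of a double.  Viewed as an integer, bits 0-1 of
     that word are the rounding mode (RN) and bit 2 is the non-IEEE mode
     bit (NI), which is why the "hold" mask keeps only 0x7 of the low
     word while clearing the exception flags and enables.  */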

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything in the lower 32 bits of the FPSCR image,
     including the exception bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
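
/* How the three sequences are consumed (paraphrasing the internals
   documentation for TARGET_ATOMIC_ASSIGN_EXPAND_FENV, not code in this
   file): for a C11 atomic compound assignment such as

       _Atomic double x;
       x += y;	/* becomes a compare-and-exchange loop  */

   the front end emits *hold before the arithmetic, *clear when a failed
   compare-and-exchange forces a retry, and *update once the result has
   been stored, so only the exceptions from the successful iteration are
   finally raised.  */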

void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
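
/* For illustration (a sketch, not from the sources): with V2DF inputs
   src1 = { a, b } and src2 = { c, d }, the xxpermdi pair above produces
   rtx_tmp0 = { a, c } and rtx_tmp1 = { b, d }, xvcdpsp narrows each to
   single precision in the even word slots, and the final vmrgew
   interleave yields dst = { (float) a, (float) b, (float) c,
   (float) d }.  */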

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
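
/* A brief note (my reading of the expanders, stated as an assumption):
   this is the integer analogue of rs6000_generate_float2_double_code
   above.  The inputs are V2DI vectors, narrowed to single precision with
   xvcvsxdsp (signed) or xvcvuxdsp (unsigned); the xxpermdi/vmrgew element
   shuffle is identical, so the same { a, b, c, d } ordering applies.  */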

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
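
/* Likewise a note (hedged; based on the conversion insns used): this is
   the reverse direction, packing two V2DF vectors into one V4SI of 32-bit
   integers via xvcvdpsxws/xvcvdpuxws, which truncate toward zero.  As I
   read the built-in tables, this backs the vec_signed2/vec_unsigned2
   family of built-ins.  */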

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
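
/* For illustration (a sketch of the effect, not code from this file):
   with fast-math style flags and optimization for speed, a kernel such as

       float rsqrt (float x) { return 1.0f / sqrtf (x); }

   may be expanded through the rsqrt optab into a reciprocal square-root
   estimate (frsqrte) refined by Newton-Raphson steps; when optimizing for
   size, or when RS6000_RECIP_AUTO_RSQRTE_P is false for the mode, the
   hook declines and the plain sqrt-and-divide sequence is kept.  */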

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
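
/* For example (illustrative only): a string literal such as
   "hello, world" that would otherwise get byte alignment is raised to
   word alignment (BITS_PER_WORD), so word-at-a-time copies and compares
   of constant strings stay aligned; under -Os on targets that do not
   require strict alignment, the extra padding is skipped.  */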

/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"