/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "hash-table.h"
#include "vec.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "ira.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "cgraph.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call
   to get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
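
/* For illustration: both range checks depend on the declaration order of
   rs6000_reg_type above, so IS_STD_REG_TYPE (VSX_REG_TYPE) and
   IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) are true, while
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false because GPR_REG_TYPE
   precedes VSX_REG_TYPE.  */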

/* Register classes we care about in secondary reload or in legitimate
   address checking.  We only need to worry about GPR, FPR, and AltiVec
   registers here, along with an ANY field that is the OR of the three
   register classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
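
/* For illustration: a mode that is valid in a register class, supports
   reg+offset addressing, and allows PRE_INC/PRE_DEC would carry the mask
   (RELOAD_REG_VALID | RELOAD_REG_OFFSET | RELOAD_REG_PRE_INCDEC) == 0x19;
   the mode_supports_* helpers below simply test the RELOAD_REG_ANY entry
   for one of these bits.  */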

/* For each mode, the reload insns needed and the valid address masks for
   each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
   "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
   "%rh8",  "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
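
/* For illustration: the macro maps %v0 to the MSB and %v31 to the LSB, so
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) == 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001.  */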

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P rs6000_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1639 /* Processor table. */
1640 struct rs6000_ptt
1642 const char *const name; /* Canonical processor name. */
1643 const enum processor_type processor; /* Processor type enum value. */
1644 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1647 static struct rs6000_ptt const processor_target_table[] =
1649 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1650 #include "rs6000-cpus.def"
1651 #undef RS6000_CPU
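/* Each RS6000_CPU ("name", CPU, FLAGS) line in rs6000-cpus.def expands via
   the macro above into one { "name", CPU, FLAGS } entry of this table.  */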
1654 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1655 name is invalid. */
1657 static int
1658 rs6000_cpu_name_lookup (const char *name)
1660 size_t i;
1662 if (name != NULL)
1664 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1665 if (! strcmp (name, processor_target_table[i].name))
1666 return (int)i;
1669 return -1;
1673 /* Return number of consecutive hard regs needed starting at reg REGNO
1674 to hold something of mode MODE.
1675 This is ordinarily the length in words of a value of mode MODE
1676 but can be less for certain modes in special long registers.
1678 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1679 scalar instructions. The upper 32 bits are only available to the
1680 SIMD instructions.
1682 POWER and PowerPC GPRs hold 32 bits worth;
1683 PowerPC64 GPRs and FPRs hold 64 bits worth. */
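/* For example, a 16-byte V4SImode value held in 4-byte GPRs needs
   (16 + 4 - 1) / 4 = 4 consecutive registers, while the same value in a
   16-byte AltiVec register needs just one.  */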
1685 static int
1686 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1688 unsigned HOST_WIDE_INT reg_size;
1690 /* TF/TD modes are special in that they always take 2 registers. */
1691 if (FP_REGNO_P (regno))
1692 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1693 ? UNITS_PER_VSX_WORD
1694 : UNITS_PER_FP_WORD);
1696 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1697 reg_size = UNITS_PER_SPE_WORD;
1699 else if (ALTIVEC_REGNO_P (regno))
1700 reg_size = UNITS_PER_ALTIVEC_WORD;
1702 /* The value returned for SCmode in the E500 double case is 2 for
1703 ABI compatibility; storing an SCmode value in a single register
1704 would require function_arg and rs6000_spe_function_arg to handle
1705 SCmode so as to pass the value correctly in a pair of
1706 registers. */
1707 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1708 && !DECIMAL_FLOAT_MODE_P (mode))
1709 reg_size = UNITS_PER_FP_WORD;
1711 else
1712 reg_size = UNITS_PER_WORD;
1714 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1717 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1718 MODE. */
1719 static int
1720 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1722 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1724 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1725 register combinations; we use PTImode where we need to deal with quad
1726 word memory operations. Don't allow quad words in the argument or frame
1727 pointer registers, just registers 0..31. */
1728 if (mode == PTImode)
1729 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1730 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1731 && ((regno & 1) == 0));
1733 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1734 implementations. Don't allow an item to be split between a FP register
1735 and an Altivec register. Allow TImode in all VSX registers if the user
1736 asked for it. */
1737 if (TARGET_VSX && VSX_REGNO_P (regno)
1738 && (VECTOR_MEM_VSX_P (mode)
1739 || reg_addr[mode].scalar_in_vmx_p
1740 || (TARGET_VSX_TIMODE && mode == TImode)
1741 || (TARGET_VADDUQM && mode == V1TImode)))
1743 if (FP_REGNO_P (regno))
1744 return FP_REGNO_P (last_regno);
1746 if (ALTIVEC_REGNO_P (regno))
1748 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1749 return 0;
1751 return ALTIVEC_REGNO_P (last_regno);
1755 /* The GPRs can hold any mode, but values bigger than one register
1756 cannot go past R31. */
1757 if (INT_REGNO_P (regno))
1758 return INT_REGNO_P (last_regno);
1760 /* The float registers (except for VSX vector modes) can only hold floating
1761 modes and DImode. */
1762 if (FP_REGNO_P (regno))
1764 if (SCALAR_FLOAT_MODE_P (mode)
1765 && (mode != TDmode || (regno % 2) == 0)
1766 && FP_REGNO_P (last_regno))
1767 return 1;
1769 if (GET_MODE_CLASS (mode) == MODE_INT
1770 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1771 return 1;
1773 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1774 && PAIRED_VECTOR_MODE (mode))
1775 return 1;
1777 return 0;
1780 /* The CR register can only hold CC modes. */
1781 if (CR_REGNO_P (regno))
1782 return GET_MODE_CLASS (mode) == MODE_CC;
1784 if (CA_REGNO_P (regno))
1785 return mode == BImode;
1788 /* AltiVec modes only in AltiVec registers. */
1788 if (ALTIVEC_REGNO_P (regno))
1789 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1790 || mode == V1TImode);
1792 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1793 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1794 return 1;
1796 /* We cannot put non-VSX TImode or PTImode anywhere except the general
1797 registers, and it must be able to fit within the register set. */
1799 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
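/* For example, the checks above accept TDmode in the FPRs only at even
   register numbers, matching the even/odd FPR pairs the hardware uses
   for 128-bit decimal values.  */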
1802 /* Print interesting facts about registers. */
1803 static void
1804 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1806 int r, m;
1808 for (r = first_regno; r <= last_regno; ++r)
1810 const char *comma = "";
1811 int len;
1813 if (first_regno == last_regno)
1814 fprintf (stderr, "%s:\t", reg_name);
1815 else
1816 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1818 len = 8;
1819 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1820 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1822 if (len > 70)
1824 fprintf (stderr, ",\n\t");
1825 len = 8;
1826 comma = "";
1829 if (rs6000_hard_regno_nregs[m][r] > 1)
1830 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1831 rs6000_hard_regno_nregs[m][r]);
1832 else
1833 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1835 comma = ", ";
1838 if (call_used_regs[r])
1840 if (len > 70)
1842 fprintf (stderr, ",\n\t");
1843 len = 8;
1844 comma = "";
1847 len += fprintf (stderr, "%s%s", comma, "call-used");
1848 comma = ", ";
1851 if (fixed_regs[r])
1853 if (len > 70)
1855 fprintf (stderr, ",\n\t");
1856 len = 8;
1857 comma = "";
1860 len += fprintf (stderr, "%s%s", comma, "fixed");
1861 comma = ", ";
1864 if (len > 70)
1866 fprintf (stderr, ",\n\t");
1867 comma = "";
1870 len += fprintf (stderr, "%sreg-class = %s", comma,
1871 reg_class_names[(int)rs6000_regno_regclass[r]]);
1872 comma = ", ";
1874 if (len > 70)
1876 fprintf (stderr, ",\n\t");
1877 comma = "";
1880 fprintf (stderr, "%sregno = %d\n", comma, r);
1884 static const char *
1885 rs6000_debug_vector_unit (enum rs6000_vector v)
1887 const char *ret;
1889 switch (v)
1891 case VECTOR_NONE: ret = "none"; break;
1892 case VECTOR_ALTIVEC: ret = "altivec"; break;
1893 case VECTOR_VSX: ret = "vsx"; break;
1894 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1895 case VECTOR_PAIRED: ret = "paired"; break;
1896 case VECTOR_SPE: ret = "spe"; break;
1897 case VECTOR_OTHER: ret = "other"; break;
1898 default: ret = "unknown"; break;
1901 return ret;
1904 /* Print the address masks in a human readable fashion. */
1905 DEBUG_FUNCTION void
1906 rs6000_debug_print_mode (ssize_t m)
1908 ssize_t rc;
1910 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
1911 for (rc = 0; rc < N_RELOAD_REG; rc++)
1913 addr_mask_type mask = reg_addr[m].addr_mask[rc];
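/* One letter per capability: v = valid, m = needs multiple registers,
   i = indexed (reg+reg) addressing, o = offsettable (reg+offset)
   addressing, then pre-increment/decrement and pre-modify.  */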
1914 fprintf (stderr,
1915 " %s: %c%c%c%c%c%c",
1916 reload_reg_map[rc].name,
1917 (mask & RELOAD_REG_VALID) != 0 ? 'v' : ' ',
1918 (mask & RELOAD_REG_MULTIPLE) != 0 ? 'm' : ' ',
1919 (mask & RELOAD_REG_INDEXED) != 0 ? 'i' : ' ',
1920 (mask & RELOAD_REG_OFFSET) != 0 ? 'o' : ' ',
1921 (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
1922 (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
1925 if (rs6000_vector_unit[m] != VECTOR_NONE
1926 || rs6000_vector_mem[m] != VECTOR_NONE
1927 || (reg_addr[m].reload_store != CODE_FOR_nothing)
1928 || (reg_addr[m].reload_load != CODE_FOR_nothing)
1929 || reg_addr[m].scalar_in_vmx_p)
1931 fprintf (stderr,
1932 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c Upper=%c",
1933 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
1934 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
1935 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
1936 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*',
1937 (reg_addr[m].scalar_in_vmx_p) ? 'y' : 'n');
1940 fputs ("\n", stderr);
1943 #define DEBUG_FMT_ID "%-32s= "
1944 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1945 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1946 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
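/* For example, fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size)
   prints the name left-justified in a 32-column field, then "= " and the
   decimal value.  */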
1948 /* Print various interesting information with -mdebug=reg. */
1949 static void
1950 rs6000_debug_reg_global (void)
1952 static const char *const tf[2] = { "false", "true" };
1953 const char *nl = (const char *)0;
1954 int m;
1955 size_t m1, m2, v;
1956 char costly_num[20];
1957 char nop_num[20];
1958 char flags_buffer[40];
1959 const char *costly_str;
1960 const char *nop_str;
1961 const char *trace_str;
1962 const char *abi_str;
1963 const char *cmodel_str;
1964 struct cl_target_option cl_opts;
1966 /* Modes we want tieable information on. */
1967 static const enum machine_mode print_tieable_modes[] = {
1968 QImode,
1969 HImode,
1970 SImode,
1971 DImode,
1972 TImode,
1973 PTImode,
1974 SFmode,
1975 DFmode,
1976 TFmode,
1977 SDmode,
1978 DDmode,
1979 TDmode,
1980 V8QImode,
1981 V4HImode,
1982 V2SImode,
1983 V16QImode,
1984 V8HImode,
1985 V4SImode,
1986 V2DImode,
1987 V1TImode,
1988 V32QImode,
1989 V16HImode,
1990 V8SImode,
1991 V4DImode,
1992 V2TImode,
1993 V2SFmode,
1994 V4SFmode,
1995 V2DFmode,
1996 V8SFmode,
1997 V4DFmode,
1998 CCmode,
1999 CCUNSmode,
2000 CCEQmode,
2003 /* Virtual regs we are interested in. */
2004 static const struct {
2005 int regno; /* register number. */
2006 const char *name; /* register name. */
2007 } virtual_regs[] = {
2008 { STACK_POINTER_REGNUM, "stack pointer:" },
2009 { TOC_REGNUM, "toc: " },
2010 { STATIC_CHAIN_REGNUM, "static chain: " },
2011 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2012 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2013 { ARG_POINTER_REGNUM, "arg pointer: " },
2014 { FRAME_POINTER_REGNUM, "frame pointer:" },
2015 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2016 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2017 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2018 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2019 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2020 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2021 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2022 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2023 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2026 fputs ("\nHard register information:\n", stderr);
2027 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2028 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2029 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2030 LAST_ALTIVEC_REGNO,
2031 "vs");
2032 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2033 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2034 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2035 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2036 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2037 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2038 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2039 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2041 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2042 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2043 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2045 fprintf (stderr,
2046 "\n"
2047 "d reg_class = %s\n"
2048 "f reg_class = %s\n"
2049 "v reg_class = %s\n"
2050 "wa reg_class = %s\n"
2051 "wd reg_class = %s\n"
2052 "wf reg_class = %s\n"
2053 "wg reg_class = %s\n"
2054 "wh reg_class = %s\n"
2055 "wi reg_class = %s\n"
2056 "wj reg_class = %s\n"
2057 "wk reg_class = %s\n"
2058 "wl reg_class = %s\n"
2059 "wm reg_class = %s\n"
2060 "wr reg_class = %s\n"
2061 "ws reg_class = %s\n"
2062 "wt reg_class = %s\n"
2063 "wu reg_class = %s\n"
2064 "wv reg_class = %s\n"
2065 "ww reg_class = %s\n"
2066 "wx reg_class = %s\n"
2067 "wy reg_class = %s\n"
2068 "wz reg_class = %s\n"
2069 "\n",
2070 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2071 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2072 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2073 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2074 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2075 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2076 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2077 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2078 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2079 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2080 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2081 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2082 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2083 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2084 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2085 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2086 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2087 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2088 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2089 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2090 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2091 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2093 nl = "\n";
2094 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2095 rs6000_debug_print_mode (m);
2097 fputs ("\n", stderr);
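/* Report which of the modes listed above can share a hard register
   without a reload (MODES_TIEABLE_P), one line per mode that ties.  */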
2099 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2101 enum machine_mode mode1 = print_tieable_modes[m1];
2102 bool first_time = true;
2104 nl = (const char *)0;
2105 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2107 enum machine_mode mode2 = print_tieable_modes[m2];
2108 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2110 if (first_time)
2112 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2113 nl = "\n";
2114 first_time = false;
2117 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2121 if (!first_time)
2122 fputs ("\n", stderr);
2125 if (nl)
2126 fputs (nl, stderr);
2128 if (rs6000_recip_control)
2130 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2132 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2133 if (rs6000_recip_bits[m])
2135 fprintf (stderr,
2136 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2137 GET_MODE_NAME (m),
2138 (RS6000_RECIP_AUTO_RE_P (m)
2139 ? "auto"
2140 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2141 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2142 ? "auto"
2143 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2146 fputs ("\n", stderr);
2149 if (rs6000_cpu_index >= 0)
2151 const char *name = processor_target_table[rs6000_cpu_index].name;
2152 HOST_WIDE_INT flags
2153 = processor_target_table[rs6000_cpu_index].target_enable;
2155 sprintf (flags_buffer, "-mcpu=%s flags", name);
2156 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2158 else
2159 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2161 if (rs6000_tune_index >= 0)
2163 const char *name = processor_target_table[rs6000_tune_index].name;
2164 HOST_WIDE_INT flags
2165 = processor_target_table[rs6000_tune_index].target_enable;
2167 sprintf (flags_buffer, "-mtune=%s flags", name);
2168 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2170 else
2171 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2173 cl_target_option_save (&cl_opts, &global_options);
2174 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2175 rs6000_isa_flags);
2177 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2178 rs6000_isa_flags_explicit);
2180 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2181 rs6000_builtin_mask);
2183 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2185 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2186 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2188 switch (rs6000_sched_costly_dep)
2190 case max_dep_latency:
2191 costly_str = "max_dep_latency";
2192 break;
2194 case no_dep_costly:
2195 costly_str = "no_dep_costly";
2196 break;
2198 case all_deps_costly:
2199 costly_str = "all_deps_costly";
2200 break;
2202 case true_store_to_load_dep_costly:
2203 costly_str = "true_store_to_load_dep_costly";
2204 break;
2206 case store_to_load_dep_costly:
2207 costly_str = "store_to_load_dep_costly";
2208 break;
2210 default:
2211 costly_str = costly_num;
2212 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2213 break;
2216 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2218 switch (rs6000_sched_insert_nops)
2220 case sched_finish_regroup_exact:
2221 nop_str = "sched_finish_regroup_exact";
2222 break;
2224 case sched_finish_pad_groups:
2225 nop_str = "sched_finish_pad_groups";
2226 break;
2228 case sched_finish_none:
2229 nop_str = "sched_finish_none";
2230 break;
2232 default:
2233 nop_str = nop_num;
2234 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2235 break;
2238 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2240 switch (rs6000_sdata)
2242 default:
2243 case SDATA_NONE:
2244 break;
2246 case SDATA_DATA:
2247 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2248 break;
2250 case SDATA_SYSV:
2251 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2252 break;
2254 case SDATA_EABI:
2255 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2256 break;
2260 switch (rs6000_traceback)
2262 case traceback_default: trace_str = "default"; break;
2263 case traceback_none: trace_str = "none"; break;
2264 case traceback_part: trace_str = "part"; break;
2265 case traceback_full: trace_str = "full"; break;
2266 default: trace_str = "unknown"; break;
2269 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2271 switch (rs6000_current_cmodel)
2273 case CMODEL_SMALL: cmodel_str = "small"; break;
2274 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2275 case CMODEL_LARGE: cmodel_str = "large"; break;
2276 default: cmodel_str = "unknown"; break;
2279 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2281 switch (rs6000_current_abi)
2283 case ABI_NONE: abi_str = "none"; break;
2284 case ABI_AIX: abi_str = "aix"; break;
2285 case ABI_ELFv2: abi_str = "ELFv2"; break;
2286 case ABI_V4: abi_str = "V4"; break;
2287 case ABI_DARWIN: abi_str = "darwin"; break;
2288 default: abi_str = "unknown"; break;
2291 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2293 if (rs6000_altivec_abi)
2294 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2296 if (rs6000_spe_abi)
2297 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2299 if (rs6000_darwin64_abi)
2300 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2302 if (rs6000_float_gprs)
2303 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2305 fprintf (stderr, DEBUG_FMT_S, "fprs",
2306 (TARGET_FPRS ? "true" : "false"));
2308 fprintf (stderr, DEBUG_FMT_S, "single_float",
2309 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2311 fprintf (stderr, DEBUG_FMT_S, "double_float",
2312 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2314 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2315 (TARGET_SOFT_FLOAT ? "true" : "false"));
2317 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2318 (TARGET_E500_SINGLE ? "true" : "false"));
2320 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2321 (TARGET_E500_DOUBLE ? "true" : "false"));
2323 if (TARGET_LINK_STACK)
2324 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2326 if (targetm.lra_p ())
2327 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2329 if (TARGET_P8_FUSION)
2330 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2331 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2333 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2334 TARGET_SECURE_PLT ? "secure" : "bss");
2335 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2336 aix_struct_return ? "aix" : "sysv");
2337 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2338 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2339 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2340 tf[!!rs6000_align_branch_targets]);
2341 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2342 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2343 rs6000_long_double_type_size);
2344 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2345 (int)rs6000_sched_restricted_insns_priority);
2346 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2347 (int)END_BUILTINS);
2348 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2349 (int)RS6000_BUILTIN_COUNT);
2351 if (TARGET_VSX)
2352 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2353 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2357 /* Update the addr mask bits in reg_addr to help secondary reload and the
2358 legitimate address support (TARGET_LEGITIMATE_ADDRESS_P) figure out the
2359 appropriate addressing to use. */
2361 static void
2362 rs6000_setup_reg_addr_masks (void)
2364 ssize_t rc, reg, m, nregs;
2365 addr_mask_type any_addr_mask, addr_mask;
2367 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2369 enum machine_mode m2 = (enum machine_mode)m;
2371 /* SDmode is special in that we want to access it only via REG+REG
2372 addressing on power7 and above, since we want to use the LFIWZX and
2373 STFIWZX instructions to load it. */
2374 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2376 any_addr_mask = 0;
2377 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2379 addr_mask = 0;
2380 reg = reload_reg_map[rc].reg;
2382 /* Can mode values go in the GPR/FPR/Altivec registers? */
2383 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2385 nregs = rs6000_hard_regno_nregs[m][reg];
2386 addr_mask |= RELOAD_REG_VALID;
2388 /* Indicate if the mode takes more than 1 physical register. If
2389 it takes a single register, indicate it can do REG+REG
2390 addressing. */
2391 if (nregs > 1 || m == BLKmode)
2392 addr_mask |= RELOAD_REG_MULTIPLE;
2393 else
2394 addr_mask |= RELOAD_REG_INDEXED;
2396 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2397 addressing. Restrict addressing on SPE for 64-bit types
2398 because of the SUBREG hackery used to address 64-bit floats in
2399 '32-bit' GPRs. To simplify secondary reload, don't allow
2400 update forms on scalar floating point types that can go in the
2401 upper registers. */
2403 if (TARGET_UPDATE
2404 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2405 && GET_MODE_SIZE (m2) <= 8
2406 && !VECTOR_MODE_P (m2)
2407 && !COMPLEX_MODE_P (m2)
2408 && !indexed_only_p
2409 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8)
2410 && !reg_addr[m2].scalar_in_vmx_p)
2412 addr_mask |= RELOAD_REG_PRE_INCDEC;
2414 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2415 we don't allow PRE_MODIFY for some multi-register
2416 operations. */
2417 switch (m)
2419 default:
2420 addr_mask |= RELOAD_REG_PRE_MODIFY;
2421 break;
2423 case DImode:
2424 if (TARGET_POWERPC64)
2425 addr_mask |= RELOAD_REG_PRE_MODIFY;
2426 break;
2428 case DFmode:
2429 case DDmode:
2430 if (TARGET_DF_INSN)
2431 addr_mask |= RELOAD_REG_PRE_MODIFY;
2432 break;
2437 /* GPR and FPR registers can do REG+OFFSET addressing, except
2438 possibly for SDmode. */
2439 if ((addr_mask != 0) && !indexed_only_p
2440 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2441 addr_mask |= RELOAD_REG_OFFSET;
2443 reg_addr[m].addr_mask[rc] = addr_mask;
2444 any_addr_mask |= addr_mask;
2447 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
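/* For example, on a 64-bit target with TARGET_UPDATE, DImode in the GPRs
   ends up with the valid, indexed, offset, pre-increment and pre-modify
   bits all set.  */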
2452 /* Initialize the various global tables that are based on register size. */
2453 static void
2454 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2456 ssize_t r, m, c;
2457 int align64;
2458 int align32;
2460 /* Precalculate REGNO_REG_CLASS. */
2461 rs6000_regno_regclass[0] = GENERAL_REGS;
2462 for (r = 1; r < 32; ++r)
2463 rs6000_regno_regclass[r] = BASE_REGS;
2465 for (r = 32; r < 64; ++r)
2466 rs6000_regno_regclass[r] = FLOAT_REGS;
2468 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2469 rs6000_regno_regclass[r] = NO_REGS;
2471 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2472 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2474 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2475 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2476 rs6000_regno_regclass[r] = CR_REGS;
2478 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2479 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2480 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2481 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2482 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2483 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2484 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2485 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2486 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2487 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2488 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2489 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2491 /* Precalculate register class to simpler reload register class. We don't
2492 need all of the register classes that are combinations of different
2493 classes, just the simple ones that have constraint letters. */
2494 for (c = 0; c < N_REG_CLASSES; c++)
2495 reg_class_to_reg_type[c] = NO_REG_TYPE;
2497 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2498 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2499 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2500 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2501 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2502 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2503 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2504 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2505 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2506 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2507 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2508 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2510 if (TARGET_VSX)
2512 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2513 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2515 else
2517 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2518 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2521 /* Precalculate the valid memory formats as well as the vector information;
2522 this must be set up before the rs6000_hard_regno_nregs_internal calls
2523 below. */
2524 gcc_assert ((int)VECTOR_NONE == 0);
2525 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2526 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2528 gcc_assert ((int)CODE_FOR_nothing == 0);
2529 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2531 gcc_assert ((int)NO_REGS == 0);
2532 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2534 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
2535 controls whether the compiler believes it can use native alignment or must still use 128-bit alignment. */
2536 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2538 align64 = 64;
2539 align32 = 32;
2541 else
2543 align64 = 128;
2544 align32 = 128;
2547 /* V2DF mode, VSX only. */
2548 if (TARGET_VSX)
2550 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2551 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2552 rs6000_vector_align[V2DFmode] = align64;
2555 /* V4SF mode, either VSX or Altivec. */
2556 if (TARGET_VSX)
2558 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2559 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2560 rs6000_vector_align[V4SFmode] = align32;
2562 else if (TARGET_ALTIVEC)
2564 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2565 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2566 rs6000_vector_align[V4SFmode] = align32;
2569 /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
2570 and stores. */
2571 if (TARGET_ALTIVEC)
2573 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2574 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2575 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2576 rs6000_vector_align[V4SImode] = align32;
2577 rs6000_vector_align[V8HImode] = align32;
2578 rs6000_vector_align[V16QImode] = align32;
2580 if (TARGET_VSX)
2582 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2583 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2584 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2586 else
2588 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2589 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2590 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2594 /* V2DImode: full mode support depends on the ISA 2.07 vector instructions. Allow it under
2595 VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2596 if (TARGET_VSX)
2598 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2599 rs6000_vector_unit[V2DImode]
2600 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2601 rs6000_vector_align[V2DImode] = align64;
2603 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2604 rs6000_vector_unit[V1TImode]
2605 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2606 rs6000_vector_align[V1TImode] = 128;
2609 /* DFmode, see if we want to use the VSX unit. */
2610 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2612 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2613 rs6000_vector_mem[DFmode]
2614 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2615 rs6000_vector_align[DFmode] = align64;
2618 /* Allow TImode in VSX register and set the VSX memory macros. */
2619 if (TARGET_VSX && TARGET_VSX_TIMODE)
2621 rs6000_vector_mem[TImode] = VECTOR_VSX;
2622 rs6000_vector_align[TImode] = align64;
2625 /* TODO add SPE and paired floating point vector support. */
2627 /* Set up the register class constraints that depend on compile-time
2628 switches. When the VSX code was added, different constraints were added
2629 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2630 of the VSX registers are used. The register classes for scalar floating
2631 point types are set based on whether we allow that type into the upper
2632 (Altivec) registers. GCC has register classes to target the Altivec
2633 registers for load/store operations, so that a VSX memory operation can
2634 be selected instead of the traditional floating point operation. The
2635 constraints are:
2637 d - Register class to use with traditional DFmode instructions.
2638 f - Register class to use with traditional SFmode instructions.
2639 v - Altivec register.
2640 wa - Any VSX register.
2641 wc - Reserved to represent individual CR bits (used in LLVM).
2642 wd - Preferred register class for V2DFmode.
2643 wf - Preferred register class for V4SFmode.
2644 wg - Float register for power6x move insns.
2645 wh - FP register for direct move instructions.
2646 wi - FP or VSX register to hold 64-bit integers for VSX insns.
2647 wj - FP or VSX register to hold 64-bit integers for direct moves.
2648 wk - FP or VSX register to hold 64-bit doubles for direct moves.
2649 wl - Float register if we can do 32-bit signed int loads.
2650 wm - VSX register for ISA 2.07 direct move operations.
2651 wn - always NO_REGS.
2652 wr - GPR if 64-bit mode is permitted.
2653 ws - Register class to do ISA 2.06 DF operations.
2654 wt - VSX register for TImode in VSX registers.
2655 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2656 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2657 ww - Register class to do SF conversions in with VSX operations.
2658 wx - Float register if we can do 32-bit int stores.
2659 wy - Register class to do ISA 2.07 SF operations.
2660 wz - Float register if we can do 32-bit unsigned int loads. */
2662 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2663 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
2665 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2666 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
2668 if (TARGET_VSX)
2670 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2671 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
2672 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
2673 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS; /* DImode */
2675 if (TARGET_VSX_TIMODE)
2676 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
2678 if (TARGET_UPPER_REGS_DF) /* DFmode */
2680 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2681 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2683 else
2684 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2687 /* Add conditional constraints based on various options, to allow us to
2688 collapse multiple insn patterns. */
2689 if (TARGET_ALTIVEC)
2690 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2692 if (TARGET_MFPGPR) /* DFmode */
2693 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2695 if (TARGET_LFIWAX)
2696 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
2698 if (TARGET_DIRECT_MOVE)
2700 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
2701 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
2702 = rs6000_constraints[RS6000_CONSTRAINT_wi];
2703 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
2704 = rs6000_constraints[RS6000_CONSTRAINT_ws];
2705 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2708 if (TARGET_POWERPC64)
2709 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2711 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
2713 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2714 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2715 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2717 else if (TARGET_P8_VECTOR)
2719 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2720 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2722 else if (TARGET_VSX)
2723 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2725 if (TARGET_STFIWX)
2726 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
2728 if (TARGET_LFIWZX)
2729 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
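/* The machine description then uses these letters in insn alternatives;
   e.g. an alternative written with "ws" resolves to NO_REGS and is
   effectively disabled when the target cannot do ISA 2.06 DFmode
   operations, letting one pattern serve several ISA levels.  */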
2731 /* Set up the reload helper and direct move functions. */
2732 if (TARGET_VSX || TARGET_ALTIVEC)
2734 if (TARGET_64BIT)
2736 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2737 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2738 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2739 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2740 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2741 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2742 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2743 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2744 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2745 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2746 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2747 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2748 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2749 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2750 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2752 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2753 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2754 reg_addr[DFmode].scalar_in_vmx_p = true;
2755 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2756 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2758 if (TARGET_P8_VECTOR)
2760 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2761 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2762 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2763 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2764 if (TARGET_UPPER_REGS_SF)
2765 reg_addr[SFmode].scalar_in_vmx_p = true;
2767 if (TARGET_VSX_TIMODE)
2769 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2770 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2772 if (TARGET_DIRECT_MOVE)
2774 if (TARGET_POWERPC64)
2776 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2777 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2778 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2779 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2780 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2781 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2782 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2783 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2784 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2786 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2787 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2788 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2789 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2790 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2791 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2792 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2793 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2794 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2796 else
2798 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2799 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2800 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2804 else
2806 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2807 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2808 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2809 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2810 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2811 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2812 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2813 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2814 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2815 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2816 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2817 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2818 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2819 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2820 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2822 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2823 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2824 reg_addr[DFmode].scalar_in_vmx_p = true;
2825 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2826 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2828 if (TARGET_P8_VECTOR)
2830 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2831 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2832 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2833 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2834 if (TARGET_UPPER_REGS_SF)
2835 reg_addr[SFmode].scalar_in_vmx_p = true;
2837 if (TARGET_VSX_TIMODE)
2839 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2840 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2845 /* Precalculate HARD_REGNO_NREGS. */
2846 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2847 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2848 rs6000_hard_regno_nregs[m][r]
2849 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2851 /* Precalculate HARD_REGNO_MODE_OK. */
2852 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2853 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2854 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2855 rs6000_hard_regno_mode_ok_p[m][r] = true;
2857 /* Precalculate CLASS_MAX_NREGS sizes. */
2858 for (c = 0; c < LIM_REG_CLASSES; ++c)
2860 int reg_size;
2862 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2863 reg_size = UNITS_PER_VSX_WORD;
2865 else if (c == ALTIVEC_REGS)
2866 reg_size = UNITS_PER_ALTIVEC_WORD;
2868 else if (c == FLOAT_REGS)
2869 reg_size = UNITS_PER_FP_WORD;
2871 else
2872 reg_size = UNITS_PER_WORD;
2874 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2876 enum machine_mode m2 = (enum machine_mode)m;
2877 int reg_size2 = reg_size;
2879 /* TFmode/TDmode always take 2 registers, even in VSX. */
2880 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2881 && (m == TDmode || m == TFmode))
2882 reg_size2 = UNITS_PER_FP_WORD;
2884 rs6000_class_max_nregs[m][c]
2885 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
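/* For example, 16-byte TFmode in FLOAT_REGS yields (16 + 8 - 1) / 8 = 2
   registers even when the class is VSX-capable, because of the override
   above.  */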
2889 if (TARGET_E500_DOUBLE)
2890 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2892 /* Calculate which modes should automatically generate code to use the
2893 reciprocal divide and square root instructions. In the future, we might
2894 automatically generate the instructions even if the user did not specify
2895 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2896 not accurate enough. */
2897 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2898 if (TARGET_FRES)
2899 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2900 if (TARGET_FRE)
2901 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2902 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2903 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2904 if (VECTOR_UNIT_VSX_P (V2DFmode))
2905 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2907 if (TARGET_FRSQRTES)
2908 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2909 if (TARGET_FRSQRTE)
2910 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2911 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2912 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2913 if (VECTOR_UNIT_VSX_P (V2DFmode))
2914 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2916 if (rs6000_recip_control)
2918 if (!flag_finite_math_only)
2919 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2920 if (flag_trapping_math)
2921 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2922 if (!flag_reciprocal_math)
2923 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2924 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2926 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2927 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2928 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2930 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2931 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2932 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2934 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2935 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2936 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2938 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2939 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2940 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2942 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2943 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2944 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2946 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2947 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2948 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2950 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2951 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2952 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2954 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2955 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2956 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
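/* With the AUTO bits set (and the -ffast-math style flags checked above),
   a division such as x / y can later be expanded as x * (1/y) using the
   hardware estimate refined by Newton-Raphson iterations.  */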
2960 /* Update the addr mask bits in reg_addr to help secondary reload and the
2961 legitimate address support (TARGET_LEGITIMATE_ADDRESS_P) figure out the
2962 appropriate addressing to use. */
2963 rs6000_setup_reg_addr_masks ();
2965 if (global_init_p || TARGET_DEBUG_TARGET)
2967 if (TARGET_DEBUG_REG)
2968 rs6000_debug_reg_global ();
2970 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2971 fprintf (stderr,
2972 "SImode variable mult cost = %d\n"
2973 "SImode constant mult cost = %d\n"
2974 "SImode short constant mult cost = %d\n"
2975 "DImode multipliciation cost = %d\n"
2976 "SImode division cost = %d\n"
2977 "DImode division cost = %d\n"
2978 "Simple fp operation cost = %d\n"
2979 "DFmode multiplication cost = %d\n"
2980 "SFmode division cost = %d\n"
2981 "DFmode division cost = %d\n"
2982 "cache line size = %d\n"
2983 "l1 cache size = %d\n"
2984 "l2 cache size = %d\n"
2985 "simultaneous prefetches = %d\n"
2986 "\n",
2987 rs6000_cost->mulsi,
2988 rs6000_cost->mulsi_const,
2989 rs6000_cost->mulsi_const9,
2990 rs6000_cost->muldi,
2991 rs6000_cost->divsi,
2992 rs6000_cost->divdi,
2993 rs6000_cost->fp,
2994 rs6000_cost->dmul,
2995 rs6000_cost->sdiv,
2996 rs6000_cost->ddiv,
2997 rs6000_cost->cache_line_size,
2998 rs6000_cost->l1_cache_size,
2999 rs6000_cost->l2_cache_size,
3000 rs6000_cost->simultaneous_prefetches);
3004 #if TARGET_MACHO
3005 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3007 static void
3008 darwin_rs6000_override_options (void)
3010 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3011 off. */
3012 rs6000_altivec_abi = 1;
3013 TARGET_ALTIVEC_VRSAVE = 1;
3014 rs6000_current_abi = ABI_DARWIN;
3016 if (DEFAULT_ABI == ABI_DARWIN
3017 && TARGET_64BIT)
3018 darwin_one_byte_bool = 1;
3020 if (TARGET_64BIT && ! TARGET_POWERPC64)
3022 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3023 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3025 if (flag_mkernel)
3027 rs6000_default_long_calls = 1;
3028 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3031 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3032 Altivec. */
3033 if (!flag_mkernel && !flag_apple_kext
3034 && TARGET_64BIT
3035 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3036 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3038 /* Unless the user (not the configurer) has explicitly overridden
3039 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3040 G4 unless targeting the kernel. */
3041 if (!flag_mkernel
3042 && !flag_apple_kext
3043 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3044 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3045 && ! global_options_set.x_rs6000_cpu_index)
3047 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3050 #endif
3052 /* If not otherwise specified by a target, make 'long double' equivalent to
3053 'double'. */
3055 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3056 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3057 #endif
3059 /* Return the builtin mask of the various options in use that could affect
3060 which builtins are available. In the past we used target_flags, but we've
3061 run out of bits, and some options like SPE and PAIRED are no longer in
3062 target_flags. */
3064 HOST_WIDE_INT
3065 rs6000_builtin_mask_calculate (void)
3067 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3068 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3069 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3070 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3071 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3072 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3073 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3074 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3075 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3076 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3077 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3078 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3079 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3080 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3081 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3082 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0));
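/* For example, compiling with -maltivec -mvsx produces a mask with both
   RS6000_BTM_ALTIVEC and RS6000_BTM_VSX set, enabling the builtins that
   are gated on those bits.  */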
3085 /* Override command line options. Mostly we process the processor type and
3086 sometimes adjust other TARGET_ options. */
3088 static bool
3089 rs6000_option_override_internal (bool global_init_p)
3091 bool ret = true;
3092 bool have_cpu = false;
3094 /* The default cpu requested at configure time, if any. */
3095 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3097 HOST_WIDE_INT set_masks;
3098 int cpu_index;
3099 int tune_index;
3100 struct cl_target_option *main_target_opt
3101 = ((global_init_p || target_option_default_node == NULL)
3102 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3104 /* Remember the explicit arguments. */
3105 if (global_init_p)
3106 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3108 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3109 library functions, so warn about it. The flag may be useful for
3110 performance studies from time to time though, so don't disable it
3111 entirely. */
3112 if (global_options_set.x_rs6000_alignment_flags
3113 && rs6000_alignment_flags == MASK_ALIGN_POWER
3114 && DEFAULT_ABI == ABI_DARWIN
3115 && TARGET_64BIT)
3116 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3117 " it is incompatible with the installed C and C++ libraries");
3119 /* Numerous experiments show that IRA based loop pressure
3120 calculation works better for RTL loop invariant motion on targets
3121 with enough (>= 32) registers. It is an expensive optimization,
3122 so it is only enabled when optimizing for peak performance. */
3123 if (optimize >= 3 && global_init_p
3124 && !global_options_set.x_flag_ira_loop_pressure)
3125 flag_ira_loop_pressure = 1;
3127 /* Set the pointer size. */
3128 if (TARGET_64BIT)
3130 rs6000_pmode = (int)DImode;
3131 rs6000_pointer_size = 64;
3133 else
3135 rs6000_pmode = (int)SImode;
3136 rs6000_pointer_size = 32;
3139 /* Some OSs don't support saving the high part of 64-bit registers on context
3140 switch. Other OSs don't support saving Altivec registers. On those OSs,
3141 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3142 if the user wants either, the user must explicitly specify them and we
3143 won't interfere with the user's specification. */
3145 set_masks = POWERPC_MASKS;
3146 #ifdef OS_MISSING_POWERPC64
3147 if (OS_MISSING_POWERPC64)
3148 set_masks &= ~OPTION_MASK_POWERPC64;
3149 #endif
3150 #ifdef OS_MISSING_ALTIVEC
3151 if (OS_MISSING_ALTIVEC)
3152 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3153 #endif
3155 /* Don't let the processor default override flags that were given explicitly. */
3156 set_masks &= ~rs6000_isa_flags_explicit;
3158 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3159 the cpu in a target attribute or pragma, but did not specify a tuning
3160 option, use the cpu for the tuning option rather than the option specified
3161 with -mtune on the command line. Process a '--with-cpu' configuration
3162 request as an implicit --cpu. */
3163 if (rs6000_cpu_index >= 0)
3165 cpu_index = rs6000_cpu_index;
3166 have_cpu = true;
3168 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3170 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3171 have_cpu = true;
3173 else if (implicit_cpu)
3175 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3176 have_cpu = true;
3178 else
3180 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
3181 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3182 have_cpu = false;
3185 gcc_assert (cpu_index >= 0);
3187 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3188 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3189 with those from the cpu, except for options that were explicitly set. If
3190 we don't have a cpu, do not override the target bits set in
3191 TARGET_DEFAULT. */
3192 if (have_cpu)
3194 rs6000_isa_flags &= ~set_masks;
3195 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3196 & set_masks);
3198 else
3199 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3200 & ~rs6000_isa_flags_explicit);
3202 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3203 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3204 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3205 to using rs6000_isa_flags, we need to do the initialization here. */
3206 if (!have_cpu)
3207 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
3209 if (rs6000_tune_index >= 0)
3210 tune_index = rs6000_tune_index;
3211 else if (have_cpu)
3212 rs6000_tune_index = tune_index = cpu_index;
3213 else
3215 size_t i;
3216 enum processor_type tune_proc
3217 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3219 tune_index = -1;
3220 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3221 if (processor_target_table[i].processor == tune_proc)
3223 rs6000_tune_index = tune_index = i;
3224 break;
3228 gcc_assert (tune_index >= 0);
3229 rs6000_cpu = processor_target_table[tune_index].processor;
3231 /* Pick defaults for SPE related control flags. Do this early to make sure
3232 that the TARGET_ macros are representative ASAP. */
3234 int spe_capable_cpu =
3235 (rs6000_cpu == PROCESSOR_PPC8540
3236 || rs6000_cpu == PROCESSOR_PPC8548);
3238 if (!global_options_set.x_rs6000_spe_abi)
3239 rs6000_spe_abi = spe_capable_cpu;
3241 if (!global_options_set.x_rs6000_spe)
3242 rs6000_spe = spe_capable_cpu;
3244 if (!global_options_set.x_rs6000_float_gprs)
3245 rs6000_float_gprs =
3246 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3247 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3248 : 0);
3251 if (global_options_set.x_rs6000_spe_abi
3252 && rs6000_spe_abi
3253 && !TARGET_SPE_ABI)
3254 error ("not configured for SPE ABI");
3256 if (global_options_set.x_rs6000_spe
3257 && rs6000_spe
3258 && !TARGET_SPE)
3259 error ("not configured for SPE instruction set");
3261 if (main_target_opt != NULL
3262 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3263 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3264 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3265 error ("target attribute or pragma changes SPE ABI");
3267 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3268 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3269 || rs6000_cpu == PROCESSOR_PPCE5500)
3271 if (TARGET_ALTIVEC)
3272 error ("AltiVec not supported in this target");
3273 if (TARGET_SPE)
3274 error ("SPE not supported in this target");
3276 if (rs6000_cpu == PROCESSOR_PPCE6500)
3278 if (TARGET_SPE)
3279 error ("SPE not supported in this target");
3282 /* Disable Cell microcode if we are optimizing for the Cell
3283 and not optimizing for size. */
3284 if (rs6000_gen_cell_microcode == -1)
3285 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3286 && !optimize_size);
3288 /* If we are optimizing big endian systems for space and it's OK to
3289 use instructions that would be microcoded on the Cell, use the
3290 load/store multiple and string instructions. */
3291 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3292 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3293 | OPTION_MASK_STRING);
3295 /* Don't allow -mmultiple or -mstring on little endian systems
3296 unless the cpu is a 750, because the hardware doesn't support the
3297 instructions used in little endian mode, and causes an alignment
3298 trap. The 750 does not cause an alignment trap (except when the
3299 target is unaligned). */
3301 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3303 if (TARGET_MULTIPLE)
3305 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3306 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3307 warning (0, "-mmultiple is not supported on little endian systems");
3310 if (TARGET_STRING)
3312 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3313 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3314 warning (0, "-mstring is not supported on little endian systems");
3318 /* If little-endian, default to -mstrict-align on older processors.
3319 Testing for htm matches power8 and later. */
3320 if (!BYTES_BIG_ENDIAN
3321 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3322 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3324 /* -maltivec={le,be} implies -maltivec. */
3325 if (rs6000_altivec_element_order != 0)
3326 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3328 /* Disallow -maltivec=le in big endian mode for now. This is not
3329 known to be useful for anyone. */
3330 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3332 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3333 rs6000_altivec_element_order = 0;
3336 /* Add some warnings for VSX. */
3337 if (TARGET_VSX)
3339 const char *msg = NULL;
3340 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3341 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3343 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3344 msg = N_("-mvsx requires hardware floating point");
3345 else
3347 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3348 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3351 else if (TARGET_PAIRED_FLOAT)
3352 msg = N_("-mvsx and -mpaired are incompatible");
3353 else if (TARGET_AVOID_XFORM > 0)
3354 msg = N_("-mvsx needs indexed addressing");
3355 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3356 & OPTION_MASK_ALTIVEC))
3358 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3359 msg = N_("-mvsx and -mno-altivec are incompatible");
3360 else
3361 msg = N_("-mno-altivec disables vsx");
3364 if (msg)
3366 warning (0, msg);
3367 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3368 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3372 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3373 the -mcpu setting to enable options that conflict. */
3374 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3375 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3376 | OPTION_MASK_ALTIVEC
3377 | OPTION_MASK_VSX)) != 0)
3378 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3379 | OPTION_MASK_DIRECT_MOVE)
3380 & ~rs6000_isa_flags_explicit);
3382 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3383 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3385 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3386 unless the user explicitly used the -mno-<option> to disable the code. */
3387 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3388 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3389 else if (TARGET_VSX)
3390 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3391 else if (TARGET_POPCNTD)
3392 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3393 else if (TARGET_DFP)
3394 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3395 else if (TARGET_CMPB)
3396 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3397 else if (TARGET_FPRND)
3398 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3399 else if (TARGET_POPCNTB)
3400 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3401 else if (TARGET_ALTIVEC)
3402 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
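  /* For example, -mvsx on its own also pulls in the rest of the ISA 2.06
     "server" mask (per the ISA_2_6_MASKS_SERVER definition elsewhere in
     the port, this includes options such as AltiVec and popcntd), except
     for any bit the user explicitly disabled on the command line.  */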
3404 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3406 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3407 error ("-mcrypto requires -maltivec");
3408 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3411 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3413 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3414 error ("-mdirect-move requires -mvsx");
3415 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3418 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3420 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3421 error ("-mpower8-vector requires -maltivec");
3422 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3425 if (TARGET_P8_VECTOR && !TARGET_VSX)
3427 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3428 error ("-mpower8-vector requires -mvsx");
3429 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3432 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3434 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3435 error ("-mvsx-timode requires -mvsx");
3436 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3439 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3441 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3442 error ("-mhard-dfp requires -mhard-float");
3443 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3446   /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
3447      silently turn off quad memory mode.  */
3448 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3450 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3451 warning (0, N_("-mquad-memory requires 64-bit mode"));
3453 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3454 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3456 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3457 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3460 /* Non-atomic quad memory load/store are disabled for little endian, since
3461 the words are reversed, but atomic operations can still be done by
3462 swapping the words. */
3463 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3465 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3466 warning (0, N_("-mquad-memory is not available in little endian mode"));
3468 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3471   /* Assume that if the user asked for normal quad memory instructions, they want
3472      the atomic versions as well, unless they explicitly told us not to use quad
3473 word atomic instructions. */
3474 if (TARGET_QUAD_MEMORY
3475 && !TARGET_QUAD_MEMORY_ATOMIC
3476 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3477 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
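  /* For example, -mcpu=power8 -m64 -mquad-memory implicitly enables
     -mquad-memory-atomic as well, while adding an explicit
     -mno-quad-memory-atomic to the same command line is honored.  */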
3479 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3480 generating power8 instructions. */
3481 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3482 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3483 & OPTION_MASK_P8_FUSION);
3485 /* Power8 does not fuse sign extended loads with the addis. If we are
3486 optimizing at high levels for speed, convert a sign extended load into a
3487 zero extending load, and an explicit sign extension. */
3488 if (TARGET_P8_FUSION
3489 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3490 && optimize_function_for_speed_p (cfun)
3491 && optimize >= 3)
3492 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
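  /* An illustrative (hypothetical) rewrite this enables at -O3: a
     sign-extending load such as

	 lwa 9,0(3)

     can instead be emitted as a zero-extending load plus an explicit
     sign extension,

	 lwz 9,0(3)
	 extsw 9,9

     so that the load itself remains a candidate for addis fusion.  */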
3494 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3495 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3497 /* E500mc does "better" if we inline more aggressively. Respect the
3498 user's opinion, though. */
3499 if (rs6000_block_move_inline_limit == 0
3500 && (rs6000_cpu == PROCESSOR_PPCE500MC
3501 || rs6000_cpu == PROCESSOR_PPCE500MC64
3502 || rs6000_cpu == PROCESSOR_PPCE5500
3503 || rs6000_cpu == PROCESSOR_PPCE6500))
3504 rs6000_block_move_inline_limit = 128;
3506 /* store_one_arg depends on expand_block_move to handle at least the
3507 size of reg_parm_stack_space. */
3508 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3509 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3511 if (global_init_p)
3513 /* If the appropriate debug option is enabled, replace the target hooks
3514      with debug versions that call the real version and then print
3515 debugging information. */
3516 if (TARGET_DEBUG_COST)
3518 targetm.rtx_costs = rs6000_debug_rtx_costs;
3519 targetm.address_cost = rs6000_debug_address_cost;
3520 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3523 if (TARGET_DEBUG_ADDR)
3525 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3526 targetm.legitimize_address = rs6000_debug_legitimize_address;
3527 rs6000_secondary_reload_class_ptr
3528 = rs6000_debug_secondary_reload_class;
3529 rs6000_secondary_memory_needed_ptr
3530 = rs6000_debug_secondary_memory_needed;
3531 rs6000_cannot_change_mode_class_ptr
3532 = rs6000_debug_cannot_change_mode_class;
3533 rs6000_preferred_reload_class_ptr
3534 = rs6000_debug_preferred_reload_class;
3535 rs6000_legitimize_reload_address_ptr
3536 = rs6000_debug_legitimize_reload_address;
3537 rs6000_mode_dependent_address_ptr
3538 = rs6000_debug_mode_dependent_address;
3541 if (rs6000_veclibabi_name)
3543 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3544 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3545 else
3547 error ("unknown vectorization library ABI type (%s) for "
3548 "-mveclibabi= switch", rs6000_veclibabi_name);
3549 ret = false;
3554 if (!global_options_set.x_rs6000_long_double_type_size)
3556 if (main_target_opt != NULL
3557 && (main_target_opt->x_rs6000_long_double_type_size
3558 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3559 error ("target attribute or pragma changes long double size");
3560 else
3561 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3564 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3565 if (!global_options_set.x_rs6000_ieeequad)
3566 rs6000_ieeequad = 1;
3567 #endif
3569 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3570 target attribute or pragma which automatically enables both options,
3571 unless the altivec ABI was set. This is set by default for 64-bit, but
3572 not for 32-bit. */
3573 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3574 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3575 & ~rs6000_isa_flags_explicit);
3577 /* Enable Altivec ABI for AIX -maltivec. */
3578 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3580 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3581 error ("target attribute or pragma changes AltiVec ABI");
3582 else
3583 rs6000_altivec_abi = 1;
3586 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3587 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3588 be explicitly overridden in either case. */
3589 if (TARGET_ELF)
3591 if (!global_options_set.x_rs6000_altivec_abi
3592 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3594 if (main_target_opt != NULL &&
3595 !main_target_opt->x_rs6000_altivec_abi)
3596 error ("target attribute or pragma changes AltiVec ABI");
3597 else
3598 rs6000_altivec_abi = 1;
3602 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3603 So far, the only darwin64 targets are also MACH-O. */
3604 if (TARGET_MACHO
3605 && DEFAULT_ABI == ABI_DARWIN
3606 && TARGET_64BIT)
3608 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3609 error ("target attribute or pragma changes darwin64 ABI");
3610 else
3612 rs6000_darwin64_abi = 1;
3613 /* Default to natural alignment, for better performance. */
3614 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3618 /* Place FP constants in the constant pool instead of TOC
3619 if section anchors enabled. */
3620 if (flag_section_anchors
3621 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3622 TARGET_NO_FP_IN_TOC = 1;
3624 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3625 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3627 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3628 SUBTARGET_OVERRIDE_OPTIONS;
3629 #endif
3630 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3631 SUBSUBTARGET_OVERRIDE_OPTIONS;
3632 #endif
3633 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3634 SUB3TARGET_OVERRIDE_OPTIONS;
3635 #endif
3637 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3638 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3640   /* For the E500 family of cores, reset the single/double FP flags to let us
3641      check that they remain constant across attributes or pragmas.  Also,
3642      clear a possible request for string instructions, which are not supported
3643      and which we might have silently enabled above for -Os.
3645      For other families, clear ISEL in case it was set implicitly.  */
3648 switch (rs6000_cpu)
3650 case PROCESSOR_PPC8540:
3651 case PROCESSOR_PPC8548:
3652 case PROCESSOR_PPCE500MC:
3653 case PROCESSOR_PPCE500MC64:
3654 case PROCESSOR_PPCE5500:
3655 case PROCESSOR_PPCE6500:
3657 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3658 rs6000_double_float = TARGET_E500_DOUBLE;
3660 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3662 break;
3664 default:
3666 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3667 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3669 break;
3672 if (main_target_opt)
3674 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3675 error ("target attribute or pragma changes single precision floating "
3676 "point");
3677 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3678 error ("target attribute or pragma changes double precision floating "
3679 "point");
3682 /* Detect invalid option combinations with E500. */
3683 CHECK_E500_OPTIONS;
3685 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3686 && rs6000_cpu != PROCESSOR_POWER5
3687 && rs6000_cpu != PROCESSOR_POWER6
3688 && rs6000_cpu != PROCESSOR_POWER7
3689 && rs6000_cpu != PROCESSOR_POWER8
3690 && rs6000_cpu != PROCESSOR_PPCA2
3691 && rs6000_cpu != PROCESSOR_CELL
3692 && rs6000_cpu != PROCESSOR_PPC476);
3693 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3694 || rs6000_cpu == PROCESSOR_POWER5
3695 || rs6000_cpu == PROCESSOR_POWER7
3696 || rs6000_cpu == PROCESSOR_POWER8);
3697 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3698 || rs6000_cpu == PROCESSOR_POWER5
3699 || rs6000_cpu == PROCESSOR_POWER6
3700 || rs6000_cpu == PROCESSOR_POWER7
3701 || rs6000_cpu == PROCESSOR_POWER8
3702 || rs6000_cpu == PROCESSOR_PPCE500MC
3703 || rs6000_cpu == PROCESSOR_PPCE500MC64
3704 || rs6000_cpu == PROCESSOR_PPCE5500
3705 || rs6000_cpu == PROCESSOR_PPCE6500);
3707 /* Allow debug switches to override the above settings. These are set to -1
3708 in rs6000.opt to indicate the user hasn't directly set the switch. */
3709 if (TARGET_ALWAYS_HINT >= 0)
3710 rs6000_always_hint = TARGET_ALWAYS_HINT;
3712 if (TARGET_SCHED_GROUPS >= 0)
3713 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3715 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3716 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3718 rs6000_sched_restricted_insns_priority
3719 = (rs6000_sched_groups ? 1 : 0);
3721 /* Handle -msched-costly-dep option. */
3722 rs6000_sched_costly_dep
3723 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3725 if (rs6000_sched_costly_dep_str)
3727 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3728 rs6000_sched_costly_dep = no_dep_costly;
3729 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3730 rs6000_sched_costly_dep = all_deps_costly;
3731 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3732 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3733 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3734 rs6000_sched_costly_dep = store_to_load_dep_costly;
3735 else
3736 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3737 atoi (rs6000_sched_costly_dep_str));
3740 /* Handle -minsert-sched-nops option. */
3741 rs6000_sched_insert_nops
3742 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3744 if (rs6000_sched_insert_nops_str)
3746 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3747 rs6000_sched_insert_nops = sched_finish_none;
3748 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3749 rs6000_sched_insert_nops = sched_finish_pad_groups;
3750 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3751 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3752 else
3753 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3754 atoi (rs6000_sched_insert_nops_str));
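  /* Usage sketch, mirroring the strcmp cases above:

	 -msched-costly-dep={no|all|true_store_to_load|store_to_load|<n>}
	 -minsert-sched-nops={no|pad|regroup_exact|<n>}

     where an unrecognized <n> is parsed with atoi and interpreted as the
     corresponding enum value.  */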
3757 if (global_init_p)
3759 #ifdef TARGET_REGNAMES
3760 /* If the user desires alternate register names, copy in the
3761 alternate names now. */
3762 if (TARGET_REGNAMES)
3763 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3764 #endif
3766 /* Set aix_struct_return last, after the ABI is determined.
3767 If -maix-struct-return or -msvr4-struct-return was explicitly
3768 used, don't override with the ABI default. */
3769 if (!global_options_set.x_aix_struct_return)
3770 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3772 #if 0
3773 /* IBM XL compiler defaults to unsigned bitfields. */
3774 if (TARGET_XL_COMPAT)
3775 flag_signed_bitfields = 0;
3776 #endif
3778 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3779 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3781 if (TARGET_TOC)
3782 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3784 /* We can only guarantee the availability of DI pseudo-ops when
3785 assembling for 64-bit targets. */
3786 if (!TARGET_64BIT)
3788 targetm.asm_out.aligned_op.di = NULL;
3789 targetm.asm_out.unaligned_op.di = NULL;
3793 /* Set branch target alignment, if not optimizing for size. */
3794 if (!optimize_size)
3796       /* Cell wants to be aligned to 8 bytes for dual issue.  Titan wants to be
3797          aligned to 8 bytes to avoid misprediction by the branch predictor.  */
3798 if (rs6000_cpu == PROCESSOR_TITAN
3799 || rs6000_cpu == PROCESSOR_CELL)
3801 if (align_functions <= 0)
3802 align_functions = 8;
3803 if (align_jumps <= 0)
3804 align_jumps = 8;
3805 if (align_loops <= 0)
3806 align_loops = 8;
3808 if (rs6000_align_branch_targets)
3810 if (align_functions <= 0)
3811 align_functions = 16;
3812 if (align_jumps <= 0)
3813 align_jumps = 16;
3814 if (align_loops <= 0)
3816 can_override_loop_align = 1;
3817 align_loops = 16;
3820 if (align_jumps_max_skip <= 0)
3821 align_jumps_max_skip = 15;
3822 if (align_loops_max_skip <= 0)
3823 align_loops_max_skip = 15;
3826 /* Arrange to save and restore machine status around nested functions. */
3827 init_machine_status = rs6000_init_machine_status;
3829 /* We should always be splitting complex arguments, but we can't break
3830 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3831 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
3832 targetm.calls.split_complex_arg = NULL;
3835 /* Initialize rs6000_cost with the appropriate target costs. */
3836 if (optimize_size)
3837 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3838 else
3839 switch (rs6000_cpu)
3841 case PROCESSOR_RS64A:
3842 rs6000_cost = &rs64a_cost;
3843 break;
3845 case PROCESSOR_MPCCORE:
3846 rs6000_cost = &mpccore_cost;
3847 break;
3849 case PROCESSOR_PPC403:
3850 rs6000_cost = &ppc403_cost;
3851 break;
3853 case PROCESSOR_PPC405:
3854 rs6000_cost = &ppc405_cost;
3855 break;
3857 case PROCESSOR_PPC440:
3858 rs6000_cost = &ppc440_cost;
3859 break;
3861 case PROCESSOR_PPC476:
3862 rs6000_cost = &ppc476_cost;
3863 break;
3865 case PROCESSOR_PPC601:
3866 rs6000_cost = &ppc601_cost;
3867 break;
3869 case PROCESSOR_PPC603:
3870 rs6000_cost = &ppc603_cost;
3871 break;
3873 case PROCESSOR_PPC604:
3874 rs6000_cost = &ppc604_cost;
3875 break;
3877 case PROCESSOR_PPC604e:
3878 rs6000_cost = &ppc604e_cost;
3879 break;
3881 case PROCESSOR_PPC620:
3882 rs6000_cost = &ppc620_cost;
3883 break;
3885 case PROCESSOR_PPC630:
3886 rs6000_cost = &ppc630_cost;
3887 break;
3889 case PROCESSOR_CELL:
3890 rs6000_cost = &ppccell_cost;
3891 break;
3893 case PROCESSOR_PPC750:
3894 case PROCESSOR_PPC7400:
3895 rs6000_cost = &ppc750_cost;
3896 break;
3898 case PROCESSOR_PPC7450:
3899 rs6000_cost = &ppc7450_cost;
3900 break;
3902 case PROCESSOR_PPC8540:
3903 case PROCESSOR_PPC8548:
3904 rs6000_cost = &ppc8540_cost;
3905 break;
3907 case PROCESSOR_PPCE300C2:
3908 case PROCESSOR_PPCE300C3:
3909 rs6000_cost = &ppce300c2c3_cost;
3910 break;
3912 case PROCESSOR_PPCE500MC:
3913 rs6000_cost = &ppce500mc_cost;
3914 break;
3916 case PROCESSOR_PPCE500MC64:
3917 rs6000_cost = &ppce500mc64_cost;
3918 break;
3920 case PROCESSOR_PPCE5500:
3921 rs6000_cost = &ppce5500_cost;
3922 break;
3924 case PROCESSOR_PPCE6500:
3925 rs6000_cost = &ppce6500_cost;
3926 break;
3928 case PROCESSOR_TITAN:
3929 rs6000_cost = &titan_cost;
3930 break;
3932 case PROCESSOR_POWER4:
3933 case PROCESSOR_POWER5:
3934 rs6000_cost = &power4_cost;
3935 break;
3937 case PROCESSOR_POWER6:
3938 rs6000_cost = &power6_cost;
3939 break;
3941 case PROCESSOR_POWER7:
3942 rs6000_cost = &power7_cost;
3943 break;
3945 case PROCESSOR_POWER8:
3946 rs6000_cost = &power8_cost;
3947 break;
3949 case PROCESSOR_PPCA2:
3950 rs6000_cost = &ppca2_cost;
3951 break;
3953 default:
3954 gcc_unreachable ();
3957 if (global_init_p)
3959 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3960 rs6000_cost->simultaneous_prefetches,
3961 global_options.x_param_values,
3962 global_options_set.x_param_values);
3963 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3964 global_options.x_param_values,
3965 global_options_set.x_param_values);
3966 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3967 rs6000_cost->cache_line_size,
3968 global_options.x_param_values,
3969 global_options_set.x_param_values);
3970 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3971 global_options.x_param_values,
3972 global_options_set.x_param_values);
3974 /* Increase loop peeling limits based on performance analysis. */
3975 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3976 global_options.x_param_values,
3977 global_options_set.x_param_values);
3978 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3979 global_options.x_param_values,
3980 global_options_set.x_param_values);
3982 /* If using typedef char *va_list, signal that
3983 __builtin_va_start (&ap, 0) can be optimized to
3984 ap = __builtin_next_arg (0). */
3985 if (DEFAULT_ABI != ABI_V4)
3986 targetm.expand_builtin_va_start = NULL;
3989 /* Set up single/double float flags.
3990      If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3991 then set both flags. */
3992 if (TARGET_HARD_FLOAT && TARGET_FPRS
3993 && rs6000_single_float == 0 && rs6000_double_float == 0)
3994 rs6000_single_float = rs6000_double_float = 1;
3996 /* If not explicitly specified via option, decide whether to generate indexed
3997 load/store instructions. */
3998 if (TARGET_AVOID_XFORM == -1)
3999 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4000 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4001 need indexed accesses and the type used is the scalar type of the element
4002 being loaded or stored. */
4003 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4004 && !TARGET_ALTIVEC);
4006 /* Set the -mrecip options. */
4007 if (rs6000_recip_name)
4009 char *p = ASTRDUP (rs6000_recip_name);
4010 char *q;
4011 unsigned int mask, i;
4012 bool invert;
4014 while ((q = strtok (p, ",")) != NULL)
4016 p = NULL;
4017 if (*q == '!')
4019 invert = true;
4020 q++;
4022 else
4023 invert = false;
4025 if (!strcmp (q, "default"))
4026 mask = ((TARGET_RECIP_PRECISION)
4027 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4028 else
4030 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4031 if (!strcmp (q, recip_options[i].string))
4033 mask = recip_options[i].mask;
4034 break;
4037 if (i == ARRAY_SIZE (recip_options))
4039 error ("unknown option for -mrecip=%s", q);
4040 invert = false;
4041 mask = 0;
4042 ret = false;
4046 if (invert)
4047 rs6000_recip_control &= ~mask;
4048 else
4049 rs6000_recip_control |= mask;
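  /* Usage sketch: -mrecip takes a comma-separated list, each entry
     optionally negated with '!'.  For instance, assuming recip_options[]
     (defined elsewhere in this file) contains an "rsqrtd" entry,

	 -mrecip=default,!rsqrtd

     would enable the default estimate mask and then clear the bits for
     the rsqrtd entry.  */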
4053 /* Set the builtin mask of the various options used that could affect which
4054 builtins were used. In the past we used target_flags, but we've run out
4055 of bits, and some options like SPE and PAIRED are no longer in
4056 target_flags. */
4057 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4058 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4060 fprintf (stderr,
4061 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
4062 rs6000_builtin_mask);
4063 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
4066 /* Initialize all of the registers. */
4067 rs6000_init_hard_regno_mode_ok (global_init_p);
4069   /* Save the initial options in case the user uses function-specific options.  */
4070 if (global_init_p)
4071 target_option_default_node = target_option_current_node
4072 = build_target_option_node (&global_options);
4074 /* If not explicitly specified via option, decide whether to generate the
4075      extra blr's required to preserve the link stack on some cpus (e.g., the 476).  */
4076 if (TARGET_LINK_STACK == -1)
4077 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4079 return ret;
4082 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4083 define the target cpu type. */
4085 static void
4086 rs6000_option_override (void)
4088 (void) rs6000_option_override_internal (true);
4090 /* Register machine-specific passes. This needs to be done at start-up.
4091 It's convenient to do it here (like i386 does). */
4092 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
4094 static struct register_pass_info analyze_swaps_info
4095 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
4097 register_pass (&analyze_swaps_info);
4101 /* Implement targetm.vectorize.builtin_mask_for_load. */
4102 static tree
4103 rs6000_builtin_mask_for_load (void)
4105 if (TARGET_ALTIVEC || TARGET_VSX)
4106 return altivec_builtin_mask_for_load;
4107 else
4108 return 0;
4111 /* Implement LOOP_ALIGN. */
4112 int
4113 rs6000_loop_align (rtx label)
4115 basic_block bb;
4116 int ninsns;
4118 /* Don't override loop alignment if -falign-loops was specified. */
4119 if (!can_override_loop_align)
4120 return align_loops_log;
4122 bb = BLOCK_FOR_INSN (label);
4123   ninsns = num_loop_insns (bb->loop_father);
4125 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
4126 if (ninsns > 4 && ninsns <= 8
4127 && (rs6000_cpu == PROCESSOR_POWER4
4128 || rs6000_cpu == PROCESSOR_POWER5
4129 || rs6000_cpu == PROCESSOR_POWER6
4130 || rs6000_cpu == PROCESSOR_POWER7
4131 || rs6000_cpu == PROCESSOR_POWER8))
4132 return 5;
4133 else
4134 return align_loops_log;
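  /* Worked example: a 6-insn innermost loop compiled for POWER8 returns 5
     here, i.e. a 2**5 = 32-byte alignment, and the max-skip hook below
     then permits up to 31 bytes of padding.  */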
4137 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4138 static int
4139 rs6000_loop_align_max_skip (rtx label)
4141 return (1 << rs6000_loop_align (label)) - 1;
4144 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4145    after applying N iterations.  This routine does not determine
4146    how many iterations are required to reach the desired alignment.  */
4148 static bool
4149 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4151 if (is_packed)
4152 return false;
4154 if (TARGET_32BIT)
4156 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4157 return true;
4159 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4160 return true;
4162 return false;
4164 else
4166 if (TARGET_MACHO)
4167 return false;
4169       /* Assume that all other types are naturally aligned.  CHECKME!  */
4170 return true;
4174 /* Return true if the vector misalignment factor is supported by the
4175 target. */
4176 static bool
4177 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
4178 const_tree type,
4179 int misalignment,
4180 bool is_packed)
4182 if (TARGET_VSX)
4185       /* Return false if the movmisalign pattern is not supported for this mode.  */
4185 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4186 return false;
4188 if (misalignment == -1)
4190 /* Misalignment factor is unknown at compile time but we know
4191 it's word aligned. */
4192 if (rs6000_vector_alignment_reachable (type, is_packed))
4194 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4196 if (element_size == 64 || element_size == 32)
4197 return true;
4200 return false;
4203       /* VSX supports word-aligned vectors.  */
4204 if (misalignment % 4 == 0)
4205 return true;
4207 return false;
4210 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4211 static int
4212 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4213 tree vectype, int misalign)
4215 unsigned elements;
4216 tree elem_type;
4218 switch (type_of_cost)
4220 case scalar_stmt:
4221 case scalar_load:
4222 case scalar_store:
4223 case vector_stmt:
4224 case vector_load:
4225 case vector_store:
4226 case vec_to_scalar:
4227 case scalar_to_vec:
4228 case cond_branch_not_taken:
4229 return 1;
4231 case vec_perm:
4232 if (TARGET_VSX)
4233 return 3;
4234 else
4235 return 1;
4237 case vec_promote_demote:
4238 if (TARGET_VSX)
4239 return 4;
4240 else
4241 return 1;
4243 case cond_branch_taken:
4244 return 3;
4246 case unaligned_load:
4247 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4249 elements = TYPE_VECTOR_SUBPARTS (vectype);
4250 if (elements == 2)
4251 /* Double word aligned. */
4252 return 2;
4254 if (elements == 4)
4256 switch (misalign)
4258 case 8:
4259 /* Double word aligned. */
4260 return 2;
4262 case -1:
4263 /* Unknown misalignment. */
4264 case 4:
4265 case 12:
4266 /* Word aligned. */
4267 return 22;
4269 default:
4270 gcc_unreachable ();
4275 if (TARGET_ALTIVEC)
4276 /* Misaligned loads are not supported. */
4277 gcc_unreachable ();
4279 return 2;
4281 case unaligned_store:
4282 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4284 elements = TYPE_VECTOR_SUBPARTS (vectype);
4285 if (elements == 2)
4286 /* Double word aligned. */
4287 return 2;
4289 if (elements == 4)
4291 switch (misalign)
4293 case 8:
4294 /* Double word aligned. */
4295 return 2;
4297 case -1:
4298 /* Unknown misalignment. */
4299 case 4:
4300 case 12:
4301 /* Word aligned. */
4302 return 23;
4304 default:
4305 gcc_unreachable ();
4310 if (TARGET_ALTIVEC)
4311 /* Misaligned stores are not supported. */
4312 gcc_unreachable ();
4314 return 2;
4316 case vec_construct:
4317 elements = TYPE_VECTOR_SUBPARTS (vectype);
4318 elem_type = TREE_TYPE (vectype);
4319 /* 32-bit vectors loaded into registers are stored as double
4320 precision, so we need n/2 converts in addition to the usual
4321 n/2 merges to construct a vector of short floats from them. */
4322 if (SCALAR_FLOAT_TYPE_P (elem_type)
4323 && TYPE_PRECISION (elem_type) == 32)
4324 return elements + 1;
4325 else
4326 return elements / 2 + 1;
4328 default:
4329 gcc_unreachable ();
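  /* Worked examples of the table above: a V4SF unaligned_load with
     misalign == 4 (word aligned) costs 22, the same load known to be
     double-word aligned (misalign == 8) costs only 2, and a vec_construct
     of four floats costs elements + 1 = 5 because of the extra converts.  */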
4333 /* Implement targetm.vectorize.preferred_simd_mode. */
4335 static enum machine_mode
4336 rs6000_preferred_simd_mode (enum machine_mode mode)
4338 if (TARGET_VSX)
4339 switch (mode)
4341 case DFmode:
4342 return V2DFmode;
4343 default:;
4345 if (TARGET_ALTIVEC || TARGET_VSX)
4346 switch (mode)
4348 case SFmode:
4349 return V4SFmode;
4350 case TImode:
4351 return V1TImode;
4352 case DImode:
4353 return V2DImode;
4354 case SImode:
4355 return V4SImode;
4356 case HImode:
4357 return V8HImode;
4358 case QImode:
4359 return V16QImode;
4360 default:;
4362 if (TARGET_SPE)
4363 switch (mode)
4365 case SFmode:
4366 return V2SFmode;
4367 case SImode:
4368 return V2SImode;
4369 default:;
4371 if (TARGET_PAIRED_FLOAT
4372 && mode == SFmode)
4373 return V2SFmode;
4374 return word_mode;
4377 typedef struct _rs6000_cost_data
4379 struct loop *loop_info;
4380 unsigned cost[3];
4381 } rs6000_cost_data;
4383 /* Test for likely overcommitment of vector hardware resources. If a
4384 loop iteration is relatively large, and too large a percentage of
4385 instructions in the loop are vectorized, the cost model may not
4386 adequately reflect delays from unavailable vector resources.
4387 Penalize the loop body cost for this case. */
4389 static void
4390 rs6000_density_test (rs6000_cost_data *data)
4392 const int DENSITY_PCT_THRESHOLD = 85;
4393 const int DENSITY_SIZE_THRESHOLD = 70;
4394 const int DENSITY_PENALTY = 10;
4395 struct loop *loop = data->loop_info;
4396 basic_block *bbs = get_loop_body (loop);
4397 int nbbs = loop->num_nodes;
4398 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4399 int i, density_pct;
4401 for (i = 0; i < nbbs; i++)
4403 basic_block bb = bbs[i];
4404 gimple_stmt_iterator gsi;
4406 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4408 gimple stmt = gsi_stmt (gsi);
4409 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4411 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4412 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4413 not_vec_cost++;
4417 free (bbs);
4418 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4420 if (density_pct > DENSITY_PCT_THRESHOLD
4421 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4423 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4424 if (dump_enabled_p ())
4425 dump_printf_loc (MSG_NOTE, vect_location,
4426 "density %d%%, cost %d exceeds threshold, penalizing "
4427 "loop body cost by %d%%", density_pct,
4428 vec_cost + not_vec_cost, DENSITY_PENALTY);
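  /* Worked example: with vec_cost == 80 and not_vec_cost == 8, density_pct
     is 80 * 100 / 88 == 90, which exceeds 85, and 88 exceeds the size
     threshold of 70, so the body cost is raised to 80 * 110 / 100 == 88.  */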
4432 /* Implement targetm.vectorize.init_cost. */
4434 static void *
4435 rs6000_init_cost (struct loop *loop_info)
4437 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4438 data->loop_info = loop_info;
4439 data->cost[vect_prologue] = 0;
4440 data->cost[vect_body] = 0;
4441 data->cost[vect_epilogue] = 0;
4442 return data;
4445 /* Implement targetm.vectorize.add_stmt_cost. */
4447 static unsigned
4448 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4449 struct _stmt_vec_info *stmt_info, int misalign,
4450 enum vect_cost_model_location where)
4452 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4453 unsigned retval = 0;
4455 if (flag_vect_cost_model)
4457 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4458 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4459 misalign);
4460 /* Statements in an inner loop relative to the loop being
4461 vectorized are weighted more heavily. The value here is
4462 arbitrary and could potentially be improved with analysis. */
4463 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4464 count *= 50; /* FIXME. */
4466 retval = (unsigned) (count * stmt_cost);
4467 cost_data->cost[where] += retval;
4470 return retval;
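  /* E.g. a vector_stmt (base cost 1) seen twice in the body of a loop
     nested inside the loop being vectorized is charged
     2 * 50 * 1 == 100 units against vect_body.  */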
4473 /* Implement targetm.vectorize.finish_cost. */
4475 static void
4476 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4477 unsigned *body_cost, unsigned *epilogue_cost)
4479 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4481 if (cost_data->loop_info)
4482 rs6000_density_test (cost_data);
4484 *prologue_cost = cost_data->cost[vect_prologue];
4485 *body_cost = cost_data->cost[vect_body];
4486 *epilogue_cost = cost_data->cost[vect_epilogue];
4489 /* Implement targetm.vectorize.destroy_cost_data. */
4491 static void
4492 rs6000_destroy_cost_data (void *data)
4494 free (data);
4497 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4498 library with vectorized intrinsics. */
4500 static tree
4501 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4503 char name[32];
4504 const char *suffix = NULL;
4505 tree fntype, new_fndecl, bdecl = NULL_TREE;
4506 int n_args = 1;
4507 const char *bname;
4508 enum machine_mode el_mode, in_mode;
4509 int n, in_n;
4511   /* Libmass is suitable only for unsafe math, since it does not correctly
4512      support parts of IEEE (such as denormals) with the required precision.
4513      Only support it if we have VSX, so we can use the simd d2 or f4 functions.
4514 XXX: Add variable length support. */
4515 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4516 return NULL_TREE;
4518 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4519 n = TYPE_VECTOR_SUBPARTS (type_out);
4520 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4521 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4522 if (el_mode != in_mode
4523 || n != in_n)
4524 return NULL_TREE;
4526 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4528 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4529 switch (fn)
4531 case BUILT_IN_ATAN2:
4532 case BUILT_IN_HYPOT:
4533 case BUILT_IN_POW:
4534 n_args = 2;
4535 /* fall through */
4537 case BUILT_IN_ACOS:
4538 case BUILT_IN_ACOSH:
4539 case BUILT_IN_ASIN:
4540 case BUILT_IN_ASINH:
4541 case BUILT_IN_ATAN:
4542 case BUILT_IN_ATANH:
4543 case BUILT_IN_CBRT:
4544 case BUILT_IN_COS:
4545 case BUILT_IN_COSH:
4546 case BUILT_IN_ERF:
4547 case BUILT_IN_ERFC:
4548 case BUILT_IN_EXP2:
4549 case BUILT_IN_EXP:
4550 case BUILT_IN_EXPM1:
4551 case BUILT_IN_LGAMMA:
4552 case BUILT_IN_LOG10:
4553 case BUILT_IN_LOG1P:
4554 case BUILT_IN_LOG2:
4555 case BUILT_IN_LOG:
4556 case BUILT_IN_SIN:
4557 case BUILT_IN_SINH:
4558 case BUILT_IN_SQRT:
4559 case BUILT_IN_TAN:
4560 case BUILT_IN_TANH:
4561 bdecl = builtin_decl_implicit (fn);
4562 suffix = "d2"; /* pow -> powd2 */
4563 if (el_mode != DFmode
4564 || n != 2
4565 || !bdecl)
4566 return NULL_TREE;
4567 break;
4569 case BUILT_IN_ATAN2F:
4570 case BUILT_IN_HYPOTF:
4571 case BUILT_IN_POWF:
4572 n_args = 2;
4573 /* fall through */
4575 case BUILT_IN_ACOSF:
4576 case BUILT_IN_ACOSHF:
4577 case BUILT_IN_ASINF:
4578 case BUILT_IN_ASINHF:
4579 case BUILT_IN_ATANF:
4580 case BUILT_IN_ATANHF:
4581 case BUILT_IN_CBRTF:
4582 case BUILT_IN_COSF:
4583 case BUILT_IN_COSHF:
4584 case BUILT_IN_ERFF:
4585 case BUILT_IN_ERFCF:
4586 case BUILT_IN_EXP2F:
4587 case BUILT_IN_EXPF:
4588 case BUILT_IN_EXPM1F:
4589 case BUILT_IN_LGAMMAF:
4590 case BUILT_IN_LOG10F:
4591 case BUILT_IN_LOG1PF:
4592 case BUILT_IN_LOG2F:
4593 case BUILT_IN_LOGF:
4594 case BUILT_IN_SINF:
4595 case BUILT_IN_SINHF:
4596 case BUILT_IN_SQRTF:
4597 case BUILT_IN_TANF:
4598 case BUILT_IN_TANHF:
4599 bdecl = builtin_decl_implicit (fn);
4600 suffix = "4"; /* powf -> powf4 */
4601 if (el_mode != SFmode
4602 || n != 4
4603 || !bdecl)
4604 return NULL_TREE;
4605 break;
4607 default:
4608 return NULL_TREE;
4611 else
4612 return NULL_TREE;
4614 gcc_assert (suffix != NULL);
4615 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4616 if (!bname)
4617 return NULL_TREE;
4619 strcpy (name, bname + sizeof ("__builtin_") - 1);
4620 strcat (name, suffix);
4622 if (n_args == 1)
4623 fntype = build_function_type_list (type_out, type_in, NULL);
4624 else if (n_args == 2)
4625 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4626 else
4627 gcc_unreachable ();
4629 /* Build a function declaration for the vectorized function. */
4630 new_fndecl = build_decl (BUILTINS_LOCATION,
4631 FUNCTION_DECL, get_identifier (name), fntype);
4632 TREE_PUBLIC (new_fndecl) = 1;
4633 DECL_EXTERNAL (new_fndecl) = 1;
4634 DECL_IS_NOVOPS (new_fndecl) = 1;
4635 TREE_READONLY (new_fndecl) = 1;
4637 return new_fndecl;
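  /* Examples of the name construction above: vectorizing __builtin_sin for
     V2DFmode yields a decl named "sind2", and __builtin_powf for V4SFmode
     yields "powf4", matching the libmass simd d2/f4 entry points.  */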
4640 /* Returns a function decl for a vectorized version of the builtin function
4641 with builtin function code FN and the result vector type TYPE, or NULL_TREE
4642 if it is not available. */
4644 static tree
4645 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4646 tree type_in)
4648 enum machine_mode in_mode, out_mode;
4649 int in_n, out_n;
4651 if (TARGET_DEBUG_BUILTIN)
4652 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4653 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4654 GET_MODE_NAME (TYPE_MODE (type_out)),
4655 GET_MODE_NAME (TYPE_MODE (type_in)));
4657 if (TREE_CODE (type_out) != VECTOR_TYPE
4658 || TREE_CODE (type_in) != VECTOR_TYPE
4659 || !TARGET_VECTORIZE_BUILTINS)
4660 return NULL_TREE;
4662 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4663 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4664 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4665 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4667 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4669 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4670 switch (fn)
4672 case BUILT_IN_CLZIMAX:
4673 case BUILT_IN_CLZLL:
4674 case BUILT_IN_CLZL:
4675 case BUILT_IN_CLZ:
4676 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4678 if (out_mode == QImode && out_n == 16)
4679 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4680 else if (out_mode == HImode && out_n == 8)
4681 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4682 else if (out_mode == SImode && out_n == 4)
4683 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4684 else if (out_mode == DImode && out_n == 2)
4685 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4687 break;
4688 case BUILT_IN_COPYSIGN:
4689 if (VECTOR_UNIT_VSX_P (V2DFmode)
4690 && out_mode == DFmode && out_n == 2
4691 && in_mode == DFmode && in_n == 2)
4692 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4693 break;
4694 case BUILT_IN_COPYSIGNF:
4695 if (out_mode != SFmode || out_n != 4
4696 || in_mode != SFmode || in_n != 4)
4697 break;
4698 if (VECTOR_UNIT_VSX_P (V4SFmode))
4699 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4700 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4701 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4702 break;
4703 case BUILT_IN_POPCOUNTIMAX:
4704 case BUILT_IN_POPCOUNTLL:
4705 case BUILT_IN_POPCOUNTL:
4706 case BUILT_IN_POPCOUNT:
4707 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4709 if (out_mode == QImode && out_n == 16)
4710 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4711 else if (out_mode == HImode && out_n == 8)
4712 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4713 else if (out_mode == SImode && out_n == 4)
4714 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4715 else if (out_mode == DImode && out_n == 2)
4716 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4718 break;
4719 case BUILT_IN_SQRT:
4720 if (VECTOR_UNIT_VSX_P (V2DFmode)
4721 && out_mode == DFmode && out_n == 2
4722 && in_mode == DFmode && in_n == 2)
4723 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4724 break;
4725 case BUILT_IN_SQRTF:
4726 if (VECTOR_UNIT_VSX_P (V4SFmode)
4727 && out_mode == SFmode && out_n == 4
4728 && in_mode == SFmode && in_n == 4)
4729 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4730 break;
4731 case BUILT_IN_CEIL:
4732 if (VECTOR_UNIT_VSX_P (V2DFmode)
4733 && out_mode == DFmode && out_n == 2
4734 && in_mode == DFmode && in_n == 2)
4735 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4736 break;
4737 case BUILT_IN_CEILF:
4738 if (out_mode != SFmode || out_n != 4
4739 || in_mode != SFmode || in_n != 4)
4740 break;
4741 if (VECTOR_UNIT_VSX_P (V4SFmode))
4742 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4743 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4744 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4745 break;
4746 case BUILT_IN_FLOOR:
4747 if (VECTOR_UNIT_VSX_P (V2DFmode)
4748 && out_mode == DFmode && out_n == 2
4749 && in_mode == DFmode && in_n == 2)
4750 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4751 break;
4752 case BUILT_IN_FLOORF:
4753 if (out_mode != SFmode || out_n != 4
4754 || in_mode != SFmode || in_n != 4)
4755 break;
4756 if (VECTOR_UNIT_VSX_P (V4SFmode))
4757 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4758 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4759 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4760 break;
4761 case BUILT_IN_FMA:
4762 if (VECTOR_UNIT_VSX_P (V2DFmode)
4763 && out_mode == DFmode && out_n == 2
4764 && in_mode == DFmode && in_n == 2)
4765 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4766 break;
4767 case BUILT_IN_FMAF:
4768 if (VECTOR_UNIT_VSX_P (V4SFmode)
4769 && out_mode == SFmode && out_n == 4
4770 && in_mode == SFmode && in_n == 4)
4771 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4772 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4773 && out_mode == SFmode && out_n == 4
4774 && in_mode == SFmode && in_n == 4)
4775 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4776 break;
4777 case BUILT_IN_TRUNC:
4778 if (VECTOR_UNIT_VSX_P (V2DFmode)
4779 && out_mode == DFmode && out_n == 2
4780 && in_mode == DFmode && in_n == 2)
4781 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4782 break;
4783 case BUILT_IN_TRUNCF:
4784 if (out_mode != SFmode || out_n != 4
4785 || in_mode != SFmode || in_n != 4)
4786 break;
4787 if (VECTOR_UNIT_VSX_P (V4SFmode))
4788 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4789 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4790 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4791 break;
4792 case BUILT_IN_NEARBYINT:
4793 if (VECTOR_UNIT_VSX_P (V2DFmode)
4794 && flag_unsafe_math_optimizations
4795 && out_mode == DFmode && out_n == 2
4796 && in_mode == DFmode && in_n == 2)
4797 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4798 break;
4799 case BUILT_IN_NEARBYINTF:
4800 if (VECTOR_UNIT_VSX_P (V4SFmode)
4801 && flag_unsafe_math_optimizations
4802 && out_mode == SFmode && out_n == 4
4803 && in_mode == SFmode && in_n == 4)
4804 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4805 break;
4806 case BUILT_IN_RINT:
4807 if (VECTOR_UNIT_VSX_P (V2DFmode)
4808 && !flag_trapping_math
4809 && out_mode == DFmode && out_n == 2
4810 && in_mode == DFmode && in_n == 2)
4811 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4812 break;
4813 case BUILT_IN_RINTF:
4814 if (VECTOR_UNIT_VSX_P (V4SFmode)
4815 && !flag_trapping_math
4816 && out_mode == SFmode && out_n == 4
4817 && in_mode == SFmode && in_n == 4)
4818 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4819 break;
4820 default:
4821 break;
4825 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4827 enum rs6000_builtins fn
4828 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4829 switch (fn)
4831 case RS6000_BUILTIN_RSQRTF:
4832 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4833 && out_mode == SFmode && out_n == 4
4834 && in_mode == SFmode && in_n == 4)
4835 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4836 break;
4837 case RS6000_BUILTIN_RSQRT:
4838 if (VECTOR_UNIT_VSX_P (V2DFmode)
4839 && out_mode == DFmode && out_n == 2
4840 && in_mode == DFmode && in_n == 2)
4841 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4842 break;
4843 case RS6000_BUILTIN_RECIPF:
4844 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4845 && out_mode == SFmode && out_n == 4
4846 && in_mode == SFmode && in_n == 4)
4847 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4848 break;
4849 case RS6000_BUILTIN_RECIP:
4850 if (VECTOR_UNIT_VSX_P (V2DFmode)
4851 && out_mode == DFmode && out_n == 2
4852 && in_mode == DFmode && in_n == 2)
4853 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4854 break;
4855 default:
4856 break;
4860 /* Generate calls to libmass if appropriate. */
4861 if (rs6000_veclib_handler)
4862 return rs6000_veclib_handler (fndecl, type_out, type_in);
4864 return NULL_TREE;
4867 /* Default CPU string for rs6000*_file_start functions. */
4868 static const char *rs6000_default_cpu;
4870 /* Do anything needed at the start of the asm file. */
4872 static void
4873 rs6000_file_start (void)
4875 char buffer[80];
4876 const char *start = buffer;
4877 FILE *file = asm_out_file;
4879 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4881 default_file_start ();
4883 if (flag_verbose_asm)
4885 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4887 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4889 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4890 start = "";
4893 if (global_options_set.x_rs6000_cpu_index)
4895 fprintf (file, "%s -mcpu=%s", start,
4896 processor_target_table[rs6000_cpu_index].name);
4897 start = "";
4900 if (global_options_set.x_rs6000_tune_index)
4902 fprintf (file, "%s -mtune=%s", start,
4903 processor_target_table[rs6000_tune_index].name);
4904 start = "";
4907 if (PPC405_ERRATUM77)
4909 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4910 start = "";
4913 #ifdef USING_ELFOS_H
4914 switch (rs6000_sdata)
4916 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4917 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4918 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4919 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4922 if (rs6000_sdata && g_switch_value)
4924 fprintf (file, "%s -G %d", start,
4925 g_switch_value);
4926 start = "";
4928 #endif
4930 if (*start == '\0')
4931 putc ('\n', file);
4934 if (DEFAULT_ABI == ABI_ELFv2)
4935 fprintf (file, "\t.abiversion 2\n");
4937 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
4938 || (TARGET_ELF && flag_pic == 2))
4940 switch_to_section (toc_section);
4941 switch_to_section (text_section);
4946 /* Return nonzero if this function is known to have a null epilogue. */
4948 int
4949 direct_return (void)
4951 if (reload_completed)
4953 rs6000_stack_t *info = rs6000_stack_info ();
4955 if (info->first_gp_reg_save == 32
4956 && info->first_fp_reg_save == 64
4957 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4958 && ! info->lr_save_p
4959 && ! info->cr_save_p
4960 && info->vrsave_mask == 0
4961 && ! info->push_p)
4962 return 1;
4965 return 0;
4968 /* Return the number of instructions it takes to form a constant in an
4969 integer register. */
4971 int
4972 num_insns_constant_wide (HOST_WIDE_INT value)
4974 /* signed constant loadable with addi */
4975 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4976 return 1;
4978 /* constant loadable with addis */
4979 else if ((value & 0xffff) == 0
4980 && (value >> 31 == -1 || value >> 31 == 0))
4981 return 1;
4983 else if (TARGET_POWERPC64)
4985 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4986 HOST_WIDE_INT high = value >> 31;
4988 if (high == 0 || high == -1)
4989 return 2;
4991 high >>= 1;
4993 if (low == 0)
4994 return num_insns_constant_wide (high) + 1;
4995 else if (high == 0)
4996 return num_insns_constant_wide (low) + 1;
4997 else
4998 return (num_insns_constant_wide (high)
4999 + num_insns_constant_wide (low) + 1);
5002 else
5003 return 2;
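  /* Worked examples: 0x7fff loads with one addi (li); 0x12345678 needs
     addis + ori, i.e. 2; and a general 64-bit constant costs at most
     num_insns (high) + num_insns (low) + 1 == 5, the extra instruction
     being the shift that joins the two 32-bit halves.  */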
5006 int
5007 num_insns_constant (rtx op, enum machine_mode mode)
5009 HOST_WIDE_INT low, high;
5011 switch (GET_CODE (op))
5013 case CONST_INT:
5014 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5015 && mask64_operand (op, mode))
5016 return 2;
5017 else
5018 return num_insns_constant_wide (INTVAL (op));
5020 case CONST_WIDE_INT:
5022 int i;
5023 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5024 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5025 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5026 return ins;
5029 case CONST_DOUBLE:
5030 if (mode == SFmode || mode == SDmode)
5032 long l;
5033 REAL_VALUE_TYPE rv;
5035 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5036 if (DECIMAL_FLOAT_MODE_P (mode))
5037 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
5038 else
5039 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
5040 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5043 long l[2];
5044 REAL_VALUE_TYPE rv;
5046 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5047 if (DECIMAL_FLOAT_MODE_P (mode))
5048 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
5049 else
5050 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
5051 high = l[WORDS_BIG_ENDIAN == 0];
5052 low = l[WORDS_BIG_ENDIAN != 0];
5054 if (TARGET_32BIT)
5055 return (num_insns_constant_wide (low)
5056 + num_insns_constant_wide (high));
5057 else
5059 if ((high == 0 && low >= 0)
5060 || (high == -1 && low < 0))
5061 return num_insns_constant_wide (low);
5063 else if (mask64_operand (op, mode))
5064 return 2;
5066 else if (low == 0)
5067 return num_insns_constant_wide (high) + 1;
5069 else
5070 return (num_insns_constant_wide (high)
5071 + num_insns_constant_wide (low) + 1);
5074 default:
5075 gcc_unreachable ();
5079 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5080 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5081 corresponding element of the vector, but for V4SFmode and V2SFmode,
5082 the corresponding "float" is interpreted as an SImode integer. */
5084 HOST_WIDE_INT
5085 const_vector_elt_as_int (rtx op, unsigned int elt)
5087 rtx tmp;
5089 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5090 gcc_assert (GET_MODE (op) != V2DImode
5091 && GET_MODE (op) != V2DFmode);
5093 tmp = CONST_VECTOR_ELT (op, elt);
5094 if (GET_MODE (op) == V4SFmode
5095 || GET_MODE (op) == V2SFmode)
5096 tmp = gen_lowpart (SImode, tmp);
5097 return INTVAL (tmp);
5100 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5101 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5102 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5103 all items are set to the same value and contain COPIES replicas of the
5104 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5105 operand and the others are set to the value of the operand's msb. */
5107 static bool
5108 vspltis_constant (rtx op, unsigned step, unsigned copies)
5110 enum machine_mode mode = GET_MODE (op);
5111 enum machine_mode inner = GET_MODE_INNER (mode);
5113 unsigned i;
5114 unsigned nunits;
5115 unsigned bitsize;
5116 unsigned mask;
5118 HOST_WIDE_INT val;
5119 HOST_WIDE_INT splat_val;
5120 HOST_WIDE_INT msb_val;
5122 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5123 return false;
5125 nunits = GET_MODE_NUNITS (mode);
5126 bitsize = GET_MODE_BITSIZE (inner);
5127 mask = GET_MODE_MASK (inner);
5129 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5130 splat_val = val;
5131 msb_val = val >= 0 ? 0 : -1;
5133 /* Construct the value to be splatted, if possible. If not, return 0. */
5134 for (i = 2; i <= copies; i *= 2)
5136 HOST_WIDE_INT small_val;
5137 bitsize /= 2;
5138 small_val = splat_val >> bitsize;
5139 mask >>= bitsize;
5140 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5141 return false;
5142 splat_val = small_val;
5145 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5146 if (EASY_VECTOR_15 (splat_val))
5149 /* Also check if we can splat, and then add the result to itself. Do so if
5150      the value is positive, or if the splat instruction is using OP's mode;
5151 for splat_val < 0, the splat and the add should use the same mode. */
5152 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5153 && (splat_val >= 0 || (step == 1 && copies == 1)))
5156   /* Also check if we are loading up the most significant bit, which can be done by
5157 loading up -1 and shifting the value left by -1. */
5158 else if (EASY_VECTOR_MSB (splat_val, inner))
5161 else
5162 return false;
5164 /* Check if VAL is present in every STEP-th element, and the
5165 other elements are filled with its most significant bit. */
5166 for (i = 1; i < nunits; ++i)
5168 HOST_WIDE_INT desired_val;
5169 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5170 if ((i & (step - 1)) == 0)
5171 desired_val = val;
5172 else
5173 desired_val = msb_val;
5175 if (desired_val != const_vector_elt_as_int (op, elt))
5176 return false;
5179 return true;
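  /* Illustration: the V8HImode vector of all 0x0101 elements is not a
     valid vspltish (257 is out of range), but with step == 1, copies == 2
     the 16-bit value folds down to the 8-bit splat value 1, so a single
     vspltisb 1 produces the constant.  */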
5183 /* Return true if OP is of the given MODE and can be synthesized
5184 with a vspltisb, vspltish or vspltisw. */
5186 bool
5187 easy_altivec_constant (rtx op, enum machine_mode mode)
5189 unsigned step, copies;
5191 if (mode == VOIDmode)
5192 mode = GET_MODE (op);
5193 else if (mode != GET_MODE (op))
5194 return false;
5196 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5197 constants. */
5198 if (mode == V2DFmode)
5199 return zero_constant (op, mode);
5201 else if (mode == V2DImode)
5203 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5204 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5205 return false;
5207 if (zero_constant (op, mode))
5208 return true;
5210 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5211 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5212 return true;
5214 return false;
5217 /* V1TImode is a special container for TImode. Ignore for now. */
5218 else if (mode == V1TImode)
5219 return false;
5221 /* Start with a vspltisw. */
5222 step = GET_MODE_NUNITS (mode) / 4;
5223 copies = 1;
5225 if (vspltis_constant (op, step, copies))
5226 return true;
5228 /* Then try with a vspltish. */
5229 if (step == 1)
5230 copies <<= 1;
5231 else
5232 step >>= 1;
5234 if (vspltis_constant (op, step, copies))
5235 return true;
5237 /* And finally a vspltisb. */
5238 if (step == 1)
5239 copies <<= 1;
5240 else
5241 step >>= 1;
5243 if (vspltis_constant (op, step, copies))
5244 return true;
5246 return false;
5249 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5250 result is OP. Abort if it is not possible. */
5252 rtx
5253 gen_easy_altivec_constant (rtx op)
5255 enum machine_mode mode = GET_MODE (op);
5256 int nunits = GET_MODE_NUNITS (mode);
5257 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5258 unsigned step = nunits / 4;
5259 unsigned copies = 1;
5261 /* Start with a vspltisw. */
5262 if (vspltis_constant (op, step, copies))
5263 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5265 /* Then try with a vspltish. */
5266 if (step == 1)
5267 copies <<= 1;
5268 else
5269 step >>= 1;
5271 if (vspltis_constant (op, step, copies))
5272 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5274 /* And finally a vspltisb. */
5275 if (step == 1)
5276 copies <<= 1;
5277 else
5278 step >>= 1;
5280 if (vspltis_constant (op, step, copies))
5281 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5283 gcc_unreachable ();
5286 const char *
5287 output_vec_const_move (rtx *operands)
5289 int cst, cst2;
5290 enum machine_mode mode;
5291 rtx dest, vec;
5293 dest = operands[0];
5294 vec = operands[1];
5295 mode = GET_MODE (dest);
5297 if (TARGET_VSX)
5299 if (zero_constant (vec, mode))
5300 return "xxlxor %x0,%x0,%x0";
5302 if ((mode == V2DImode || mode == V1TImode)
5303 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5304 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5305 return "vspltisw %0,-1";
5308 if (TARGET_ALTIVEC)
5310 rtx splat_vec;
5311 if (zero_constant (vec, mode))
5312 return "vxor %0,%0,%0";
5314 splat_vec = gen_easy_altivec_constant (vec);
5315 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5316 operands[1] = XEXP (splat_vec, 0);
5317 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5318 return "#";
5320 switch (GET_MODE (splat_vec))
5322 case V4SImode:
5323 return "vspltisw %0,%1";
5325 case V8HImode:
5326 return "vspltish %0,%1";
5328 case V16QImode:
5329 return "vspltisb %0,%1";
5331 default:
5332 gcc_unreachable ();
5336 gcc_assert (TARGET_SPE);
5338 /* Vector constant 0 is handled as a splitter for V2SI, and in the
5339 patterns for V1DI, V4HI, and V2SF.
5341 FIXME: We should probably return # and add post reload
5342 splitters for these, but this way is so easy ;-). */
5343 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5344 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5345 operands[1] = CONST_VECTOR_ELT (vec, 0);
5346 operands[2] = CONST_VECTOR_ELT (vec, 1);
5347 if (cst == cst2)
5348 return "li %0,%1\n\tevmergelo %0,%0,%0";
5349 else if (WORDS_BIG_ENDIAN)
5350 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5351 else
5352 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
5355 /* Initialize the paired vector TARGET to VALS.  */
5357 void
5358 paired_expand_vector_init (rtx target, rtx vals)
5360 enum machine_mode mode = GET_MODE (target);
5361 int n_elts = GET_MODE_NUNITS (mode);
5362 int n_var = 0;
5363 rtx x, new_rtx, tmp, constant_op, op1, op2;
5364 int i;
5366 for (i = 0; i < n_elts; ++i)
5368 x = XVECEXP (vals, 0, i);
5369 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5370 ++n_var;
5372 if (n_var == 0)
5374 /* Load from constant pool. */
5375 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5376 return;
5379 if (n_var == 2)
5381 /* The vector is initialized only with non-constants. */
5382 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5383 XVECEXP (vals, 0, 1));
5385 emit_move_insn (target, new_rtx);
5386 return;
5389 /* One field is non-constant and the other one is a constant. Load the
5390 constant from the constant pool and use ps_merge instruction to
5391 construct the whole vector. */
5392 op1 = XVECEXP (vals, 0, 0);
5393 op2 = XVECEXP (vals, 0, 1);
5395 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5397 tmp = gen_reg_rtx (GET_MODE (constant_op));
5398 emit_move_insn (tmp, constant_op);
5400 if (CONSTANT_P (op1))
5401 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5402 else
5403 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5405 emit_move_insn (target, new_rtx);
5408 void
5409 paired_expand_vector_move (rtx operands[])
5411 rtx op0 = operands[0], op1 = operands[1];
5413 emit_move_insn (op0, op1);
5416 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5417 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5418 operands for the relation operation COND. This is a recursive
5419 function. */
5421 static void
5422 paired_emit_vector_compare (enum rtx_code rcode,
5423 rtx dest, rtx op0, rtx op1,
5424 rtx cc_op0, rtx cc_op1)
5426 rtx tmp = gen_reg_rtx (V2SFmode);
5427 rtx tmp1, max, min;
5429 gcc_assert (TARGET_PAIRED_FLOAT);
5430 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5432 switch (rcode)
5434 case LT:
5435 case LTU:
5436 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5437 return;
5438 case GE:
5439 case GEU:
5440 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5441 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5442 return;
5443 case LE:
5444 case LEU:
5445 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5446 return;
5447 case GT:
5448 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5449 return;
5450 case EQ:
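/* Equality is synthesized from the GE machinery: the two
   subtract+select pairs below compute the elementwise MAX and MIN of
   CC_OP0 and CC_OP1, and MIN - MAX is non-negative (i.e. zero)
   exactly when the two inputs are equal, so the final select picks
   OP0 on equality and OP1 otherwise.  */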
5451 tmp1 = gen_reg_rtx (V2SFmode);
5452 max = gen_reg_rtx (V2SFmode);
5453 min = gen_reg_rtx (V2SFmode);
5456 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5457 emit_insn (gen_selv2sf4
5458 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5459 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5460 emit_insn (gen_selv2sf4
5461 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5462 emit_insn (gen_subv2sf3 (tmp1, min, max));
5463 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5464 return;
5465 case NE:
5466 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5467 return;
5468 case UNLE:
5469 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5470 return;
5471 case UNLT:
5472 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5473 return;
5474 case UNGE:
5475 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5476 return;
5477 case UNGT:
5478 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5479 return;
5480 default:
5481 gcc_unreachable ();
5484 return;
5487 /* Emit vector conditional expression.
5488 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5489 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5491 int
5492 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5493 rtx cond, rtx cc_op0, rtx cc_op1)
5495 enum rtx_code rcode = GET_CODE (cond);
5497 if (!TARGET_PAIRED_FLOAT)
5498 return 0;
5500 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5502 return 1;
5505 /* Initialize vector TARGET to VALS. */
5507 void
5508 rs6000_expand_vector_init (rtx target, rtx vals)
5510 enum machine_mode mode = GET_MODE (target);
5511 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5512 int n_elts = GET_MODE_NUNITS (mode);
5513 int n_var = 0, one_var = -1;
5514 bool all_same = true, all_const_zero = true;
5515 rtx x, mem;
5516 int i;
5518 for (i = 0; i < n_elts; ++i)
5520 x = XVECEXP (vals, 0, i);
5521 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5522 ++n_var, one_var = i;
5523 else if (x != CONST0_RTX (inner_mode))
5524 all_const_zero = false;
5526 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5527 all_same = false;
5530 if (n_var == 0)
5532 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5533 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5534 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5536 /* Zero register. */
5537 emit_insn (gen_rtx_SET (VOIDmode, target,
5538 gen_rtx_XOR (mode, target, target)));
5539 return;
5541 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5543 /* Splat immediate. */
5544 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5545 return;
5547 else
5549 /* Load from constant pool. */
5550 emit_move_insn (target, const_vec);
5551 return;
5555 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5556 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5558 rtx op0 = XVECEXP (vals, 0, 0);
5559 rtx op1 = XVECEXP (vals, 0, 1);
5560 if (all_same)
5562 if (!MEM_P (op0) && !REG_P (op0))
5563 op0 = force_reg (inner_mode, op0);
5564 if (mode == V2DFmode)
5565 emit_insn (gen_vsx_splat_v2df (target, op0));
5566 else
5567 emit_insn (gen_vsx_splat_v2di (target, op0));
5569 else
5571 op0 = force_reg (inner_mode, op0);
5572 op1 = force_reg (inner_mode, op1);
5573 if (mode == V2DFmode)
5574 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5575 else
5576 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5578 return;
5581 /* With single precision floating point on VSX, we know that internally
5582 single precision is actually represented as a double.  Either make two
5583 V2DF vectors and convert them to single precision, or do one conversion
5584 and splat the result to the other elements. */
5585 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5587 if (all_same)
5589 rtx freg = gen_reg_rtx (V4SFmode);
5590 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5591 rtx cvt = ((TARGET_XSCVDPSPN)
5592 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5593 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5595 emit_insn (cvt);
5596 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5598 else
5600 rtx dbl_even = gen_reg_rtx (V2DFmode);
5601 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5602 rtx flt_even = gen_reg_rtx (V4SFmode);
5603 rtx flt_odd = gen_reg_rtx (V4SFmode);
5604 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5605 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5606 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5607 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5609 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5610 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5611 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5612 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5613 rs6000_expand_extract_even (target, flt_even, flt_odd);
5615 return;
5618 /* Store value to stack temp. Load vector element. Splat. However, splat
5619 of 64-bit items is not supported on Altivec. */
5620 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5622 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5623 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5624 XVECEXP (vals, 0, 0));
5625 x = gen_rtx_UNSPEC (VOIDmode,
5626 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5627 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5628 gen_rtvec (2,
5629 gen_rtx_SET (VOIDmode,
5630 target, mem),
5631 x)));
5632 x = gen_rtx_VEC_SELECT (inner_mode, target,
5633 gen_rtx_PARALLEL (VOIDmode,
5634 gen_rtvec (1, const0_rtx)));
5635 emit_insn (gen_rtx_SET (VOIDmode, target,
5636 gen_rtx_VEC_DUPLICATE (mode, x)));
5637 return;
5640 /* One field is non-constant. Load constant then overwrite
5641 varying field. */
5642 if (n_var == 1)
5644 rtx copy = copy_rtx (vals);
5646 /* Load constant part of vector, substitute neighboring value for
5647 varying element. */
5648 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5649 rs6000_expand_vector_init (target, copy);
5651 /* Insert variable. */
5652 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5653 return;
5656 /* Construct the vector in memory one field at a time
5657 and load the whole vector. */
5658 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5659 for (i = 0; i < n_elts; i++)
5660 emit_move_insn (adjust_address_nv (mem, inner_mode,
5661 i * GET_MODE_SIZE (inner_mode)),
5662 XVECEXP (vals, 0, i));
5663 emit_move_insn (target, mem);
5666 /* Set field ELT of TARGET to VAL. */
5668 void
5669 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5671 enum machine_mode mode = GET_MODE (target);
5672 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5673 rtx reg = gen_reg_rtx (mode);
5674 rtx mask, mem, x;
5675 int width = GET_MODE_SIZE (inner_mode);
5676 int i;
5678 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5680 rtx (*set_func) (rtx, rtx, rtx, rtx)
5681 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5682 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5683 return;
5686 /* Simplify setting single element vectors like V1TImode. */
5687 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5689 emit_move_insn (target, gen_lowpart (mode, val));
5690 return;
5693 /* Load single variable value. */
5694 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5695 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5696 x = gen_rtx_UNSPEC (VOIDmode,
5697 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5698 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5699 gen_rtvec (2,
5700 gen_rtx_SET (VOIDmode,
5701 reg, mem),
5702 x)));
5704 /* Linear sequence. */
5705 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5706 for (i = 0; i < 16; ++i)
5707 XVECEXP (mask, 0, i) = GEN_INT (i);
5709 /* Set permute mask to insert element into target. */
5710 for (i = 0; i < width; ++i)
5711 XVECEXP (mask, 0, elt*width + i)
5712 = GEN_INT (i + 0x10);
5713 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
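/* For example, with V4SImode and ELT == 2 the selector built above is
   { 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }: the 16..19 entries
   pull the new element's bytes from the second vperm input, while the
   remaining entries copy TARGET unchanged.  */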
5715 if (BYTES_BIG_ENDIAN)
5716 x = gen_rtx_UNSPEC (mode,
5717 gen_rtvec (3, target, reg,
5718 force_reg (V16QImode, x)),
5719 UNSPEC_VPERM);
5720 else
5722 /* Invert selector. We prefer to generate VNAND on P8 so
5723 that future fusion opportunities can kick in, but must
5724 generate VNOR elsewhere. */
5725 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5726 rtx iorx = (TARGET_P8_VECTOR
5727 ? gen_rtx_IOR (V16QImode, notx, notx)
5728 : gen_rtx_AND (V16QImode, notx, notx));
5729 rtx tmp = gen_reg_rtx (V16QImode);
5730 emit_insn (gen_rtx_SET (VOIDmode, tmp, iorx));
5732 /* Permute with operands reversed and adjusted selector. */
5733 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5734 UNSPEC_VPERM);
5737 emit_insn (gen_rtx_SET (VOIDmode, target, x));
5740 /* Extract field ELT from VEC into TARGET. */
5742 void
5743 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5745 enum machine_mode mode = GET_MODE (vec);
5746 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5747 rtx mem;
5749 if (VECTOR_MEM_VSX_P (mode))
5751 switch (mode)
5753 default:
5754 break;
5755 case V1TImode:
5756 gcc_assert (elt == 0 && inner_mode == TImode);
5757 emit_move_insn (target, gen_lowpart (TImode, vec));
5758 return;
5759 case V2DFmode:
5760 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5761 return;
5762 case V2DImode:
5763 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5764 return;
5765 case V4SFmode:
5766 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5767 return;
5771 /* Allocate mode-sized buffer. */
5772 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5774 emit_move_insn (mem, vec);
5776 /* Add offset to field within buffer matching vector element. */
5777 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5779 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5782 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5783 implement ANDing by the mask IN. */
5784 void
5785 build_mask64_2_operands (rtx in, rtx *out)
5787 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5788 int shift;
5790 gcc_assert (GET_CODE (in) == CONST_INT);
5792 c = INTVAL (in);
5793 if (c & 1)
5795 /* Assume c initially something like 0x00fff000000fffff. The idea
5796 is to rotate the word so that the middle ^^^^^^ group of zeros
5797 is at the MS end and can be cleared with an rldicl mask. We then
5798 rotate back and clear off the MS ^^ group of zeros with a
5799 second rldicl. */
5800 c = ~c; /* c == 0xff000ffffff00000 */
5801 lsb = c & -c; /* lsb == 0x0000000000100000 */
5802 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5803 c = ~c; /* c == 0x00fff000000fffff */
5804 c &= -lsb; /* c == 0x00fff00000000000 */
5805 lsb = c & -c; /* lsb == 0x0000100000000000 */
5806 c = ~c; /* c == 0xff000fffffffffff */
5807 c &= -lsb; /* c == 0xff00000000000000 */
5808 shift = 0;
5809 while ((lsb >>= 1) != 0)
5810 shift++; /* shift == 44 on exit from loop */
5811 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5812 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5813 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5815 else
5817 /* Assume c initially something like 0xff000f0000000000. The idea
5818 is to rotate the word so that the ^^^ middle group of zeros
5819 is at the LS end and can be cleared with an rldicr mask. We then
5820 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5821 a second rldicr. */
5822 lsb = c & -c; /* lsb == 0x0000010000000000 */
5823 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5824 c = ~c; /* c == 0x00fff0ffffffffff */
5825 c &= -lsb; /* c == 0x00fff00000000000 */
5826 lsb = c & -c; /* lsb == 0x0000100000000000 */
5827 c = ~c; /* c == 0xff000fffffffffff */
5828 c &= -lsb; /* c == 0xff00000000000000 */
5829 shift = 0;
5830 while ((lsb >>= 1) != 0)
5831 shift++; /* shift == 44 on exit from loop */
5832 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5833 m1 >>= shift; /* m1 == 0x0000000000000fff */
5834 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5837 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5838 masks will be all 1's. We are guaranteed more than one transition. */
5839 out[0] = GEN_INT (64 - shift);
5840 out[1] = GEN_INT (m1);
5841 out[2] = GEN_INT (shift);
5842 out[3] = GEN_INT (m2);
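/* An illustrative, standalone check (not used by the compiler) of how
   the four outputs combine: rotating left by OUT[0] and masking with
   OUT[1], then rotating left by OUT[2] and masking with OUT[3], must
   be the same as ANDing with the original mask.  */
#if 0
static unsigned long long
rotl64 (unsigned long long x, int n)
{
  n &= 63;
  return n ? (x << n) | (x >> (64 - n)) : x;
}

static int
mask64_2_check (unsigned long long x, unsigned long long mask,
		int shift, unsigned long long m1, unsigned long long m2)
{
  unsigned long long t = rotl64 (x, 64 - shift) & m1;	/* first rldic[lr] */
  t = rotl64 (t, shift) & m2;				/* second rldic[lr] */
  return t == (x & mask);
}
#endif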
5845 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5847 bool
5848 invalid_e500_subreg (rtx op, enum machine_mode mode)
5850 if (TARGET_E500_DOUBLE)
5852 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5853 subreg:TI and reg:TF. Decimal float modes are like integer
5854 modes (only low part of each register used) for this
5855 purpose. */
5856 if (GET_CODE (op) == SUBREG
5857 && (mode == SImode || mode == DImode || mode == TImode
5858 || mode == DDmode || mode == TDmode || mode == PTImode)
5859 && REG_P (SUBREG_REG (op))
5860 && (GET_MODE (SUBREG_REG (op)) == DFmode
5861 || GET_MODE (SUBREG_REG (op)) == TFmode))
5862 return true;
5864 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5865 reg:TI. */
5866 if (GET_CODE (op) == SUBREG
5867 && (mode == DFmode || mode == TFmode)
5868 && REG_P (SUBREG_REG (op))
5869 && (GET_MODE (SUBREG_REG (op)) == DImode
5870 || GET_MODE (SUBREG_REG (op)) == TImode
5871 || GET_MODE (SUBREG_REG (op)) == PTImode
5872 || GET_MODE (SUBREG_REG (op)) == DDmode
5873 || GET_MODE (SUBREG_REG (op)) == TDmode))
5874 return true;
5877 if (TARGET_SPE
5878 && GET_CODE (op) == SUBREG
5879 && mode == SImode
5880 && REG_P (SUBREG_REG (op))
5881 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5882 return true;
5884 return false;
5887 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5888 selects whether to apply the ABI-mandated alignment, the optional
5889 alignment, or both. */
5891 unsigned int
5892 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5894 if (how != align_opt)
5896 if (TREE_CODE (type) == VECTOR_TYPE)
5898 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5899 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5901 if (align < 64)
5902 align = 64;
5904 else if (align < 128)
5905 align = 128;
5907 else if (TARGET_E500_DOUBLE
5908 && TREE_CODE (type) == REAL_TYPE
5909 && TYPE_MODE (type) == DFmode)
5911 if (align < 64)
5912 align = 64;
5916 if (how != align_abi)
5918 if (TREE_CODE (type) == ARRAY_TYPE
5919 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5921 if (align < BITS_PER_WORD)
5922 align = BITS_PER_WORD;
5926 return align;
5929 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
5931 bool
5932 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
5934 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5936 if (computed != 128)
5938 static bool warned;
5939 if (!warned && warn_psabi)
5941 warned = true;
5942 inform (input_location,
5943 "the layout of aggregates containing vectors with"
5944 " %d-byte alignment has changed in GCC 4.10",
5945 computed / BITS_PER_UNIT);
5948 /* In current GCC there is no special case. */
5949 return false;
5952 return false;
5955 /* AIX increases natural record alignment to doubleword if the first
5956 field is an FP double while the FP fields remain word aligned. */
5958 unsigned int
5959 rs6000_special_round_type_align (tree type, unsigned int computed,
5960 unsigned int specified)
5962 unsigned int align = MAX (computed, specified);
5963 tree field = TYPE_FIELDS (type);
5965 /* Skip all non-field decls.  */
5966 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5967 field = DECL_CHAIN (field);
5969 if (field != NULL && field != type)
5971 type = TREE_TYPE (field);
5972 while (TREE_CODE (type) == ARRAY_TYPE)
5973 type = TREE_TYPE (type);
5975 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5976 align = MAX (align, 64);
5979 return align;
5982 /* Darwin increases record alignment to the natural alignment of
5983 the first field. */
5985 unsigned int
5986 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5987 unsigned int specified)
5989 unsigned int align = MAX (computed, specified);
5991 if (TYPE_PACKED (type))
5992 return align;
5994 /* Find the first field, looking down into aggregates. */
5995 do {
5996 tree field = TYPE_FIELDS (type);
5997 /* Skip all non-field decls.  */
5998 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5999 field = DECL_CHAIN (field);
6000 if (! field)
6001 break;
6002 /* A packed field does not contribute any extra alignment. */
6003 if (DECL_PACKED (field))
6004 return align;
6005 type = TREE_TYPE (field);
6006 while (TREE_CODE (type) == ARRAY_TYPE)
6007 type = TREE_TYPE (type);
6008 } while (AGGREGATE_TYPE_P (type));
6010 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
6011 align = MAX (align, TYPE_ALIGN (type));
6013 return align;
6016 /* Return 1 for an operand in small memory on V.4/eabi. */
6018 int
6019 small_data_operand (rtx op ATTRIBUTE_UNUSED,
6020 enum machine_mode mode ATTRIBUTE_UNUSED)
6022 #if TARGET_ELF
6023 rtx sym_ref;
6025 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
6026 return 0;
6028 if (DEFAULT_ABI != ABI_V4)
6029 return 0;
6031 /* Vector and float memory instructions have a limited offset on the
6032 SPE, so using a vector or float variable directly as an operand is
6033 not useful. */
6034 if (TARGET_SPE
6035 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
6036 return 0;
6038 if (GET_CODE (op) == SYMBOL_REF)
6039 sym_ref = op;
6041 else if (GET_CODE (op) != CONST
6042 || GET_CODE (XEXP (op, 0)) != PLUS
6043 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
6044 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
6045 return 0;
6047 else
6049 rtx sum = XEXP (op, 0);
6050 HOST_WIDE_INT summand;
6052 /* We have to be careful here, because it is the referenced address
6053 that must be 32k from _SDA_BASE_, not just the symbol. */
6054 summand = INTVAL (XEXP (sum, 1));
6055 if (summand < 0 || summand > g_switch_value)
6056 return 0;
6058 sym_ref = XEXP (sum, 0);
6061 return SYMBOL_REF_SMALL_P (sym_ref);
6062 #else
6063 return 0;
6064 #endif
6067 /* Return true if either operand is a general purpose register. */
6069 bool
6070 gpr_or_gpr_p (rtx op0, rtx op1)
6072 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
6073 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
6076 /* Return true if this is a move direct operation between GPR registers and
6077 floating point/VSX registers. */
6079 bool
6080 direct_move_p (rtx op0, rtx op1)
6082 int regno0, regno1;
6084 if (!REG_P (op0) || !REG_P (op1))
6085 return false;
6087 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
6088 return false;
6090 regno0 = REGNO (op0);
6091 regno1 = REGNO (op1);
6092 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
6093 return false;
6095 if (INT_REGNO_P (regno0))
6096 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
6098 else if (INT_REGNO_P (regno1))
6100 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
6101 return true;
6103 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
6104 return true;
6107 return false;
6110 /* Return true if this is a load or store quad operation. This function does
6111 not handle the atomic quad memory instructions. */
6113 bool
6114 quad_load_store_p (rtx op0, rtx op1)
6116 bool ret;
6118 if (!TARGET_QUAD_MEMORY)
6119 ret = false;
6121 else if (REG_P (op0) && MEM_P (op1))
6122 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6123 && quad_memory_operand (op1, GET_MODE (op1))
6124 && !reg_overlap_mentioned_p (op0, op1));
6126 else if (MEM_P (op0) && REG_P (op1))
6127 ret = (quad_memory_operand (op0, GET_MODE (op0))
6128 && quad_int_reg_operand (op1, GET_MODE (op1)));
6130 else
6131 ret = false;
6133 if (TARGET_DEBUG_ADDR)
6135 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6136 ret ? "true" : "false");
6137 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
6140 return ret;
6143 /* Given an address, return a constant offset term if one exists. */
6145 static rtx
6146 address_offset (rtx op)
6148 if (GET_CODE (op) == PRE_INC
6149 || GET_CODE (op) == PRE_DEC)
6150 op = XEXP (op, 0);
6151 else if (GET_CODE (op) == PRE_MODIFY
6152 || GET_CODE (op) == LO_SUM)
6153 op = XEXP (op, 1);
6155 if (GET_CODE (op) == CONST)
6156 op = XEXP (op, 0);
6158 if (GET_CODE (op) == PLUS)
6159 op = XEXP (op, 1);
6161 if (CONST_INT_P (op))
6162 return op;
6164 return NULL_RTX;
6167 /* Return true if the MEM operand is a memory operand suitable for use
6168 with a (full width, possibly multiple) gpr load/store. On
6169 powerpc64 this means the offset must be divisible by 4.
6170 Implements 'Y' constraint.
6172 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6173 a constraint function we know the operand has satisfied a suitable
6174 memory predicate. Also accept some odd rtl generated by reload
6175 (see rs6000_legitimize_reload_address for various forms). It is
6176 important that reload rtl be accepted by appropriate constraints
6177 but not by the operand predicate.
6179 Offsetting a lo_sum should not be allowed, except where we know by
6180 alignment that a 32k boundary is not crossed, but see the ???
6181 comment in rs6000_legitimize_reload_address. Note that by
6182 "offsetting" here we mean a further offset to access parts of the
6183 MEM. It's fine to have a lo_sum where the inner address is offset
6184 from a sym, since the same sym+offset will appear in the high part
6185 of the address calculation. */
6187 bool
6188 mem_operand_gpr (rtx op, enum machine_mode mode)
6190 unsigned HOST_WIDE_INT offset;
6191 int extra;
6192 rtx addr = XEXP (op, 0);
6194 op = address_offset (addr);
6195 if (op == NULL_RTX)
6196 return true;
6198 offset = INTVAL (op);
6199 if (TARGET_POWERPC64 && (offset & 3) != 0)
6200 return false;
6202 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6203 if (extra < 0)
6204 extra = 0;
6206 if (GET_CODE (addr) == LO_SUM)
6207 /* For lo_sum addresses, we must allow any offset except one that
6208 causes a wrap, so test only the low 16 bits. */
6209 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6211 return offset + 0x8000 < 0x10000u - extra;
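/* For example, an 8-byte access (EXTRA == 0 on powerpc64) accepts any
   word-aligned offset in [-0x8000, 0x7fff], while a 16-byte access
   (EXTRA == 8) stops at 0x7ff4 so that OFFSET + 8, the address of the
   second doubleword, still fits the 16-bit displacement field.  */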
6214 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6216 static bool
6217 reg_offset_addressing_ok_p (enum machine_mode mode)
6219 switch (mode)
6221 case V16QImode:
6222 case V8HImode:
6223 case V4SFmode:
6224 case V4SImode:
6225 case V2DFmode:
6226 case V2DImode:
6227 case V1TImode:
6228 case TImode:
6229 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6230 TImode is not a vector mode, if we want to use the VSX registers to
6231 move it around, we need to restrict ourselves to reg+reg
6232 addressing. */
6233 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6234 return false;
6235 break;
6237 case V4HImode:
6238 case V2SImode:
6239 case V1DImode:
6240 case V2SFmode:
6241 /* Paired vector modes. Only reg+reg addressing is valid. */
6242 if (TARGET_PAIRED_FLOAT)
6243 return false;
6244 break;
6246 case SDmode:
6247 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6248 addressing for the LFIWZX and STFIWX instructions. */
6249 if (TARGET_NO_SDMODE_STACK)
6250 return false;
6251 break;
6253 default:
6254 break;
6257 return true;
6260 static bool
6261 virtual_stack_registers_memory_p (rtx op)
6263 int regnum;
6265 if (GET_CODE (op) == REG)
6266 regnum = REGNO (op);
6268 else if (GET_CODE (op) == PLUS
6269 && GET_CODE (XEXP (op, 0)) == REG
6270 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6271 regnum = REGNO (XEXP (op, 0));
6273 else
6274 return false;
6276 return (regnum >= FIRST_VIRTUAL_REGISTER
6277 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6280 /* Return true if a MODE sized memory accesses to OP plus OFFSET
6281 is known to not straddle a 32k boundary. */
6283 static bool
6284 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6285 enum machine_mode mode)
6287 tree decl, type;
6288 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6290 if (GET_CODE (op) != SYMBOL_REF)
6291 return false;
6293 dsize = GET_MODE_SIZE (mode);
6294 decl = SYMBOL_REF_DECL (op);
6295 if (!decl)
6297 if (dsize == 0)
6298 return false;
6300 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6301 replacing memory addresses with an anchor plus offset. We
6302 could find the decl by rummaging around in the block->objects
6303 VEC for the given offset but that seems like too much work. */
6304 dalign = BITS_PER_UNIT;
6305 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6306 && SYMBOL_REF_ANCHOR_P (op)
6307 && SYMBOL_REF_BLOCK (op) != NULL)
6309 struct object_block *block = SYMBOL_REF_BLOCK (op);
6311 dalign = block->alignment;
6312 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6314 else if (CONSTANT_POOL_ADDRESS_P (op))
6316 /* It would be nice to have get_pool_align().. */
6317 enum machine_mode cmode = get_pool_mode (op);
6319 dalign = GET_MODE_ALIGNMENT (cmode);
6322 else if (DECL_P (decl))
6324 dalign = DECL_ALIGN (decl);
6326 if (dsize == 0)
6328 /* Allow BLKmode when the entire object is known to not
6329 cross a 32k boundary. */
6330 if (!DECL_SIZE_UNIT (decl))
6331 return false;
6333 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6334 return false;
6336 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6337 if (dsize > 32768)
6338 return false;
6340 return dalign / BITS_PER_UNIT >= dsize;
6343 else
6345 type = TREE_TYPE (decl);
6347 dalign = TYPE_ALIGN (type);
6348 if (CONSTANT_CLASS_P (decl))
6349 dalign = CONSTANT_ALIGNMENT (decl, dalign);
6350 else
6351 dalign = DATA_ALIGNMENT (decl, dalign);
6353 if (dsize == 0)
6355 /* BLKmode, check the entire object. */
6356 if (TREE_CODE (decl) == STRING_CST)
6357 dsize = TREE_STRING_LENGTH (decl);
6358 else if (TYPE_SIZE_UNIT (type)
6359 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
6360 dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6361 else
6362 return false;
6363 if (dsize > 32768)
6364 return false;
6366 return dalign / BITS_PER_UNIT >= dsize;
6370 /* Find how many bits of the alignment we know for this access. */
6371 mask = dalign / BITS_PER_UNIT - 1;
6372 lsb = offset & -offset;
6373 mask &= lsb - 1;
6374 dalign = mask + 1;
6376 return dalign >= dsize;
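/* An illustrative, standalone sketch (not used by the compiler) of the
   mask arithmetic above: the alignment known at BASE + OFFSET is
   limited by the lowest set bit of OFFSET, e.g. a 16-byte-aligned
   object accessed at offset 40 is only known to be 8-byte aligned.  */
#if 0
static unsigned long long
known_alignment (unsigned long long base_align, unsigned long long offset)
{
  unsigned long long mask = base_align - 1;  /* base_align is a power of 2 */
  unsigned long long lsb = offset & -offset; /* lowest set bit of offset */
  mask &= lsb - 1;  /* offset == 0 wraps lsb - 1 and keeps mask intact */
  return mask + 1;  /* known_alignment (16, 40) == 8 */
}
#endif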
6379 static bool
6380 constant_pool_expr_p (rtx op)
6382 rtx base, offset;
6384 split_const (op, &base, &offset);
6385 return (GET_CODE (base) == SYMBOL_REF
6386 && CONSTANT_POOL_ADDRESS_P (base)
6387 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6390 static const_rtx tocrel_base, tocrel_offset;
6392 /* Return true if OP is a toc pointer relative address (the output
6393 of create_TOC_reference). If STRICT, do not match high part or
6394 non-split -mcmodel=large/medium toc pointer relative addresses. */
6396 bool
6397 toc_relative_expr_p (const_rtx op, bool strict)
6399 if (!TARGET_TOC)
6400 return false;
6402 if (TARGET_CMODEL != CMODEL_SMALL)
6404 /* Only match the low part. */
6405 if (GET_CODE (op) == LO_SUM
6406 && REG_P (XEXP (op, 0))
6407 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6408 op = XEXP (op, 1);
6409 else if (strict)
6410 return false;
6413 tocrel_base = op;
6414 tocrel_offset = const0_rtx;
6415 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6417 tocrel_base = XEXP (op, 0);
6418 tocrel_offset = XEXP (op, 1);
6421 return (GET_CODE (tocrel_base) == UNSPEC
6422 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6425 /* Return true if X is a constant pool address, and also for cmodel=medium
6426 if X is a toc-relative address known to be offsettable within MODE. */
6428 bool
6429 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6430 bool strict)
6432 return (toc_relative_expr_p (x, strict)
6433 && (TARGET_CMODEL != CMODEL_MEDIUM
6434 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6435 || mode == QImode
6436 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6437 INTVAL (tocrel_offset), mode)));
6440 static bool
6441 legitimate_small_data_p (enum machine_mode mode, rtx x)
6443 return (DEFAULT_ABI == ABI_V4
6444 && !flag_pic && !TARGET_TOC
6445 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6446 && small_data_operand (x, mode));
6449 /* SPE offset addressing is limited to 5-bits worth of double words. */
6450 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
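/* That is, the offset must be a multiple of 8 in [0, 248]: a 5-bit
   double-word index scaled by the 8-byte element size.  */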
6452 bool
6453 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6454 bool strict, bool worst_case)
6456 unsigned HOST_WIDE_INT offset;
6457 unsigned int extra;
6459 if (GET_CODE (x) != PLUS)
6460 return false;
6461 if (!REG_P (XEXP (x, 0)))
6462 return false;
6463 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6464 return false;
6465 if (!reg_offset_addressing_ok_p (mode))
6466 return virtual_stack_registers_memory_p (x);
6467 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6468 return true;
6469 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6470 return false;
6472 offset = INTVAL (XEXP (x, 1));
6473 extra = 0;
6474 switch (mode)
6476 case V4HImode:
6477 case V2SImode:
6478 case V1DImode:
6479 case V2SFmode:
6480 /* SPE vector modes. */
6481 return SPE_CONST_OFFSET_OK (offset);
6483 case DFmode:
6484 case DDmode:
6485 case DImode:
6486 /* On e500v2, we may have:
6488 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6490 Which gets addressed with evldd instructions. */
6491 if (TARGET_E500_DOUBLE)
6492 return SPE_CONST_OFFSET_OK (offset);
6494 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6495 addressing. */
6496 if (VECTOR_MEM_VSX_P (mode))
6497 return false;
6499 if (!worst_case)
6500 break;
6501 if (!TARGET_POWERPC64)
6502 extra = 4;
6503 else if (offset & 3)
6504 return false;
6505 break;
6507 case TFmode:
6508 if (TARGET_E500_DOUBLE)
6509 return (SPE_CONST_OFFSET_OK (offset)
6510 && SPE_CONST_OFFSET_OK (offset + 8));
6511 /* fall through */
6513 case TDmode:
6514 case TImode:
6515 case PTImode:
6516 extra = 8;
6517 if (!worst_case)
6518 break;
6519 if (!TARGET_POWERPC64)
6520 extra = 12;
6521 else if (offset & 3)
6522 return false;
6523 break;
6525 default:
6526 break;
6529 offset += 0x8000;
6530 return offset < 0x10000 - extra;
6533 bool
6534 legitimate_indexed_address_p (rtx x, int strict)
6536 rtx op0, op1;
6538 if (GET_CODE (x) != PLUS)
6539 return false;
6541 op0 = XEXP (x, 0);
6542 op1 = XEXP (x, 1);
6544 /* Recognize the rtl generated by reload which we know will later be
6545 replaced with proper base and index regs. */
6546 if (!strict
6547 && reload_in_progress
6548 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6549 && REG_P (op1))
6550 return true;
6552 return (REG_P (op0) && REG_P (op1)
6553 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6554 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6555 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6556 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6559 bool
6560 avoiding_indexed_address_p (enum machine_mode mode)
6562 /* Avoid indexed addressing for modes that have non-indexed
6563 load/store instruction forms. */
6564 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6567 bool
6568 legitimate_indirect_address_p (rtx x, int strict)
6570 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6573 bool
6574 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6576 if (!TARGET_MACHO || !flag_pic
6577 || mode != SImode || GET_CODE (x) != MEM)
6578 return false;
6579 x = XEXP (x, 0);
6581 if (GET_CODE (x) != LO_SUM)
6582 return false;
6583 if (GET_CODE (XEXP (x, 0)) != REG)
6584 return false;
6585 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6586 return false;
6587 x = XEXP (x, 1);
6589 return CONSTANT_P (x);
6592 static bool
6593 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6595 if (GET_CODE (x) != LO_SUM)
6596 return false;
6597 if (GET_CODE (XEXP (x, 0)) != REG)
6598 return false;
6599 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6600 return false;
6601 /* Restrict addressing for DI because of our SUBREG hackery. */
6602 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6603 return false;
6604 x = XEXP (x, 1);
6606 if (TARGET_ELF || TARGET_MACHO)
6608 bool large_toc_ok;
6610 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6611 return false;
6612 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that usually calls
6613 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
6614 recognizes some LO_SUM addresses as valid although this
6615 function says the opposite.  In most cases LRA can, through its
6616 own transformations, generate correct code for address reloads;
6617 it cannot manage only some LO_SUM cases.  So we need to add
6618 code analogous to that in rs6000_legitimize_reload_address for
6619 LO_SUM here, saying that some addresses are still valid. */
6620 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6621 && small_toc_ref (x, VOIDmode));
6622 if (TARGET_TOC && ! large_toc_ok)
6623 return false;
6624 if (GET_MODE_NUNITS (mode) != 1)
6625 return false;
6626 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6627 && !(/* ??? Assume floating point reg based on mode? */
6628 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6629 && (mode == DFmode || mode == DDmode)))
6630 return false;
6632 return CONSTANT_P (x) || large_toc_ok;
6635 return false;
6639 /* Try machine-dependent ways of modifying an illegitimate address
6640 to be legitimate. If we find one, return the new, valid address.
6641 This is used from only one place: `memory_address' in explow.c.
6643 OLDX is the address as it was before break_out_memory_refs was
6644 called. In some cases it is useful to look at this to decide what
6645 needs to be done.
6647 It is always safe for this function to do nothing. It exists to
6648 recognize opportunities to optimize the output.
6650 On RS/6000, first check for the sum of a register with a constant
6651 integer that is out of range. If so, generate code to add the
6652 constant with the low-order 16 bits masked to the register and force
6653 this result into another register (this can be done with `cau').
6654 Then generate an address of REG+(CONST&0xffff), allowing for the
6655 possibility of bit 16 being a one.
6657 Then check for the sum of a register and something not constant, try to
6658 load the other things into a register and return the sum. */
6660 static rtx
6661 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6662 enum machine_mode mode)
6664 unsigned int extra;
6666 if (!reg_offset_addressing_ok_p (mode))
6668 if (virtual_stack_registers_memory_p (x))
6669 return x;
6671 /* In theory we should not be seeing addresses of the form reg+0,
6672 but just in case it is generated, optimize it away. */
6673 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6674 return force_reg (Pmode, XEXP (x, 0));
6676 /* For TImode with load/store quad, restrict addresses to just a single
6677 pointer, so it works with both GPRs and VSX registers. */
6678 /* Make sure both operands are registers. */
6679 else if (GET_CODE (x) == PLUS
6680 && (mode != TImode || !TARGET_QUAD_MEMORY))
6681 return gen_rtx_PLUS (Pmode,
6682 force_reg (Pmode, XEXP (x, 0)),
6683 force_reg (Pmode, XEXP (x, 1)));
6684 else
6685 return force_reg (Pmode, x);
6687 if (GET_CODE (x) == SYMBOL_REF)
6689 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6690 if (model != 0)
6691 return rs6000_legitimize_tls_address (x, model);
6694 extra = 0;
6695 switch (mode)
6697 case TFmode:
6698 case TDmode:
6699 case TImode:
6700 case PTImode:
6701 /* As in legitimate_offset_address_p we do not assume
6702 worst-case. The mode here is just a hint as to the registers
6703 used. A TImode is usually in gprs, but may actually be in
6704 fprs. Leave worst-case scenario for reload to handle via
6705 insn constraints. PTImode is only GPRs. */
6706 extra = 8;
6707 break;
6708 default:
6709 break;
6712 if (GET_CODE (x) == PLUS
6713 && GET_CODE (XEXP (x, 0)) == REG
6714 && GET_CODE (XEXP (x, 1)) == CONST_INT
6715 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6716 >= 0x10000 - extra)
6717 && !(SPE_VECTOR_MODE (mode)
6718 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6720 HOST_WIDE_INT high_int, low_int;
6721 rtx sum;
6722 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6723 if (low_int >= 0x8000 - extra)
6724 low_int = 0;
6725 high_int = INTVAL (XEXP (x, 1)) - low_int;
6726 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6727 GEN_INT (high_int)), 0);
6728 return plus_constant (Pmode, sum, low_int);
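/* For example, reg + 0x181cd splits as low_int = 0x1cd - 0x8000
   = -0x7e33 (the sign-extended low 16 bits) and high_int = 0x20000,
   i.e. an "addis tmp,reg,2" followed by a memory access at
   displacement -0x7e33 from the new base.  */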
6730 else if (GET_CODE (x) == PLUS
6731 && GET_CODE (XEXP (x, 0)) == REG
6732 && GET_CODE (XEXP (x, 1)) != CONST_INT
6733 && GET_MODE_NUNITS (mode) == 1
6734 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6735 || (/* ??? Assume floating point reg based on mode? */
6736 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6737 && (mode == DFmode || mode == DDmode)))
6738 && !avoiding_indexed_address_p (mode))
6740 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6741 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6743 else if (SPE_VECTOR_MODE (mode)
6744 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6746 if (mode == DImode)
6747 return x;
6748 /* We accept [reg + reg] and [reg + OFFSET]. */
6750 if (GET_CODE (x) == PLUS)
6752 rtx op1 = XEXP (x, 0);
6753 rtx op2 = XEXP (x, 1);
6754 rtx y;
6756 op1 = force_reg (Pmode, op1);
6758 if (GET_CODE (op2) != REG
6759 && (GET_CODE (op2) != CONST_INT
6760 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6761 || (GET_MODE_SIZE (mode) > 8
6762 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6763 op2 = force_reg (Pmode, op2);
6765 /* We can't always do [reg + reg] for these, because [reg +
6766 reg + offset] is not a legitimate addressing mode. */
6767 y = gen_rtx_PLUS (Pmode, op1, op2);
6769 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6770 return force_reg (Pmode, y);
6771 else
6772 return y;
6775 return force_reg (Pmode, x);
6777 else if ((TARGET_ELF
6778 #if TARGET_MACHO
6779 || !MACHO_DYNAMIC_NO_PIC_P
6780 #endif
6782 && TARGET_32BIT
6783 && TARGET_NO_TOC
6784 && ! flag_pic
6785 && GET_CODE (x) != CONST_INT
6786 && GET_CODE (x) != CONST_WIDE_INT
6787 && GET_CODE (x) != CONST_DOUBLE
6788 && CONSTANT_P (x)
6789 && GET_MODE_NUNITS (mode) == 1
6790 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6791 || (/* ??? Assume floating point reg based on mode? */
6792 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6793 && (mode == DFmode || mode == DDmode))))
6795 rtx reg = gen_reg_rtx (Pmode);
6796 if (TARGET_ELF)
6797 emit_insn (gen_elf_high (reg, x));
6798 else
6799 emit_insn (gen_macho_high (reg, x));
6800 return gen_rtx_LO_SUM (Pmode, reg, x);
6802 else if (TARGET_TOC
6803 && GET_CODE (x) == SYMBOL_REF
6804 && constant_pool_expr_p (x)
6805 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6806 return create_TOC_reference (x, NULL_RTX);
6807 else
6808 return x;
6811 /* Debug version of rs6000_legitimize_address. */
6812 static rtx
6813 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6815 rtx ret;
6816 rtx_insn *insns;
6818 start_sequence ();
6819 ret = rs6000_legitimize_address (x, oldx, mode);
6820 insns = get_insns ();
6821 end_sequence ();
6823 if (ret != x)
6825 fprintf (stderr,
6826 "\nrs6000_legitimize_address: mode %s, old code %s, "
6827 "new code %s, modified\n",
6828 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6829 GET_RTX_NAME (GET_CODE (ret)));
6831 fprintf (stderr, "Original address:\n");
6832 debug_rtx (x);
6834 fprintf (stderr, "oldx:\n");
6835 debug_rtx (oldx);
6837 fprintf (stderr, "New address:\n");
6838 debug_rtx (ret);
6840 if (insns)
6842 fprintf (stderr, "Insns added:\n");
6843 debug_rtx_list (insns, 20);
6846 else
6848 fprintf (stderr,
6849 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6850 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6852 debug_rtx (x);
6855 if (insns)
6856 emit_insn (insns);
6858 return ret;
6861 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6862 We need to emit DTP-relative relocations. */
6864 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6865 static void
6866 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6868 switch (size)
6870 case 4:
6871 fputs ("\t.long\t", file);
6872 break;
6873 case 8:
6874 fputs (DOUBLE_INT_ASM_OP, file);
6875 break;
6876 default:
6877 gcc_unreachable ();
6879 output_addr_const (file, x);
6880 fputs ("@dtprel+0x8000", file);
6883 /* Return true if X is a symbol that refers to real (rather than emulated)
6884 TLS. */
6886 static bool
6887 rs6000_real_tls_symbol_ref_p (rtx x)
6889 return (GET_CODE (x) == SYMBOL_REF
6890 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
6893 /* In the name of slightly smaller debug output, and to cater to
6894 general assembler lossage, recognize various UNSPEC sequences
6895 and turn them back into a direct symbol reference. */
6897 static rtx
6898 rs6000_delegitimize_address (rtx orig_x)
6900 rtx x, y, offset;
6902 orig_x = delegitimize_mem_from_attrs (orig_x);
6903 x = orig_x;
6904 if (MEM_P (x))
6905 x = XEXP (x, 0);
6907 y = x;
6908 if (TARGET_CMODEL != CMODEL_SMALL
6909 && GET_CODE (y) == LO_SUM)
6910 y = XEXP (y, 1);
6912 offset = NULL_RTX;
6913 if (GET_CODE (y) == PLUS
6914 && GET_MODE (y) == Pmode
6915 && CONST_INT_P (XEXP (y, 1)))
6917 offset = XEXP (y, 1);
6918 y = XEXP (y, 0);
6921 if (GET_CODE (y) == UNSPEC
6922 && XINT (y, 1) == UNSPEC_TOCREL)
6924 #ifdef ENABLE_CHECKING
6925 if (REG_P (XVECEXP (y, 0, 1))
6926 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6928 /* All good. */
6930 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6932 /* Weirdness alert. df_note_compute can replace r2 with a
6933 debug_expr when this unspec is in a debug_insn.
6934 Seen in gcc.dg/pr51957-1.c */
6936 else
6938 debug_rtx (orig_x);
6939 abort ();
6941 #endif
6942 y = XVECEXP (y, 0, 0);
6944 #ifdef HAVE_AS_TLS
6945 /* Do not associate thread-local symbols with the original
6946 constant pool symbol. */
6947 if (TARGET_XCOFF
6948 && GET_CODE (y) == SYMBOL_REF
6949 && CONSTANT_POOL_ADDRESS_P (y)
6950 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
6951 return orig_x;
6952 #endif
6954 if (offset != NULL_RTX)
6955 y = gen_rtx_PLUS (Pmode, y, offset);
6956 if (!MEM_P (orig_x))
6957 return y;
6958 else
6959 return replace_equiv_address_nv (orig_x, y);
6962 if (TARGET_MACHO
6963 && GET_CODE (orig_x) == LO_SUM
6964 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6966 y = XEXP (XEXP (orig_x, 1), 0);
6967 if (GET_CODE (y) == UNSPEC
6968 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6969 return XVECEXP (y, 0, 0);
6972 return orig_x;
6975 /* Return true if X shouldn't be emitted into the debug info.
6976 The linker doesn't like .toc section references from
6977 .debug_* sections, so reject .toc section symbols. */
6979 static bool
6980 rs6000_const_not_ok_for_debug_p (rtx x)
6982 if (GET_CODE (x) == SYMBOL_REF
6983 && CONSTANT_POOL_ADDRESS_P (x))
6985 rtx c = get_pool_constant (x);
6986 enum machine_mode cmode = get_pool_mode (x);
6987 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6988 return true;
6991 return false;
6994 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6996 static GTY(()) rtx rs6000_tls_symbol;
6997 static rtx
6998 rs6000_tls_get_addr (void)
7000 if (!rs6000_tls_symbol)
7001 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
7003 return rs6000_tls_symbol;
7006 /* Construct the SYMBOL_REF for TLS GOT references. */
7008 static GTY(()) rtx rs6000_got_symbol;
7009 static rtx
7010 rs6000_got_sym (void)
7012 if (!rs6000_got_symbol)
7014 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7015 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
7016 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
7019 return rs6000_got_symbol;
7022 /* AIX Thread-Local Address support. */
7024 static rtx
7025 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
7027 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
7028 const char *name;
7029 char *tlsname;
7031 name = XSTR (addr, 0);
7032 /* Append TLS CSECT qualifier, unless the symbol is already qualified
7033 or the symbol will be placed in the TLS private data section. */
7034 if (name[strlen (name) - 1] != ']'
7035 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
7036 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
7038 tlsname = XALLOCAVEC (char, strlen (name) + 4);
7039 strcpy (tlsname, name);
7040 strcat (tlsname,
7041 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
7042 tlsaddr = copy_rtx (addr);
7043 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
7045 else
7046 tlsaddr = addr;
7048 /* Place addr into TOC constant pool. */
7049 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
7051 /* Output the TOC entry and create the MEM referencing the value. */
7052 if (constant_pool_expr_p (XEXP (sym, 0))
7053 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
7055 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
7056 mem = gen_const_mem (Pmode, tocref);
7057 set_mem_alias_set (mem, get_TOC_alias_set ());
7059 else
7060 return sym;
7062 /* Use global-dynamic for local-dynamic. */
7063 if (model == TLS_MODEL_GLOBAL_DYNAMIC
7064 || model == TLS_MODEL_LOCAL_DYNAMIC)
7066 /* Create new TOC reference for @m symbol. */
7067 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
7068 tlsname = XALLOCAVEC (char, strlen (name) + 1);
7069 strcpy (tlsname, "*LCM");
7070 strcat (tlsname, name + 3);
7071 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
7072 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
7073 tocref = create_TOC_reference (modaddr, NULL_RTX);
7074 rtx modmem = gen_const_mem (Pmode, tocref);
7075 set_mem_alias_set (modmem, get_TOC_alias_set ());
7077 rtx modreg = gen_reg_rtx (Pmode);
7078 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
7080 tmpreg = gen_reg_rtx (Pmode);
7081 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7083 dest = gen_reg_rtx (Pmode);
7084 if (TARGET_32BIT)
7085 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
7086 else
7087 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
7088 return dest;
7090 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
7091 else if (TARGET_32BIT)
7093 tlsreg = gen_reg_rtx (SImode);
7094 emit_insn (gen_tls_get_tpointer (tlsreg));
7096 else
7097 tlsreg = gen_rtx_REG (DImode, 13);
7099 /* Load the TOC value into temporary register. */
7100 tmpreg = gen_reg_rtx (Pmode);
7101 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7102 set_unique_reg_note (get_last_insn (), REG_EQUAL,
7103 gen_rtx_MINUS (Pmode, addr, tlsreg));
7105 /* Add TOC symbol value to TLS pointer. */
7106 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
7108 return dest;
7111 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7112 this (thread-local) address. */
7114 static rtx
7115 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
7117 rtx dest, insn;
7119 if (TARGET_XCOFF)
7120 return rs6000_legitimize_tls_address_aix (addr, model);
7122 dest = gen_reg_rtx (Pmode);
7123 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
7125 rtx tlsreg;
7127 if (TARGET_64BIT)
7129 tlsreg = gen_rtx_REG (Pmode, 13);
7130 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7132 else
7134 tlsreg = gen_rtx_REG (Pmode, 2);
7135 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7137 emit_insn (insn);
7139 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7141 rtx tlsreg, tmp;
7143 tmp = gen_reg_rtx (Pmode);
7144 if (TARGET_64BIT)
7146 tlsreg = gen_rtx_REG (Pmode, 13);
7147 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7149 else
7151 tlsreg = gen_rtx_REG (Pmode, 2);
7152 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7154 emit_insn (insn);
7155 if (TARGET_64BIT)
7156 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7157 else
7158 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7159 emit_insn (insn);
7161 else
7163 rtx r3, got, tga, tmp1, tmp2, call_insn;
7165 /* We currently use relocations like @got@tlsgd for tls, which
7166 means the linker will handle allocation of tls entries, placing
7167 them in the .got section. So use a pointer to the .got section,
7168 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7169 or to secondary GOT sections used by 32-bit -fPIC. */
7170 if (TARGET_64BIT)
7171 got = gen_rtx_REG (Pmode, 2);
7172 else
7174 if (flag_pic == 1)
7175 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7176 else
7178 rtx gsym = rs6000_got_sym ();
7179 got = gen_reg_rtx (Pmode);
7180 if (flag_pic == 0)
7181 rs6000_emit_move (got, gsym, Pmode);
7182 else
7184 rtx mem, lab, last;
7186 tmp1 = gen_reg_rtx (Pmode);
7187 tmp2 = gen_reg_rtx (Pmode);
7188 mem = gen_const_mem (Pmode, tmp1);
7189 lab = gen_label_rtx ();
7190 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7191 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7192 if (TARGET_LINK_STACK)
7193 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7194 emit_move_insn (tmp2, mem);
7195 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7196 set_unique_reg_note (last, REG_EQUAL, gsym);
7201 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7203 tga = rs6000_tls_get_addr ();
7204 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7205 1, const0_rtx, Pmode);
7207 r3 = gen_rtx_REG (Pmode, 3);
7208 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7210 if (TARGET_64BIT)
7211 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7212 else
7213 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7215 else if (DEFAULT_ABI == ABI_V4)
7216 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7217 else
7218 gcc_unreachable ();
7219 call_insn = last_call_insn ();
7220 PATTERN (call_insn) = insn;
7221 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7222 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7223 pic_offset_table_rtx);
7225 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7227 tga = rs6000_tls_get_addr ();
7228 tmp1 = gen_reg_rtx (Pmode);
7229 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7230 1, const0_rtx, Pmode);
7232 r3 = gen_rtx_REG (Pmode, 3);
7233 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7235 if (TARGET_64BIT)
7236 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7237 else
7238 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7240 else if (DEFAULT_ABI == ABI_V4)
7241 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7242 else
7243 gcc_unreachable ();
7244 call_insn = last_call_insn ();
7245 PATTERN (call_insn) = insn;
7246 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7247 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7248 pic_offset_table_rtx);
7250 if (rs6000_tls_size == 16)
7252 if (TARGET_64BIT)
7253 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7254 else
7255 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7257 else if (rs6000_tls_size == 32)
7259 tmp2 = gen_reg_rtx (Pmode);
7260 if (TARGET_64BIT)
7261 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7262 else
7263 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7264 emit_insn (insn);
7265 if (TARGET_64BIT)
7266 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7267 else
7268 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7270 else
7272 tmp2 = gen_reg_rtx (Pmode);
7273 if (TARGET_64BIT)
7274 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7275 else
7276 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7277 emit_insn (insn);
7278 insn = gen_rtx_SET (Pmode, dest,
7279 gen_rtx_PLUS (Pmode, tmp2, tmp1));
7281 emit_insn (insn);
7283 else
7285 /* IE, or 64-bit offset LE. */
7286 tmp2 = gen_reg_rtx (Pmode);
7287 if (TARGET_64BIT)
7288 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7289 else
7290 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7291 emit_insn (insn);
7292 if (TARGET_64BIT)
7293 insn = gen_tls_tls_64 (dest, tmp2, addr);
7294 else
7295 insn = gen_tls_tls_32 (dest, tmp2, addr);
7296 emit_insn (insn);
7300 return dest;
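
/* A standalone sketch of source-level declarations that typically reach
   the branches of rs6000_legitimize_tls_address above. These mappings
   are illustrative only: the model actually chosen also depends on
   -fpic/-fPIC, -ftls-model and symbol visibility. Hypothetical names,
   not part of the GCC sources. */
extern __thread int tls_gd;  /* global-dynamic: emits the tga call.  */
static __thread int tls_ld;  /* local-dynamic when compiling PIC.  */
__thread int tls_ie __attribute__ ((tls_model ("initial-exec")));
__thread int tls_le __attribute__ ((tls_model ("local-exec")));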
7303 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7305 static bool
7306 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7308 if (GET_CODE (x) == HIGH
7309 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7310 return true;
7312 /* A TLS symbol in the TOC cannot contain a sum. */
7313 if (GET_CODE (x) == CONST
7314 && GET_CODE (XEXP (x, 0)) == PLUS
7315 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7316 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7317 return true;
7319 /* Do not place an ELF TLS symbol in the constant pool. */
7320 return TARGET_ELF && tls_referenced_p (x);
7323 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7324 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7325 can be addressed relative to the toc pointer. */
7327 static bool
7328 use_toc_relative_ref (rtx sym)
7330 return ((constant_pool_expr_p (sym)
7331 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7332 get_pool_mode (sym)))
7333 || (TARGET_CMODEL == CMODEL_MEDIUM
7334 && SYMBOL_REF_LOCAL_P (sym)));
7337 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7338 replace the input X, or the original X if no replacement is called for.
7339 The output parameter *WIN is 1 if the calling macro should goto WIN,
7340 0 if it should not.
7342 For RS/6000, we wish to handle large displacements off a base
7343 register by splitting the addend across an addi/addis and the mem insn.
7344 This cuts the number of extra insns needed from 3 to 1.
7346 On Darwin, we use this to generate code for floating point constants.
7347 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7348 The Darwin code is inside #if TARGET_MACHO because only then are the
7349 machopic_* functions defined. */
7350 static rtx
7351 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
7352 int opnum, int type,
7353 int ind_levels ATTRIBUTE_UNUSED, int *win)
7355 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7357 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7358 DFmode/DImode MEM. */
7359 if (reg_offset_p
7360 && opnum == 1
7361 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7362 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7363 reg_offset_p = false;
7365 /* We must recognize output that we have already generated ourselves. */
7366 if (GET_CODE (x) == PLUS
7367 && GET_CODE (XEXP (x, 0)) == PLUS
7368 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7369 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7370 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7372 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7373 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7374 opnum, (enum reload_type) type);
7375 *win = 1;
7376 return x;
7379 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7380 if (GET_CODE (x) == LO_SUM
7381 && GET_CODE (XEXP (x, 0)) == HIGH)
7383 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7384 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7385 opnum, (enum reload_type) type);
7386 *win = 1;
7387 return x;
7390 #if TARGET_MACHO
7391 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7392 && GET_CODE (x) == LO_SUM
7393 && GET_CODE (XEXP (x, 0)) == PLUS
7394 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7395 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7396 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7397 && machopic_operand_p (XEXP (x, 1)))
7399 /* Result of previous invocation of this function on Darwin
7400 floating point constant. */
7401 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7402 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7403 opnum, (enum reload_type) type);
7404 *win = 1;
7405 return x;
7407 #endif
7409 if (TARGET_CMODEL != CMODEL_SMALL
7410 && reg_offset_p
7411 && small_toc_ref (x, VOIDmode))
7413 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7414 x = gen_rtx_LO_SUM (Pmode, hi, x);
7415 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7416 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7417 opnum, (enum reload_type) type);
7418 *win = 1;
7419 return x;
7422 if (GET_CODE (x) == PLUS
7423 && GET_CODE (XEXP (x, 0)) == REG
7424 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7425 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7426 && GET_CODE (XEXP (x, 1)) == CONST_INT
7427 && reg_offset_p
7428 && !SPE_VECTOR_MODE (mode)
7429 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7430 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7432 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7433 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7434 HOST_WIDE_INT high
7435 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7437 /* Check for 32-bit overflow. */
7438 if (high + low != val)
7440 *win = 0;
7441 return x;
7444 /* Reload the high part into a base reg; leave the low part
7445 in the mem directly. */
7447 x = gen_rtx_PLUS (GET_MODE (x),
7448 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7449 GEN_INT (high)),
7450 GEN_INT (low));
7452 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7453 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7454 opnum, (enum reload_type) type);
7455 *win = 1;
7456 return x;
7459 if (GET_CODE (x) == SYMBOL_REF
7460 && reg_offset_p
7461 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7462 && !SPE_VECTOR_MODE (mode)
7463 #if TARGET_MACHO
7464 && DEFAULT_ABI == ABI_DARWIN
7465 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7466 && machopic_symbol_defined_p (x)
7467 #else
7468 && DEFAULT_ABI == ABI_V4
7469 && !flag_pic
7470 #endif
7471 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7472 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7473 without fprs.
7474 ??? Assume floating point reg based on mode? This assumption is
7475 violated by e.g. the powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7476 where reload ends up doing a DFmode load of a constant from
7477 mem using two gprs. Unfortunately, at this point reload
7478 hasn't yet selected regs so poking around in reload data
7479 won't help and even if we could figure out the regs reliably,
7480 we'd still want to allow this transformation when the mem is
7481 naturally aligned. Since we say the address is good here, we
7482 can't disable offsets from LO_SUMs in mem_operand_gpr.
7483 FIXME: Allow offset from lo_sum for other modes too, when
7484 mem is sufficiently aligned. */
7485 && mode != TFmode
7486 && mode != TDmode
7487 && (mode != TImode || !TARGET_VSX_TIMODE)
7488 && mode != PTImode
7489 && (mode != DImode || TARGET_POWERPC64)
7490 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7491 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7493 #if TARGET_MACHO
7494 if (flag_pic)
7496 rtx offset = machopic_gen_offset (x);
7497 x = gen_rtx_LO_SUM (GET_MODE (x),
7498 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7499 gen_rtx_HIGH (Pmode, offset)), offset);
7501 else
7502 #endif
7503 x = gen_rtx_LO_SUM (GET_MODE (x),
7504 gen_rtx_HIGH (Pmode, x), x);
7506 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7507 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7508 opnum, (enum reload_type) type);
7509 *win = 1;
7510 return x;
7513 /* Reload an offset address wrapped by an AND that represents the
7514 masking of the lower bits. Strip the outer AND and let reload
7515 convert the offset address into an indirect address. For VSX,
7516 force reload to create the address with an AND in a separate
7517 register, because we can't guarantee an altivec register will
7518 be used. */
7519 if (VECTOR_MEM_ALTIVEC_P (mode)
7520 && GET_CODE (x) == AND
7521 && GET_CODE (XEXP (x, 0)) == PLUS
7522 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7523 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7524 && GET_CODE (XEXP (x, 1)) == CONST_INT
7525 && INTVAL (XEXP (x, 1)) == -16)
7527 x = XEXP (x, 0);
7528 *win = 1;
7529 return x;
7532 if (TARGET_TOC
7533 && reg_offset_p
7534 && GET_CODE (x) == SYMBOL_REF
7535 && use_toc_relative_ref (x))
7537 x = create_TOC_reference (x, NULL_RTX);
7538 if (TARGET_CMODEL != CMODEL_SMALL)
7539 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7540 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7541 opnum, (enum reload_type) type);
7542 *win = 1;
7543 return x;
7545 *win = 0;
7546 return x;
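
/* A standalone sketch of the addend-splitting arithmetic used by
   rs6000_legitimize_reload_address above, extracted into a hypothetical
   helper (not part of the GCC sources). It assumes the mem insn takes a
   signed 16-bit displacement and addis supplies the 32-bit high part. */
static void
split_large_offset (long long val, long long *high, long long *low)
{
  /* Sign-extend the low 16 bits, since the D-field is signed.  */
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  /* Sign-extend the 32-bit remainder for the addis immediate.  */
  *high = (((val - *low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  /* E.g. val == 0x18000 yields *low == -0x8000 and *high == 0x20000.
     The split fails (high + low != val) only on 32-bit overflow,
     which is exactly what the code above checks for.  */
}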
7549 /* Debug version of rs6000_legitimize_reload_address. */
7550 static rtx
7551 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7552 int opnum, int type,
7553 int ind_levels, int *win)
7555 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7556 ind_levels, win);
7557 fprintf (stderr,
7558 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7559 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7560 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7561 debug_rtx (x);
7563 if (x == ret)
7564 fprintf (stderr, "Same address returned\n");
7565 else if (!ret)
7566 fprintf (stderr, "NULL returned\n");
7567 else
7569 fprintf (stderr, "New address:\n");
7570 debug_rtx (ret);
7573 return ret;
7576 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7577 that is a valid memory address for an instruction.
7578 The MODE argument is the machine mode for the MEM expression
7579 that wants to use this address.
7581 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7582 refers to a constant pool entry of an address (or the sum of it
7583 plus a constant), a short (16-bit signed) constant plus a register,
7584 the sum of two registers, or a register indirect, possibly with an
7585 auto-increment. For DFmode, DDmode and DImode with a constant plus
7586 register, we must ensure that both words are addressable, or on
7587 PowerPC64 that the offset is word aligned.
7589 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7590 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7591 because adjacent memory cells are accessed by adding word-sized offsets
7592 during assembly output. */
7593 static bool
7594 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7596 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7598 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7599 if (VECTOR_MEM_ALTIVEC_P (mode)
7600 && GET_CODE (x) == AND
7601 && GET_CODE (XEXP (x, 1)) == CONST_INT
7602 && INTVAL (XEXP (x, 1)) == -16)
7603 x = XEXP (x, 0);
7605 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7606 return 0;
7607 if (legitimate_indirect_address_p (x, reg_ok_strict))
7608 return 1;
7609 if (TARGET_UPDATE
7610 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7611 && mode_supports_pre_incdec_p (mode)
7612 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7613 return 1;
7614 if (virtual_stack_registers_memory_p (x))
7615 return 1;
7616 if (reg_offset_p && legitimate_small_data_p (mode, x))
7617 return 1;
7618 if (reg_offset_p
7619 && legitimate_constant_pool_address_p (x, mode,
7620 reg_ok_strict || lra_in_progress))
7621 return 1;
7622 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7623 allow register indirect addresses. This will allow the values to go in
7624 either GPRs or VSX registers without reloading. The vector types would
7625 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7626 somewhat split, in that some uses are GPR based, and some VSX based. */
7627 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7628 return 0;
7629 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7630 if (! reg_ok_strict
7631 && reg_offset_p
7632 && GET_CODE (x) == PLUS
7633 && GET_CODE (XEXP (x, 0)) == REG
7634 && (XEXP (x, 0) == virtual_stack_vars_rtx
7635 || XEXP (x, 0) == arg_pointer_rtx)
7636 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7637 return 1;
7638 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7639 return 1;
7640 if (mode != TFmode
7641 && mode != TDmode
7642 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7643 || TARGET_POWERPC64
7644 || (mode != DFmode && mode != DDmode)
7645 || (TARGET_E500_DOUBLE && mode != DDmode))
7646 && (TARGET_POWERPC64 || mode != DImode)
7647 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7648 && mode != PTImode
7649 && !avoiding_indexed_address_p (mode)
7650 && legitimate_indexed_address_p (x, reg_ok_strict))
7651 return 1;
7652 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7653 && mode_supports_pre_modify_p (mode)
7654 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7655 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7656 reg_ok_strict, false)
7657 || (!avoiding_indexed_address_p (mode)
7658 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7659 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7660 return 1;
7661 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7662 return 1;
7663 return 0;
7666 /* Debug version of rs6000_legitimate_address_p. */
7667 static bool
7668 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7669 bool reg_ok_strict)
7671 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7672 fprintf (stderr,
7673 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7674 "strict = %d, reload = %s, code = %s\n",
7675 ret ? "true" : "false",
7676 GET_MODE_NAME (mode),
7677 reg_ok_strict,
7678 (reload_completed
7679 ? "after"
7680 : (reload_in_progress ? "progress" : "before")),
7681 GET_RTX_NAME (GET_CODE (x)));
7682 debug_rtx (x);
7684 return ret;
7687 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7689 static bool
7690 rs6000_mode_dependent_address_p (const_rtx addr,
7691 addr_space_t as ATTRIBUTE_UNUSED)
7693 return rs6000_mode_dependent_address_ptr (addr);
7696 /* Go to LABEL if ADDR (a legitimate address expression)
7697 has an effect that depends on the machine mode it is used for.
7699 On the RS/6000 this is true of all integral offsets (since AltiVec
7700 and VSX modes don't allow them) and of any pre-increment or decrement.
7702 ??? Except that due to conceptual problems in offsettable_address_p
7703 we can't really report the problems of integral offsets. So leave
7704 this assuming that the adjustable offset must be valid for the
7705 sub-words of a TFmode operand, which is what we had before. */
7707 static bool
7708 rs6000_mode_dependent_address (const_rtx addr)
7710 switch (GET_CODE (addr))
7712 case PLUS:
7713 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7714 is considered a legitimate address before reload, so there
7715 are no offset restrictions in that case. Note that this
7716 condition is safe in strict mode because any address involving
7717 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7718 been rejected as illegitimate. */
7719 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7720 && XEXP (addr, 0) != arg_pointer_rtx
7721 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7723 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7724 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7726 break;
7728 case LO_SUM:
7729 /* Anything in the constant pool is sufficiently aligned that
7730 all bytes have the same high part address. */
7731 return !legitimate_constant_pool_address_p (addr, QImode, false);
7733 /* Auto-increment cases are now treated generically in recog.c. */
7734 case PRE_MODIFY:
7735 return TARGET_UPDATE;
7737 /* AND is only allowed in Altivec loads. */
7738 case AND:
7739 return true;
7741 default:
7742 break;
7745 return false;
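
/* A standalone sketch of the PLUS check above (hypothetical helper, not
   part of the GCC sources). The offset must remain a valid signed
   16-bit displacement for every word of a worst-case 16-byte operand:
   the last word lives at offset + 8 (64-bit words) or offset + 12
   (32-bit words). */
static int
offset_mode_dependent_p (unsigned long long val, int powerpc64)
{
  /* val + 0x8000 wraps for offsets below -0x8000, so a single unsigned
     compare rejects both underflow and too-large offsets.  */
  return val + 0x8000 >= 0x10000 - (powerpc64 ? 8 : 12);
}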
7748 /* Debug version of rs6000_mode_dependent_address. */
7749 static bool
7750 rs6000_debug_mode_dependent_address (const_rtx addr)
7752 bool ret = rs6000_mode_dependent_address (addr);
7754 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7755 ret ? "true" : "false");
7756 debug_rtx (addr);
7758 return ret;
7761 /* Implement FIND_BASE_TERM. */
7763 rtx
7764 rs6000_find_base_term (rtx op)
7766 rtx base;
7768 base = op;
7769 if (GET_CODE (base) == CONST)
7770 base = XEXP (base, 0);
7771 if (GET_CODE (base) == PLUS)
7772 base = XEXP (base, 0);
7773 if (GET_CODE (base) == UNSPEC)
7774 switch (XINT (base, 1))
7776 case UNSPEC_TOCREL:
7777 case UNSPEC_MACHOPIC_OFFSET:
7778 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7779 for aliasing purposes. */
7780 return XVECEXP (base, 0, 0);
7783 return op;
7786 /* More elaborate version of recog's offsettable_memref_p predicate
7787 that works around the ??? note of rs6000_mode_dependent_address.
7788 In particular it accepts
7790 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7792 in 32-bit mode, which the recog predicate rejects.
7794 static bool
7795 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7797 bool worst_case;
7799 if (!MEM_P (op))
7800 return false;
7802 /* First mimic offsettable_memref_p. */
7803 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7804 return true;
7806 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7807 the latter predicate knows nothing about the mode of the memory
7808 reference and, therefore, assumes that it is the largest supported
7809 mode (TFmode). As a consequence, legitimate offsettable memory
7810 references are rejected. rs6000_legitimate_offset_address_p contains
7811 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7812 at least with a little bit of help here given that we know the
7813 actual registers used. */
7814 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7815 || GET_MODE_SIZE (reg_mode) == 4);
7816 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7817 true, worst_case);
7820 /* Change register usage conditional on target flags. */
7821 static void
7822 rs6000_conditional_register_usage (void)
7824 int i;
7826 if (TARGET_DEBUG_TARGET)
7827 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7829 /* Set MQ register fixed (already call_used) so that it will not be
7830 allocated. */
7831 fixed_regs[64] = 1;
7833 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7834 if (TARGET_64BIT)
7835 fixed_regs[13] = call_used_regs[13]
7836 = call_really_used_regs[13] = 1;
7838 /* Conditionally disable FPRs. */
7839 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7840 for (i = 32; i < 64; i++)
7841 fixed_regs[i] = call_used_regs[i]
7842 = call_really_used_regs[i] = 1;
7844 /* The TOC register is not killed across calls in a way that is
7845 visible to the compiler. */
7846 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7847 call_really_used_regs[2] = 0;
7849 if (DEFAULT_ABI == ABI_V4
7850 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7851 && flag_pic == 2)
7852 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7854 if (DEFAULT_ABI == ABI_V4
7855 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7856 && flag_pic == 1)
7857 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7858 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7859 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7861 if (DEFAULT_ABI == ABI_DARWIN
7862 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7863 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7864 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7865 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7867 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7868 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7869 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7871 if (TARGET_SPE)
7873 global_regs[SPEFSCR_REGNO] = 1;
7874 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7875 registers in prologues and epilogues. We no longer use r14
7876 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7877 pool for link-compatibility with older versions of GCC. Once
7878 "old" code has died out, we can return r14 to the allocation
7879 pool. */
7880 fixed_regs[14]
7881 = call_used_regs[14]
7882 = call_really_used_regs[14] = 1;
7885 if (!TARGET_ALTIVEC && !TARGET_VSX)
7887 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7888 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7889 call_really_used_regs[VRSAVE_REGNO] = 1;
7892 if (TARGET_ALTIVEC || TARGET_VSX)
7893 global_regs[VSCR_REGNO] = 1;
7895 if (TARGET_ALTIVEC_ABI)
7897 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7898 call_used_regs[i] = call_really_used_regs[i] = 1;
7900 /* AIX reserves VR20:31 in non-extended ABI mode. */
7901 if (TARGET_XCOFF)
7902 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7903 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7908 /* Output insns to set DEST equal to the constant SOURCE as a series of
7909 lis, ori and shl instructions and return TRUE. */
7911 bool
7912 rs6000_emit_set_const (rtx dest, rtx source)
7914 enum machine_mode mode = GET_MODE (dest);
7915 rtx temp, set;
7916 rtx_insn *insn;
7917 HOST_WIDE_INT c;
7919 gcc_checking_assert (CONST_INT_P (source));
7920 c = INTVAL (source);
7921 switch (mode)
7923 case QImode:
7924 case HImode:
7925 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7926 return true;
7928 case SImode:
7929 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7931 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (temp),
7932 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
7933 emit_insn (gen_rtx_SET (VOIDmode, dest,
7934 gen_rtx_IOR (SImode, copy_rtx (temp),
7935 GEN_INT (c & 0xffff))));
7936 break;
7938 case DImode:
7939 if (!TARGET_POWERPC64)
7941 rtx hi, lo;
7943 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
7944 DImode);
7945 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
7946 DImode);
7947 emit_move_insn (hi, GEN_INT (c >> 32));
7948 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
7949 emit_move_insn (lo, GEN_INT (c));
7951 else
7952 rs6000_emit_set_long_const (dest, c);
7953 break;
7955 default:
7956 gcc_unreachable ();
7959 insn = get_last_insn ();
7960 set = single_set (insn);
7961 if (! CONSTANT_P (SET_SRC (set)))
7962 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
7964 return true;
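
/* Illustrative note (not from the GCC sources): the SImode arm above is
   the classic two-insn sequence; for c == 0x12345678 it emits, in
   effect:
       lis  rT, 0x1234        # rT = 0x12340000
       ori  rD, rT, 0x5678    # rD = 0x12345678
   with a REG_EQUAL note recording the full constant for later passes. */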
7967 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
7968 Output insns to set DEST equal to the constant C as a series of
7969 lis, ori and shl instructions. */
7971 static void
7972 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
7974 rtx temp;
7975 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7977 ud1 = c & 0xffff;
7978 c = c >> 16;
7979 ud2 = c & 0xffff;
7980 c = c >> 16;
7981 ud3 = c & 0xffff;
7982 c = c >> 16;
7983 ud4 = c & 0xffff;
7985 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7986 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7987 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7989 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7990 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7992 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
7994 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
7995 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
7996 if (ud1 != 0)
7997 emit_move_insn (dest,
7998 gen_rtx_IOR (DImode, copy_rtx (temp),
7999 GEN_INT (ud1)));
8001 else if (ud3 == 0 && ud4 == 0)
8003 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8005 gcc_assert (ud2 & 0x8000);
8006 emit_move_insn (copy_rtx (temp),
8007 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
8008 if (ud1 != 0)
8009 emit_move_insn (copy_rtx (temp),
8010 gen_rtx_IOR (DImode, copy_rtx (temp),
8011 GEN_INT (ud1)));
8012 emit_move_insn (dest,
8013 gen_rtx_ZERO_EXTEND (DImode,
8014 gen_lowpart (SImode,
8015 copy_rtx (temp))));
8017 else if ((ud4 == 0xffff && (ud3 & 0x8000))
8018 || (ud4 == 0 && ! (ud3 & 0x8000)))
8020 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8022 emit_move_insn (copy_rtx (temp),
8023 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
8024 if (ud2 != 0)
8025 emit_move_insn (copy_rtx (temp),
8026 gen_rtx_IOR (DImode, copy_rtx (temp),
8027 GEN_INT (ud2)));
8028 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8029 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8030 GEN_INT (16)));
8031 if (ud1 != 0)
8032 emit_move_insn (dest,
8033 gen_rtx_IOR (DImode, copy_rtx (temp),
8034 GEN_INT (ud1)));
8036 else
8038 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8040 emit_move_insn (copy_rtx (temp),
8041 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
8042 if (ud3 != 0)
8043 emit_move_insn (copy_rtx (temp),
8044 gen_rtx_IOR (DImode, copy_rtx (temp),
8045 GEN_INT (ud3)));
8047 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
8048 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8049 GEN_INT (32)));
8050 if (ud2 != 0)
8051 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8052 gen_rtx_IOR (DImode, copy_rtx (temp),
8053 GEN_INT (ud2 << 16)));
8054 if (ud1 != 0)
8055 emit_move_insn (dest,
8056 gen_rtx_IOR (DImode, copy_rtx (temp),
8057 GEN_INT (ud1)));
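
/* A standalone sketch of the chunking performed above (hypothetical
   helper, not part of the GCC sources); ud[0] is the least significant
   16 bits. */
static void
split_di_const (unsigned long long c, unsigned int ud[4])
{
  int i;
  for (i = 0; i < 4; i++)
    {
      ud[i] = (unsigned int) (c & 0xffff);
      c >>= 16;
    }
  /* 0x123456789abcdef0 -> { 0xdef0, 0x9abc, 0x5678, 0x1234 }.  The
     worst case above then costs five insns: lis, ori, sldi 32, oris,
     ori (the ASHIFT by 32 followed by IORs of ud2 << 16 and ud1).  */
}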
8061 /* Helper for the following. Get rid of [r+r] memory refs
8062 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
8064 static void
8065 rs6000_eliminate_indexed_memrefs (rtx operands[2])
8067 if (reload_in_progress)
8068 return;
8070 if (GET_CODE (operands[0]) == MEM
8071 && GET_CODE (XEXP (operands[0], 0)) != REG
8072 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
8073 GET_MODE (operands[0]), false))
8074 operands[0]
8075 = replace_equiv_address (operands[0],
8076 copy_addr_to_reg (XEXP (operands[0], 0)));
8078 if (GET_CODE (operands[1]) == MEM
8079 && GET_CODE (XEXP (operands[1], 0)) != REG
8080 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
8081 GET_MODE (operands[1]), false))
8082 operands[1]
8083 = replace_equiv_address (operands[1],
8084 copy_addr_to_reg (XEXP (operands[1], 0)));
8087 /* Generate a vector of constants to permute MODE for a little-endian
8088 storage operation by swapping the two halves of a vector. */
8089 static rtvec
8090 rs6000_const_vec (enum machine_mode mode)
8092 int i, subparts;
8093 rtvec v;
8095 switch (mode)
8097 case V1TImode:
8098 subparts = 1;
8099 break;
8100 case V2DFmode:
8101 case V2DImode:
8102 subparts = 2;
8103 break;
8104 case V4SFmode:
8105 case V4SImode:
8106 subparts = 4;
8107 break;
8108 case V8HImode:
8109 subparts = 8;
8110 break;
8111 case V16QImode:
8112 subparts = 16;
8113 break;
8114 default:
8115 gcc_unreachable ();
8118 v = rtvec_alloc (subparts);
8120 for (i = 0; i < subparts / 2; ++i)
8121 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8122 for (i = subparts / 2; i < subparts; ++i)
8123 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8125 return v;
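
/* A plain-C sketch of the selector built above (hypothetical helper,
   not part of the GCC sources). It swaps the two doubleword halves:
   for V4SImode (4 subparts) the selector is { 2, 3, 0, 1 }, and for
   V16QImode it is { 8..15, 0..7 }. */
static void
le_swap_selector (int subparts, int sel[])
{
  int i;
  for (i = 0; i < subparts / 2; i++)
    sel[i] = i + subparts / 2;       /* first half <- high half  */
  for (i = subparts / 2; i < subparts; i++)
    sel[i] = i - subparts / 2;       /* second half <- low half  */
}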
8128 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8129 for a VSX load or store operation. */
8130 rtx
8131 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
8133 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8134 return gen_rtx_VEC_SELECT (mode, source, par);
8137 /* Emit a little-endian load from vector memory location SOURCE to VSX
8138 register DEST in mode MODE. The load is done with two permuting
8139 insns that represent an lxvd2x and xxpermdi.
8140 void
8141 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
8143 rtx tmp, permute_mem, permute_reg;
8145 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8146 V1TImode). */
8147 if (mode == TImode || mode == V1TImode)
8149 mode = V2DImode;
8150 dest = gen_lowpart (V2DImode, dest);
8151 source = adjust_address (source, V2DImode, 0);
8154 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8155 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8156 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8157 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
8158 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
8161 /* Emit a little-endian store to vector memory location DEST from VSX
8162 register SOURCE in mode MODE. The store is done with two permuting
8163 insns that represent an xxpermdi and an stxvd2x. */
8164 void
8165 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
8167 rtx tmp, permute_src, permute_tmp;
8169 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8170 V1TImode). */
8171 if (mode == TImode || mode == V1TImode)
8173 mode = V2DImode;
8174 dest = adjust_address (dest, V2DImode, 0);
8175 source = gen_lowpart (V2DImode, source);
8178 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8179 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8180 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8181 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
8182 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
8185 /* Emit a sequence representing a little-endian VSX load or store,
8186 moving data from SOURCE to DEST in mode MODE. This is done
8187 separately from rs6000_emit_move to ensure it is called only
8188 during expand. LE VSX loads and stores introduced later are
8189 handled with a split. The expand-time RTL generation allows
8190 us to optimize away redundant pairs of register-permutes. */
8191 void
8192 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
8194 gcc_assert (!BYTES_BIG_ENDIAN
8195 && VECTOR_MEM_VSX_P (mode)
8196 && !gpr_or_gpr_p (dest, source)
8197 && (MEM_P (source) ^ MEM_P (dest)));
8199 if (MEM_P (source))
8201 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8202 rs6000_emit_le_vsx_load (dest, source, mode);
8204 else
8206 if (!REG_P (source))
8207 source = force_reg (mode, source);
8208 rs6000_emit_le_vsx_store (dest, source, mode);
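
/* The double permute above relies on the half-swap being an involution.
   A minimal standalone sketch (hypothetical helper, not from the GCC
   sources): */
static void
swap_halves (int v[], int n)
{
  int i, t;
  for (i = 0; i < n / 2; i++)
    {
      t = v[i];
      v[i] = v[i + n / 2];
      v[i + n / 2] = t;
    }
  /* Applying swap_halves twice restores the original order, which is
     why a permuting store feeding a permuting load of the same data is
     a no-op pair that later RTL passes can cancel, as described in the
     comment above.  */
}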
8212 /* Emit a move from SOURCE to DEST in mode MODE. */
8213 void
8214 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
8216 rtx operands[2];
8217 operands[0] = dest;
8218 operands[1] = source;
8220 if (TARGET_DEBUG_ADDR)
8222 fprintf (stderr,
8223 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8224 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8225 GET_MODE_NAME (mode),
8226 reload_in_progress,
8227 reload_completed,
8228 can_create_pseudo_p ());
8229 debug_rtx (dest);
8230 fprintf (stderr, "source:\n");
8231 debug_rtx (source);
8234 /* Sanity checks. Check that we get CONST_WIDE_INT only when we should. */
8235 if (CONST_WIDE_INT_P (operands[1])
8236 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8238 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8239 gcc_unreachable ();
8242 /* Check if GCC is setting up a block move that will end up using FP
8243 registers as temporaries. We must make sure this is acceptable. */
8244 if (GET_CODE (operands[0]) == MEM
8245 && GET_CODE (operands[1]) == MEM
8246 && mode == DImode
8247 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8248 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8249 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8250 ? 32 : MEM_ALIGN (operands[0])))
8251 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8252 ? 32
8253 : MEM_ALIGN (operands[1]))))
8254 && ! MEM_VOLATILE_P (operands [0])
8255 && ! MEM_VOLATILE_P (operands [1]))
8257 emit_move_insn (adjust_address (operands[0], SImode, 0),
8258 adjust_address (operands[1], SImode, 0));
8259 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8260 adjust_address (copy_rtx (operands[1]), SImode, 4));
8261 return;
8264 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8265 && !gpc_reg_operand (operands[1], mode))
8266 operands[1] = force_reg (mode, operands[1]);
8268 /* Recognize the case where operand[1] is a reference to thread-local
8269 data and load its address to a register. */
8270 if (tls_referenced_p (operands[1]))
8272 enum tls_model model;
8273 rtx tmp = operands[1];
8274 rtx addend = NULL;
8276 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8278 addend = XEXP (XEXP (tmp, 0), 1);
8279 tmp = XEXP (XEXP (tmp, 0), 0);
8282 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8283 model = SYMBOL_REF_TLS_MODEL (tmp);
8284 gcc_assert (model != 0);
8286 tmp = rs6000_legitimize_tls_address (tmp, model);
8287 if (addend)
8289 tmp = gen_rtx_PLUS (mode, tmp, addend);
8290 tmp = force_operand (tmp, operands[0]);
8292 operands[1] = tmp;
8295 /* Handle the case where reload calls us with an invalid address. */
8296 if (reload_in_progress && mode == Pmode
8297 && (! general_operand (operands[1], mode)
8298 || ! nonimmediate_operand (operands[0], mode)))
8299 goto emit_set;
8301 /* 128-bit constant floating-point values on Darwin should really be
8302 loaded as two parts. */
8303 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8304 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8306 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8307 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8308 DFmode);
8309 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8310 GET_MODE_SIZE (DFmode)),
8311 simplify_gen_subreg (DFmode, operands[1], mode,
8312 GET_MODE_SIZE (DFmode)),
8313 DFmode);
8314 return;
8317 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8318 cfun->machine->sdmode_stack_slot =
8319 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8322 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
8323 p1:SD) if p1 is not of floating point class and p0 is spilled as
8324 we can have no analogous movsd_store for this. */
8325 if (lra_in_progress && mode == DDmode
8326 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8327 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8328 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
8329 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
8331 enum reg_class cl;
8332 int regno = REGNO (SUBREG_REG (operands[1]));
8334 if (regno >= FIRST_PSEUDO_REGISTER)
8336 cl = reg_preferred_class (regno);
8337 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
8339 if (regno >= 0 && ! FP_REGNO_P (regno))
8341 mode = SDmode;
8342 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
8343 operands[1] = SUBREG_REG (operands[1]);
8346 if (lra_in_progress
8347 && mode == SDmode
8348 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8349 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8350 && (REG_P (operands[1])
8351 || (GET_CODE (operands[1]) == SUBREG
8352 && REG_P (SUBREG_REG (operands[1])))))
8354 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8355 ? SUBREG_REG (operands[1]) : operands[1]);
8356 enum reg_class cl;
8358 if (regno >= FIRST_PSEUDO_REGISTER)
8360 cl = reg_preferred_class (regno);
8361 gcc_assert (cl != NO_REGS);
8362 regno = ira_class_hard_regs[cl][0];
8364 if (FP_REGNO_P (regno))
8366 if (GET_MODE (operands[0]) != DDmode)
8367 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8368 emit_insn (gen_movsd_store (operands[0], operands[1]));
8370 else if (INT_REGNO_P (regno))
8371 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8372 else
8373 gcc_unreachable ();
8374 return;
8376 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
8377 p:DD)) if p0 is not of floating point class and p1 is spilled as
8378 we can have no analogous movsd_load for this. */
8379 if (lra_in_progress && mode == DDmode
8380 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
8381 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
8382 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8383 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8385 enum reg_class cl;
8386 int regno = REGNO (SUBREG_REG (operands[0]));
8388 if (regno >= FIRST_PSEUDO_REGISTER)
8390 cl = reg_preferred_class (regno);
8391 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
8393 if (regno >= 0 && ! FP_REGNO_P (regno))
8395 mode = SDmode;
8396 operands[0] = SUBREG_REG (operands[0]);
8397 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
8400 if (lra_in_progress
8401 && mode == SDmode
8402 && (REG_P (operands[0])
8403 || (GET_CODE (operands[0]) == SUBREG
8404 && REG_P (SUBREG_REG (operands[0]))))
8405 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8406 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8408 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8409 ? SUBREG_REG (operands[0]) : operands[0]);
8410 enum reg_class cl;
8412 if (regno >= FIRST_PSEUDO_REGISTER)
8414 cl = reg_preferred_class (regno);
8415 gcc_assert (cl != NO_REGS);
8416 regno = ira_class_hard_regs[cl][0];
8418 if (FP_REGNO_P (regno))
8420 if (GET_MODE (operands[1]) != DDmode)
8421 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8422 emit_insn (gen_movsd_load (operands[0], operands[1]));
8424 else if (INT_REGNO_P (regno))
8425 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8426 else
8427 gcc_unreachable ();
8428 return;
8431 if (reload_in_progress
8432 && mode == SDmode
8433 && cfun->machine->sdmode_stack_slot != NULL_RTX
8434 && MEM_P (operands[0])
8435 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8436 && REG_P (operands[1]))
8438 if (FP_REGNO_P (REGNO (operands[1])))
8440 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8441 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8442 emit_insn (gen_movsd_store (mem, operands[1]));
8444 else if (INT_REGNO_P (REGNO (operands[1])))
8446 rtx mem = operands[0];
8447 if (BYTES_BIG_ENDIAN)
8448 mem = adjust_address_nv (mem, mode, 4);
8449 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8450 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8452 else
8453 gcc_unreachable ();
8454 return;
8456 if (reload_in_progress
8457 && mode == SDmode
8458 && REG_P (operands[0])
8459 && MEM_P (operands[1])
8460 && cfun->machine->sdmode_stack_slot != NULL_RTX
8461 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8463 if (FP_REGNO_P (REGNO (operands[0])))
8465 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8466 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8467 emit_insn (gen_movsd_load (operands[0], mem));
8469 else if (INT_REGNO_P (REGNO (operands[0])))
8471 rtx mem = operands[1];
8472 if (BYTES_BIG_ENDIAN)
8473 mem = adjust_address_nv (mem, mode, 4);
8474 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8475 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8477 else
8478 gcc_unreachable ();
8479 return;
8482 /* FIXME: In the long term, this switch statement should go away
8483 and be replaced by a sequence of tests based on things like
8484 mode == Pmode. */
8485 switch (mode)
8487 case HImode:
8488 case QImode:
8489 if (CONSTANT_P (operands[1])
8490 && GET_CODE (operands[1]) != CONST_INT)
8491 operands[1] = force_const_mem (mode, operands[1]);
8492 break;
8494 case TFmode:
8495 case TDmode:
8496 rs6000_eliminate_indexed_memrefs (operands);
8497 /* fall through */
8499 case DFmode:
8500 case DDmode:
8501 case SFmode:
8502 case SDmode:
8503 if (CONSTANT_P (operands[1])
8504 && ! easy_fp_constant (operands[1], mode))
8505 operands[1] = force_const_mem (mode, operands[1]);
8506 break;
8508 case V16QImode:
8509 case V8HImode:
8510 case V4SFmode:
8511 case V4SImode:
8512 case V4HImode:
8513 case V2SFmode:
8514 case V2SImode:
8515 case V1DImode:
8516 case V2DFmode:
8517 case V2DImode:
8518 case V1TImode:
8519 if (CONSTANT_P (operands[1])
8520 && !easy_vector_constant (operands[1], mode))
8521 operands[1] = force_const_mem (mode, operands[1]);
8522 break;
8524 case SImode:
8525 case DImode:
8526 /* Use the default pattern for the address of ELF small data. */
8527 if (TARGET_ELF
8528 && mode == Pmode
8529 && DEFAULT_ABI == ABI_V4
8530 && (GET_CODE (operands[1]) == SYMBOL_REF
8531 || GET_CODE (operands[1]) == CONST)
8532 && small_data_operand (operands[1], mode))
8534 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8535 return;
8538 if (DEFAULT_ABI == ABI_V4
8539 && mode == Pmode && mode == SImode
8540 && flag_pic == 1 && got_operand (operands[1], mode))
8542 emit_insn (gen_movsi_got (operands[0], operands[1]));
8543 return;
8546 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8547 && TARGET_NO_TOC
8548 && ! flag_pic
8549 && mode == Pmode
8550 && CONSTANT_P (operands[1])
8551 && GET_CODE (operands[1]) != HIGH
8552 && GET_CODE (operands[1]) != CONST_INT)
8554 rtx target = (!can_create_pseudo_p ()
8555 ? operands[0]
8556 : gen_reg_rtx (mode));
8558 /* If this is a function address on -mcall-aixdesc,
8559 convert it to the address of the descriptor. */
8560 if (DEFAULT_ABI == ABI_AIX
8561 && GET_CODE (operands[1]) == SYMBOL_REF
8562 && XSTR (operands[1], 0)[0] == '.')
8564 const char *name = XSTR (operands[1], 0);
8565 rtx new_ref;
8566 while (*name == '.')
8567 name++;
8568 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8569 CONSTANT_POOL_ADDRESS_P (new_ref)
8570 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8571 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8572 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8573 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8574 operands[1] = new_ref;
8577 if (DEFAULT_ABI == ABI_DARWIN)
8579 #if TARGET_MACHO
8580 if (MACHO_DYNAMIC_NO_PIC_P)
8582 /* Take care of any required data indirection. */
8583 operands[1] = rs6000_machopic_legitimize_pic_address (
8584 operands[1], mode, operands[0]);
8585 if (operands[0] != operands[1])
8586 emit_insn (gen_rtx_SET (VOIDmode,
8587 operands[0], operands[1]));
8588 return;
8590 #endif
8591 emit_insn (gen_macho_high (target, operands[1]));
8592 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8593 return;
8596 emit_insn (gen_elf_high (target, operands[1]));
8597 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8598 return;
8601 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8602 and we have put it in the TOC, we just need to make a TOC-relative
8603 reference to it. */
8604 if (TARGET_TOC
8605 && GET_CODE (operands[1]) == SYMBOL_REF
8606 && use_toc_relative_ref (operands[1]))
8607 operands[1] = create_TOC_reference (operands[1], operands[0]);
8608 else if (mode == Pmode
8609 && CONSTANT_P (operands[1])
8610 && GET_CODE (operands[1]) != HIGH
8611 && ((GET_CODE (operands[1]) != CONST_INT
8612 && ! easy_fp_constant (operands[1], mode))
8613 || (GET_CODE (operands[1]) == CONST_INT
8614 && (num_insns_constant (operands[1], mode)
8615 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8616 || (GET_CODE (operands[0]) == REG
8617 && FP_REGNO_P (REGNO (operands[0]))))
8618 && !toc_relative_expr_p (operands[1], false)
8619 && (TARGET_CMODEL == CMODEL_SMALL
8620 || can_create_pseudo_p ()
8621 || (REG_P (operands[0])
8622 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8625 #if TARGET_MACHO
8626 /* Darwin uses a special PIC legitimizer. */
8627 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8629 operands[1] =
8630 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8631 operands[0]);
8632 if (operands[0] != operands[1])
8633 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8634 return;
8636 #endif
8638 /* If we are to limit the number of things we put in the TOC and
8639 this is a symbol plus a constant we can add in one insn,
8640 just put the symbol in the TOC and add the constant. Don't do
8641 this if reload is in progress. */
8642 if (GET_CODE (operands[1]) == CONST
8643 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8644 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8645 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8646 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8647 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8648 && ! side_effects_p (operands[0]))
8650 rtx sym =
8651 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8652 rtx other = XEXP (XEXP (operands[1], 0), 1);
8654 sym = force_reg (mode, sym);
8655 emit_insn (gen_add3_insn (operands[0], sym, other));
8656 return;
8659 operands[1] = force_const_mem (mode, operands[1]);
8661 if (TARGET_TOC
8662 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8663 && constant_pool_expr_p (XEXP (operands[1], 0))
8664 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8665 get_pool_constant (XEXP (operands[1], 0)),
8666 get_pool_mode (XEXP (operands[1], 0))))
8668 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8669 operands[0]);
8670 operands[1] = gen_const_mem (mode, tocref);
8671 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8674 break;
8676 case TImode:
8677 if (!VECTOR_MEM_VSX_P (TImode))
8678 rs6000_eliminate_indexed_memrefs (operands);
8679 break;
8681 case PTImode:
8682 rs6000_eliminate_indexed_memrefs (operands);
8683 break;
8685 default:
8686 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8689 /* Above, we may have called force_const_mem which may have returned
8690 an invalid address. If we can, fix this up; otherwise, reload will
8691 have to deal with it. */
8692 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8693 operands[1] = validize_mem (operands[1]);
8695 emit_set:
8696 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8699 /* Return true if a structure, union or array containing FIELD should be
8700 accessed using `BLKmode'.
8702 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8703 entire thing in a DI and use subregs to access the internals.
8704 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8705 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8706 best thing to do is set structs to BLKmode and avoid Severe Tire
8707 Damage.
8709 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
8710 fit into one register, whereas DI still needs two. */
8712 static bool
8713 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8715 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8716 || (TARGET_E500_DOUBLE && mode == DFmode));
8719 /* Nonzero if we can use a floating-point register to pass this arg. */
8720 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8721 (SCALAR_FLOAT_MODE_P (MODE) \
8722 && (CUM)->fregno <= FP_ARG_MAX_REG \
8723 && TARGET_HARD_FLOAT && TARGET_FPRS)
8725 /* Nonzero if we can use an AltiVec register to pass this arg. */
8726 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8727 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8728 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8729 && TARGET_ALTIVEC_ABI \
8730 && (NAMED))
8732 /* Walk down the type tree of TYPE counting consecutive base elements.
8733 If *MODEP is VOIDmode, then set it to the first valid floating point
8734 or vector type. If a non-floating point or vector type is found, or
8735 if a floating point or vector type that doesn't match a non-VOIDmode
8736 *MODEP is found, then return -1, otherwise return the count in the
8737 sub-tree. */
8739 static int
8740 rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
8742 enum machine_mode mode;
8743 HOST_WIDE_INT size;
8745 switch (TREE_CODE (type))
8747 case REAL_TYPE:
8748 mode = TYPE_MODE (type);
8749 if (!SCALAR_FLOAT_MODE_P (mode))
8750 return -1;
8752 if (*modep == VOIDmode)
8753 *modep = mode;
8755 if (*modep == mode)
8756 return 1;
8758 break;
8760 case COMPLEX_TYPE:
8761 mode = TYPE_MODE (TREE_TYPE (type));
8762 if (!SCALAR_FLOAT_MODE_P (mode))
8763 return -1;
8765 if (*modep == VOIDmode)
8766 *modep = mode;
8768 if (*modep == mode)
8769 return 2;
8771 break;
8773 case VECTOR_TYPE:
8774 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
8775 return -1;
8777 /* Use V4SImode as representative of all 128-bit vector types. */
8778 size = int_size_in_bytes (type);
8779 switch (size)
8781 case 16:
8782 mode = V4SImode;
8783 break;
8784 default:
8785 return -1;
8788 if (*modep == VOIDmode)
8789 *modep = mode;
8791 /* Vector modes are considered to be opaque: two vectors are
8792 equivalent for the purposes of being homogeneous aggregates
8793 if they are the same size. */
8794 if (*modep == mode)
8795 return 1;
8797 break;
8799 case ARRAY_TYPE:
8801 int count;
8802 tree index = TYPE_DOMAIN (type);
8804 /* Can't handle incomplete types nor sizes that are not
8805 fixed. */
8806 if (!COMPLETE_TYPE_P (type)
8807 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8808 return -1;
8810 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
8811 if (count == -1
8812 || !index
8813 || !TYPE_MAX_VALUE (index)
8814 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
8815 || !TYPE_MIN_VALUE (index)
8816 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
8817 || count < 0)
8818 return -1;
8820 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
8821 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
8823 /* There must be no padding. */
8824 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8825 return -1;
8827 return count;
8830 case RECORD_TYPE:
8832 int count = 0;
8833 int sub_count;
8834 tree field;
8836 /* Can't handle incomplete types nor sizes that are not
8837 fixed. */
8838 if (!COMPLETE_TYPE_P (type)
8839 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8840 return -1;
8842 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8844 if (TREE_CODE (field) != FIELD_DECL)
8845 continue;
8847 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8848 if (sub_count < 0)
8849 return -1;
8850 count += sub_count;
8853 /* There must be no padding. */
8854 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8855 return -1;
8857 return count;
8860 case UNION_TYPE:
8861 case QUAL_UNION_TYPE:
8863 /* These aren't very interesting except in a degenerate case. */
8864 int count = 0;
8865 int sub_count;
8866 tree field;
8868 /* Can't handle incomplete types nor sizes that are not
8869 fixed. */
8870 if (!COMPLETE_TYPE_P (type)
8871 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8872 return -1;
8874 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8876 if (TREE_CODE (field) != FIELD_DECL)
8877 continue;
8879 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8880 if (sub_count < 0)
8881 return -1;
8882 count = count > sub_count ? count : sub_count;
8885 /* There must be no padding. */
8886 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8887 return -1;
8889 return count;
8892 default:
8893 break;
8896 return -1;
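
/* Illustrative example types and the result of the walk above, assuming
   ELFv2 with AltiVec enabled (hypothetical declarations, not part of
   the GCC sources). */
struct hfa_2  { double a, b; };                 /* DFmode, count 2   */
struct hfa_3  { _Complex double z; double d; }; /* DFmode, count 3   */
struct hva_2  { __vector int x, y; };           /* V4SImode, count 2 */
struct no_hfa { double d; float f; };           /* -1: element modes
                                                   differ            */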
8899 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
8900 float or vector aggregate that shall be passed in FP/vector registers
8901 according to the ELFv2 ABI, return the homogeneous element mode in
8902 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
8904 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
8906 static bool
8907 rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
8908 enum machine_mode *elt_mode,
8909 int *n_elts)
8911 /* Note that we do not accept complex types at the top level as
8912 homogeneous aggregates; these types are handled via the
8913 targetm.calls.split_complex_arg mechanism. Complex types
8914 can be elements of homogeneous aggregates, however. */
8915 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
8917 enum machine_mode field_mode = VOIDmode;
8918 int field_count = rs6000_aggregate_candidate (type, &field_mode);
8920 if (field_count > 0)
8922 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
8923 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
8925 /* The ELFv2 ABI allows homogeneous aggregates to occupy
8926 up to AGGR_ARG_NUM_REG registers. */
8927 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
8929 if (elt_mode)
8930 *elt_mode = field_mode;
8931 if (n_elts)
8932 *n_elts = field_count;
8933 return true;
8938 if (elt_mode)
8939 *elt_mode = mode;
8940 if (n_elts)
8941 *n_elts = 1;
8942 return false;
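
/* Illustrative note (hypothetical declaration, not part of the GCC
   sources): the register cap above means a candidate needing more than
   AGGR_ARG_NUM_REG registers is not treated as a homogeneous aggregate,
   e.g., assuming AGGR_ARG_NUM_REG == 8: */
struct too_wide { double a[9]; };  /* nine FPRs needed: falls back to
                                      the ordinary GPR/memory rules. */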
8945 /* Return a nonzero value to say to return the function value in
8946 memory, just as large structures are always returned. TYPE will be
8947 the data type of the value, and FNTYPE will be the type of the
8948 function doing the returning, or @code{NULL} for libcalls.
8950 The AIX ABI for the RS/6000 specifies that all structures are
8951 returned in memory. The Darwin ABI does the same.
8953 For the Darwin 64 Bit ABI, a function result can be returned in
8954 registers or in memory, depending on the size of the return data
8955 type. If it is returned in registers, the value occupies the same
8956 registers as it would if it were the first and only function
8957 argument. Otherwise, the function places its result in memory at
8958 the location pointed to by GPR3.
8960 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8961 but a draft put them in memory, and GCC used to implement the draft
8962 instead of the final standard. Therefore, aix_struct_return
8963 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8964 compatibility can change DRAFT_V4_STRUCT_RET to override the
8965 default, and -m switches get the final word. See
8966 rs6000_option_override_internal for more details.
8968 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8969 long double support is enabled. These values are returned in memory.
8971 int_size_in_bytes returns -1 for variable size objects, which go in
8972 memory always. The cast to unsigned makes -1 > 8. */
8974 static bool
8975 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8977 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8978 if (TARGET_MACHO
8979 && rs6000_darwin64_abi
8980 && TREE_CODE (type) == RECORD_TYPE
8981 && int_size_in_bytes (type) > 0)
8983 CUMULATIVE_ARGS valcum;
8984 rtx valret;
8986 valcum.words = 0;
8987 valcum.fregno = FP_ARG_MIN_REG;
8988 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8989 /* Do a trial code generation as if this were going to be passed
8990 as an argument; if any part goes in memory, we return NULL. */
8991 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8992 if (valret)
8993 return false;
8994 /* Otherwise fall through to more conventional ABI rules. */
8997 #if HAVE_UPC_PTS_STRUCT_REP
8998 if (POINTER_TYPE_P (type) && upc_shared_type_p (TREE_TYPE (type)))
8999 return true;
9000 #endif
9002 /* The ELFv2 ABI returns homogeneous FP and vector aggregates in registers. */
9003 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
9004 NULL, NULL))
9005 return false;
9007 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
9008 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
9009 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
9010 return false;
9012 if (AGGREGATE_TYPE_P (type)
9013 && (aix_struct_return
9014 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
9015 return true;
9017 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9018 modes only exist for GCC vector types if -maltivec. */
9019 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
9020 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9021 return false;
9023 /* Return synthetic vectors in memory. */
9024 if (TREE_CODE (type) == VECTOR_TYPE
9025 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9027 static bool warned_for_return_big_vectors = false;
9028 if (!warned_for_return_big_vectors)
9030 warning (0, "GCC vector returned by reference: "
9031 "non-standard ABI extension with no compatibility guarantee");
9032 warned_for_return_big_vectors = true;
9034 return true;
9037 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
9038 return true;
9040 return false;
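
/* Illustrative examples of what the ELFv2 rules above mean at the
   source level on 64-bit targets (hypothetical declarations, not part
   of the GCC sources; register assignments are indicative only). */
struct in_regs { long long a, b; };    /* 16 bytes: returned in r3/r4.  */
struct in_mem  { long long a, b, c; }; /* 24 bytes: returned in memory. */
struct in_vrs  { __vector int x, y; }; /* homogeneous: returned in VRs. */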
9043 /* Specify whether values returned in registers should be at the most
9044 significant end of a register. We want aggregates returned by
9045 value to match the way aggregates are passed to functions. */
9047 static bool
9048 rs6000_return_in_msb (const_tree valtype)
9050 return (DEFAULT_ABI == ABI_ELFv2
9051 && BYTES_BIG_ENDIAN
9052 && AGGREGATE_TYPE_P (valtype)
9053 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
9056 #ifdef HAVE_AS_GNU_ATTRIBUTE
9057 /* Return TRUE if a call to function FNDECL may be one that
9058 potentially affects the function calling ABI of the object file. */
9060 static bool
9061 call_ABI_of_interest (tree fndecl)
9063 if (symtab->state == EXPANSION)
9065 struct cgraph_node *c_node;
9067 /* Libcalls are always interesting. */
9068 if (fndecl == NULL_TREE)
9069 return true;
9071 /* Any call to an external function is interesting. */
9072 if (DECL_EXTERNAL (fndecl))
9073 return true;
9075 /* Interesting functions that we are emitting in this object file. */
9076 c_node = cgraph_node::get (fndecl);
9077 c_node = c_node->ultimate_alias_target ();
9078 return !c_node->only_called_directly_p ();
9080 return false;
9082 #endif
9084 /* Initialize a variable CUM of type CUMULATIVE_ARGS
9085 for a call to a function whose data type is FNTYPE.
9086 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
9088 For incoming args we set the number of arguments in the prototype large
9089 so we never return a PARALLEL. */
9091 void
9092 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
9093 rtx libname ATTRIBUTE_UNUSED, int incoming,
9094 int libcall, int n_named_args,
9095 tree fndecl ATTRIBUTE_UNUSED,
9096 enum machine_mode return_mode ATTRIBUTE_UNUSED)
9098 static CUMULATIVE_ARGS zero_cumulative;
9100 *cum = zero_cumulative;
9101 cum->words = 0;
9102 cum->fregno = FP_ARG_MIN_REG;
9103 cum->vregno = ALTIVEC_ARG_MIN_REG;
9104 cum->prototype = (fntype && prototype_p (fntype));
9105 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
9106 ? CALL_LIBCALL : CALL_NORMAL);
9107 cum->sysv_gregno = GP_ARG_MIN_REG;
9108 cum->stdarg = stdarg_p (fntype);
9110 cum->nargs_prototype = 0;
9111 if (incoming || cum->prototype)
9112 cum->nargs_prototype = n_named_args;
9114 /* Check for a longcall attribute. */
9115 if ((!fntype && rs6000_default_long_calls)
9116 || (fntype
9117 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
9118 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
9119 cum->call_cookie |= CALL_LONG;
9121 if (TARGET_DEBUG_ARG)
9123 fprintf (stderr, "\ninit_cumulative_args:");
9124 if (fntype)
9126 tree ret_type = TREE_TYPE (fntype);
9127 fprintf (stderr, " ret code = %s,",
9128 get_tree_code_name (TREE_CODE (ret_type)));
9131 if (cum->call_cookie & CALL_LONG)
9132 fprintf (stderr, " longcall,");
9134 fprintf (stderr, " proto = %d, nargs = %d\n",
9135 cum->prototype, cum->nargs_prototype);
9138 #ifdef HAVE_AS_GNU_ATTRIBUTE
9139 if (DEFAULT_ABI == ABI_V4)
9141 cum->escapes = call_ABI_of_interest (fndecl);
9142 if (cum->escapes)
9144 tree return_type;
9146 if (fntype)
9148 return_type = TREE_TYPE (fntype);
9149 return_mode = TYPE_MODE (return_type);
9151 else
9152 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9154 if (return_type != NULL)
9156 if (TREE_CODE (return_type) == RECORD_TYPE
9157 && TYPE_TRANSPARENT_AGGR (return_type))
9159 return_type = TREE_TYPE (first_field (return_type));
9160 return_mode = TYPE_MODE (return_type);
9162 if (AGGREGATE_TYPE_P (return_type)
9163 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9164 <= 8))
9165 rs6000_returns_struct = true;
9167 if (SCALAR_FLOAT_MODE_P (return_mode))
9168 rs6000_passes_float = true;
9169 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9170 || SPE_VECTOR_MODE (return_mode))
9171 rs6000_passes_vector = true;
9174 #endif
9176 if (fntype
9177 && !TARGET_ALTIVEC
9178 && TARGET_ALTIVEC_ABI
9179 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9181 error ("cannot return value in vector register because"
9182 " altivec instructions are disabled, use -maltivec"
9183 " to enable them");
9187 /* Return true if TYPE must be passed on the stack and not in registers. */
9189 static bool
9190 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
9192 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9193 return must_pass_in_stack_var_size (mode, type);
9194 else
9195 return must_pass_in_stack_var_size_or_pad (mode, type);
9198 /* If defined, a C expression which determines whether, and in which
9199 direction, to pad out an argument with extra space. The value
9200 should be of type `enum direction': either `upward' to pad above
9201 the argument, `downward' to pad below, or `none' to inhibit
9202 padding.
9204 For the AIX ABI, structs are always stored left-shifted in their
9205 argument slot. */
9207 enum direction
9208 function_arg_padding (enum machine_mode mode, const_tree type)
9210 #ifndef AGGREGATE_PADDING_FIXED
9211 #define AGGREGATE_PADDING_FIXED 0
9212 #endif
9213 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9214 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9215 #endif
9217 if (!AGGREGATE_PADDING_FIXED)
9219 /* GCC used to pass structures of the same size as integer types as
9220 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
9221 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9222 passed padded downward, except that -mstrict-align further
9223 muddied the water in that multi-component structures of 2 and 4
9224 bytes in size were passed padded upward.
9226 The following arranges for best compatibility with previous
9227 versions of gcc, but removes the -mstrict-align dependency. */
9228 if (BYTES_BIG_ENDIAN)
9230 HOST_WIDE_INT size = 0;
9232 if (mode == BLKmode)
9234 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9235 size = int_size_in_bytes (type);
9237 else
9238 size = GET_MODE_SIZE (mode);
9240 if (size == 1 || size == 2 || size == 4)
9241 return downward;
9243 return upward;
9246 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9248 if (type != 0 && AGGREGATE_TYPE_P (type))
9249 return upward;
9252 /* Fall back to the default. */
9253 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
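/* Worked example (illustrative): with the big-endian rule above, a
   2-byte struct pads downward -- its bytes sit at the least
   significant end of the slot, as if it were a short -- while a
   3-byte packed struct, like any other size, pads upward.  */
#if 0
static int
example_pads_downward (long long size_in_bytes)
{
  /* Mirrors the size test used for big-endian targets above.  */
  return size_in_bytes == 1 || size_in_bytes == 2 || size_in_bytes == 4;
}
#endif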
9256 /* If defined, a C expression that gives the alignment boundary, in bits,
9257 of an argument with the specified mode and type. If it is not defined,
9258 PARM_BOUNDARY is used for all arguments.
9260 V.4 wants long longs and doubles to be double word aligned. Just
9261 testing the mode size is a boneheaded way to do this as it means
9262 that other types such as complex int are also double word aligned.
9263 However, we're stuck with this because changing the ABI might break
9264 existing library interfaces.
9266 Doubleword align SPE vectors.
9267 Quadword align Altivec/VSX vectors.
9268 Quadword align large synthetic vector types. */
9270 static unsigned int
9271 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
9273 enum machine_mode elt_mode;
9274 int n_elts;
9276 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9278 if (DEFAULT_ABI == ABI_V4
9279 && (GET_MODE_SIZE (mode) == 8
9280 || (TARGET_HARD_FLOAT
9281 && TARGET_FPRS
9282 && (mode == TFmode || mode == TDmode))))
9283 return 64;
9284 else if (SPE_VECTOR_MODE (mode)
9285 || (type && TREE_CODE (type) == VECTOR_TYPE
9286 && int_size_in_bytes (type) >= 8
9287 && int_size_in_bytes (type) < 16))
9288 return 64;
9289 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9290 || (type && TREE_CODE (type) == VECTOR_TYPE
9291 && int_size_in_bytes (type) >= 16))
9292 return 128;
9294 /* Aggregate types that need > 8 byte alignment are quadword-aligned
9295 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
9296 -mcompat-align-parm is used. */
9297 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
9298 || DEFAULT_ABI == ABI_ELFv2)
9299 && type && TYPE_ALIGN (type) > 64)
9301 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
9302 or homogeneous float/vector aggregates here. We already handled
9303 vector aggregates above, but still need to check for float here. */
9304 bool aggregate_p = (AGGREGATE_TYPE_P (type)
9305 && !SCALAR_FLOAT_MODE_P (elt_mode));
9307 /* We used to check for BLKmode instead of the above aggregate type
9308 check. Warn when this results in any difference to the ABI. */
9309 if (aggregate_p != (mode == BLKmode))
9311 static bool warned;
9312 if (!warned && warn_psabi)
9314 warned = true;
9315 inform (input_location,
9316 "the ABI of passing aggregates with %d-byte alignment"
9317 " has changed in GCC 4.10",
9318 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
9322 if (aggregate_p)
9323 return 128;
9326 /* Similar for the Darwin64 ABI. Note that for historical reasons we
9327 implement the "aggregate type" check as a BLKmode check here; this
9328 means certain aggregate types are in fact not aligned. */
9329 if (TARGET_MACHO && rs6000_darwin64_abi
9330 && mode == BLKmode
9331 && type && TYPE_ALIGN (type) > 64)
9332 return 128;
9334 return PARM_BOUNDARY;
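/* Worked examples (illustrative) of the boundaries computed above:
   under 64-bit ELFv2, a double gets PARM_BOUNDARY (64), a 16-byte
   AltiVec vector gets 128, and an aggregate containing a member with
   16-byte alignment gets 128 (with a -Wpsabi note if the old BLKmode
   test would have decided differently); under V.4, long long and
   double both get 64.  */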
9337 /* The offset in words to the start of the parameter save area. */
9339 static unsigned int
9340 rs6000_parm_offset (void)
9342 return (DEFAULT_ABI == ABI_V4 ? 2
9343 : DEFAULT_ABI == ABI_ELFv2 ? 4
9344 : 6);
9347 /* For a function parm of MODE and TYPE, return the starting word in
9348 the parameter area. NWORDS of the parameter area are already used. */
9350 static unsigned int
9351 rs6000_parm_start (enum machine_mode mode, const_tree type,
9352 unsigned int nwords)
9354 unsigned int align;
9356 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9357 return nwords + (-(rs6000_parm_offset () + nwords) & align);
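/* Worked example (illustrative): placing a quadword-aligned argument
   under 64-bit ELFv2, where rs6000_parm_offset () is 4 and
   PARM_BOUNDARY is 64.  A 128-bit boundary gives align = 128/64 - 1
   = 1; with nwords = 3 already used, -(4 + 3) & 1 == 1, so the
   argument starts at word 4 -- absolute word 8 from the area base,
   i.e. a 16-byte-aligned address.  The same arithmetic, isolated:  */
#if 0
static unsigned int
example_parm_start (unsigned int boundary_bits, unsigned int nwords)
{
  unsigned int align = boundary_bits / 64 - 1;   /* e.g. 128/64 - 1 = 1 */
  unsigned int offset = 4;                       /* ELFv2 save-area offset */
  return nwords + (-(offset + nwords) & align);  /* (128, 3) -> 4 */
}
#endif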
9360 /* Compute the size (in words) of a function argument. */
9362 static unsigned long
9363 rs6000_arg_size (enum machine_mode mode, const_tree type)
9365 unsigned long size;
9367 if (mode != BLKmode)
9368 size = GET_MODE_SIZE (mode);
9369 else
9370 size = int_size_in_bytes (type);
9372 if (TARGET_32BIT)
9373 return (size + 3) >> 2;
9374 else
9375 return (size + 7) >> 3;
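/* Illustrative: the rounding above in isolation.  A 10-byte BLKmode
   argument takes (10 + 7) >> 3 == 2 doublewords on 64-bit targets
   and (10 + 3) >> 2 == 3 words on 32-bit targets.  */
#if 0
static unsigned long
example_arg_size_words (unsigned long size_bytes, int target_64bit)
{
  return target_64bit ? (size_bytes + 7) >> 3 : (size_bytes + 3) >> 2;
}
#endif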
9378 /* Use this to flush pending int fields. */
9380 static void
9381 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9382 HOST_WIDE_INT bitpos, int final)
9384 unsigned int startbit, endbit;
9385 int intregs, intoffset;
9386 enum machine_mode mode;
9388 /* Handle the situations where a float is taking up the first half
9389 of the GPR, and the other half is empty (typically due to
9390 alignment restrictions). We can detect this by an 8-byte-aligned
9391 int field, or by seeing that this is the final flush for this
9392 argument. Count the word and continue on. */
9393 if (cum->floats_in_gpr == 1
9394 && (cum->intoffset % 64 == 0
9395 || (cum->intoffset == -1 && final)))
9397 cum->words++;
9398 cum->floats_in_gpr = 0;
9401 if (cum->intoffset == -1)
9402 return;
9404 intoffset = cum->intoffset;
9405 cum->intoffset = -1;
9406 cum->floats_in_gpr = 0;
9408 if (intoffset % BITS_PER_WORD != 0)
9410 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9411 MODE_INT, 0);
9412 if (mode == BLKmode)
9414 /* We couldn't find an appropriate mode, which happens,
9415 e.g., in packed structs when there are 3 bytes to load.
9416 Move intoffset back to the beginning of the word in this
9417 case. */
9418 intoffset = intoffset & -BITS_PER_WORD;
9422 startbit = intoffset & -BITS_PER_WORD;
9423 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9424 intregs = (endbit - startbit) / BITS_PER_WORD;
9425 cum->words += intregs;
9426 /* words should be unsigned. */
9427 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
9429 int pad = (endbit/BITS_PER_WORD) - cum->words;
9430 cum->words += pad;
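/* Worked example (illustrative): flushing int fields that span bits
   [8, 72) on a 64-bit target.  startbit = 8 & -64 = 0 and endbit =
   (72 + 63) & -64 = 128, so two full GPR words are counted even
   though only part of each word holds field data.  */
#if 0
static int
example_flush_gprs (int intoffset, int bitpos)
{
  int startbit = intoffset & -64;        /* round down to a word */
  int endbit = (bitpos + 63) & -64;      /* round up to a word */
  return (endbit - startbit) / 64;       /* example_flush_gprs (8, 72) == 2 */
}
#endif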
9434 /* The darwin64 ABI calls for us to recurse down through structs,
9435 looking for elements passed in registers. Unfortunately, we have
9436 to track int register count here also because of misalignments
9437 in powerpc alignment mode. */
9439 static void
9440 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9441 const_tree type,
9442 HOST_WIDE_INT startbitpos)
9444 tree f;
9446 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9447 if (TREE_CODE (f) == FIELD_DECL)
9449 HOST_WIDE_INT bitpos = startbitpos;
9450 tree ftype = TREE_TYPE (f);
9451 enum machine_mode mode;
9452 if (ftype == error_mark_node)
9453 continue;
9454 mode = TYPE_MODE (ftype);
9456 if (DECL_SIZE (f) != 0
9457 && tree_fits_uhwi_p (bit_position (f)))
9458 bitpos += int_bit_position (f);
9460 /* ??? FIXME: else assume zero offset. */
9462 if (TREE_CODE (ftype) == RECORD_TYPE)
9463 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9464 else if (USE_FP_FOR_ARG_P (cum, mode))
9466 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9467 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9468 cum->fregno += n_fpregs;
9469 /* Single-precision floats present a special problem for
9470 us, because they are smaller than an 8-byte GPR, and so
9471 the structure-packing rules combined with the standard
9472 varargs behavior mean that we want to pack float/float
9473 and float/int combinations into a single register's
9474 space. This is complicated by the arg advance flushing,
9475 which works on arbitrarily large groups of int-type
9476 fields. */
9477 if (mode == SFmode)
9479 if (cum->floats_in_gpr == 1)
9481 /* Two floats in a word; count the word and reset
9482 the float count. */
9483 cum->words++;
9484 cum->floats_in_gpr = 0;
9486 else if (bitpos % 64 == 0)
9488 /* A float at the beginning of an 8-byte word;
9489 count it and put off adjusting cum->words until
9490 we see if an arg advance flush is going to do it
9491 for us. */
9492 cum->floats_in_gpr++;
9494 else
9496 /* The float is at the end of a word, preceded
9497 by integer fields, so the arg advance flush
9498 just above has already set cum->words and
9499 everything is taken care of. */
9502 else
9503 cum->words += n_fpregs;
9505 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9507 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9508 cum->vregno++;
9509 cum->words += 2;
9511 else if (cum->intoffset == -1)
9512 cum->intoffset = bitpos;
9516 /* Check for an item that needs to be considered specially under the Darwin
9517 64-bit ABI. These are record types where the mode is BLK or the structure is
9518 8 bytes in size. */
9519 static int
9520 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
9522 return rs6000_darwin64_abi
9523 && ((mode == BLKmode
9524 && TREE_CODE (type) == RECORD_TYPE
9525 && int_size_in_bytes (type) > 0)
9526 || (type && TREE_CODE (type) == RECORD_TYPE
9527 && int_size_in_bytes (type) == 8)) ? 1 : 0;
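/* Illustrative note: the second arm of the test above catches 8-byte
   records regardless of mode, e.g. struct { double d; }, which is
   given DFmode rather than BLKmode and would otherwise escape the
   first arm.  */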
9530 /* Update the data in CUM to advance over an argument
9531 of mode MODE and data type TYPE.
9532 (TYPE is null for libcalls where that information may not be available.)
9534 Note that for args passed by reference, function_arg will be called
9535 with MODE and TYPE set to that of the pointer to the arg, not the arg
9536 itself. */
9538 static void
9539 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9540 const_tree type, bool named, int depth)
9542 enum machine_mode elt_mode;
9543 int n_elts;
9545 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9547 /* Only tick off an argument if we're not recursing. */
9548 if (depth == 0)
9549 cum->nargs_prototype--;
9551 #ifdef HAVE_AS_GNU_ATTRIBUTE
9552 if (DEFAULT_ABI == ABI_V4
9553 && cum->escapes)
9555 if (SCALAR_FLOAT_MODE_P (mode))
9556 rs6000_passes_float = true;
9557 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9558 rs6000_passes_vector = true;
9559 else if (SPE_VECTOR_MODE (mode)
9560 && !cum->stdarg
9561 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9562 rs6000_passes_vector = true;
9564 #endif
9566 if (TARGET_ALTIVEC_ABI
9567 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9568 || (type && TREE_CODE (type) == VECTOR_TYPE
9569 && int_size_in_bytes (type) == 16)))
9571 bool stack = false;
9573 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9575 cum->vregno += n_elts;
9577 if (!TARGET_ALTIVEC)
9578 error ("cannot pass argument in vector register because"
9579 " altivec instructions are disabled, use -maltivec"
9580 " to enable them");
9582 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9583 even if it is going to be passed in a vector register.
9584 Darwin does the same for variable-argument functions. */
9585 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9586 && TARGET_64BIT)
9587 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9588 stack = true;
9590 else
9591 stack = true;
9593 if (stack)
9595 int align;
9597 /* Vector parameters must be 16-byte aligned. In 32-bit
9598 mode this means we need to take into account the offset
9599 to the parameter save area. In 64-bit mode, they just
9600 have to start on an even word, since the parameter save
9601 area is 16-byte aligned. */
9602 if (TARGET_32BIT)
9603 align = -(rs6000_parm_offset () + cum->words) & 3;
9604 else
9605 align = cum->words & 1;
9606 cum->words += align + rs6000_arg_size (mode, type);
9608 if (TARGET_DEBUG_ARG)
9610 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9611 cum->words, align);
9612 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9613 cum->nargs_prototype, cum->prototype,
9614 GET_MODE_NAME (mode));
9618 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9619 && !cum->stdarg
9620 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9621 cum->sysv_gregno++;
9623 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9625 int size = int_size_in_bytes (type);
9626 /* Variable sized types have size == -1 and are
9627 treated as if consisting entirely of ints.
9628 Pad to a 16-byte boundary if needed. */
9629 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9630 && (cum->words % 2) != 0)
9631 cum->words++;
9632 /* For varargs, we can just go up by the size of the struct. */
9633 if (!named)
9634 cum->words += (size + 7) / 8;
9635 else
9637 /* It is tempting to say int register count just goes up by
9638 sizeof(type)/8, but this is wrong in a case such as
9639 { int; double; int; } [powerpc alignment]. We have to
9640 grovel through the fields for these too. */
9641 cum->intoffset = 0;
9642 cum->floats_in_gpr = 0;
9643 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9644 rs6000_darwin64_record_arg_advance_flush (cum,
9645 size * BITS_PER_UNIT, 1);
9647 if (TARGET_DEBUG_ARG)
9649 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
9650 cum->words, TYPE_ALIGN (type), size);
9651 fprintf (stderr,
9652 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9653 cum->nargs_prototype, cum->prototype,
9654 GET_MODE_NAME (mode));
9657 else if (DEFAULT_ABI == ABI_V4)
9659 if (TARGET_HARD_FLOAT && TARGET_FPRS
9660 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9661 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9662 || (mode == TFmode && !TARGET_IEEEQUAD)
9663 || mode == SDmode || mode == DDmode || mode == TDmode))
9665 /* _Decimal128 must use an even/odd register pair. This assumes
9666 that the register number is odd when fregno is odd. */
9667 if (mode == TDmode && (cum->fregno % 2) == 1)
9668 cum->fregno++;
9670 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9671 <= FP_ARG_V4_MAX_REG)
9672 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9673 else
9675 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9676 if (mode == DFmode || mode == TFmode
9677 || mode == DDmode || mode == TDmode)
9678 cum->words += cum->words & 1;
9679 cum->words += rs6000_arg_size (mode, type);
9682 else
9684 int n_words = rs6000_arg_size (mode, type);
9685 int gregno = cum->sysv_gregno;
9687 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9688 (r7,r8) or (r9,r10). As does any other 2 word item such
9689 as complex int due to a historical mistake. */
9690 if (n_words == 2)
9691 gregno += (1 - gregno) & 1;
9693 /* Multi-reg args are not split between registers and stack. */
9694 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9696 /* Long long and SPE vectors are aligned on the stack.
9697 So are other 2 word items such as complex int due to
9698 a historical mistake. */
9699 if (n_words == 2)
9700 cum->words += cum->words & 1;
9701 cum->words += n_words;
9704 /* Note: we continue to accumulate gregno even after we've started
9705 spilling to the stack; this tells expand_builtin_saveregs that
9706 spilling has begun. */
9707 cum->sysv_gregno = gregno + n_words;
9710 if (TARGET_DEBUG_ARG)
9712 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9713 cum->words, cum->fregno);
9714 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9715 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9716 fprintf (stderr, "mode = %4s, named = %d\n",
9717 GET_MODE_NAME (mode), named);
9720 else
9722 int n_words = rs6000_arg_size (mode, type);
9723 int start_words = cum->words;
9724 int align_words = rs6000_parm_start (mode, type, start_words);
9726 cum->words = align_words + n_words;
9728 if (SCALAR_FLOAT_MODE_P (elt_mode)
9729 && TARGET_HARD_FLOAT && TARGET_FPRS)
9731 /* _Decimal128 must be passed in an even/odd float register pair.
9732 This assumes that the register number is odd when fregno is
9733 odd. */
9734 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9735 cum->fregno++;
9736 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9739 if (TARGET_DEBUG_ARG)
9741 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9742 cum->words, cum->fregno);
9743 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9744 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9745 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9746 named, align_words - start_words, depth);
9751 static void
9752 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
9753 const_tree type, bool named)
9755 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
9756 0);
9759 static rtx
9760 spe_build_register_parallel (enum machine_mode mode, int gregno)
9762 rtx r1, r3, r5, r7;
9764 switch (mode)
9766 case DFmode:
9767 r1 = gen_rtx_REG (DImode, gregno);
9768 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9769 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
9771 case DCmode:
9772 case TFmode:
9773 r1 = gen_rtx_REG (DImode, gregno);
9774 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9775 r3 = gen_rtx_REG (DImode, gregno + 2);
9776 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9777 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
9779 case TCmode:
9780 r1 = gen_rtx_REG (DImode, gregno);
9781 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9782 r3 = gen_rtx_REG (DImode, gregno + 2);
9783 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9784 r5 = gen_rtx_REG (DImode, gregno + 4);
9785 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
9786 r7 = gen_rtx_REG (DImode, gregno + 6);
9787 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
9788 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
9790 default:
9791 gcc_unreachable ();
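/* Illustrative note (editor's sketch of the RTL shape): for a TFmode
   argument starting at gregno 5, the PARALLEL built above is
     (parallel [(expr_list (reg:DI 5) (const_int 0))
                (expr_list (reg:DI 7) (const_int 8))])
   i.e. bytes 0-7 travel in the (r5,r6) pair and bytes 8-15 in
   (r7,r8), matching the GPR-pair convention noted below.  */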
9795 /* Determine where to put a SIMD argument on the SPE. */
9796 static rtx
9797 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
9798 const_tree type)
9800 int gregno = cum->sysv_gregno;
9802 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but doubles
9803 are passed and returned in a pair of GPRs for ABI compatibility. */
9804 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
9805 || mode == DCmode || mode == TCmode))
9807 int n_words = rs6000_arg_size (mode, type);
9809 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9810 if (mode == DFmode)
9811 gregno += (1 - gregno) & 1;
9813 /* Multi-reg args are not split between registers and stack. */
9814 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9815 return NULL_RTX;
9817 return spe_build_register_parallel (mode, gregno);
9819 if (cum->stdarg)
9821 int n_words = rs6000_arg_size (mode, type);
9823 /* SPE vectors are put in odd registers. */
9824 if (n_words == 2 && (gregno & 1) == 0)
9825 gregno += 1;
9827 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
9829 rtx r1, r2;
9830 enum machine_mode m = SImode;
9832 r1 = gen_rtx_REG (m, gregno);
9833 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
9834 r2 = gen_rtx_REG (m, gregno + 1);
9835 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
9836 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
9838 else
9839 return NULL_RTX;
9841 else
9843 if (gregno <= GP_ARG_MAX_REG)
9844 return gen_rtx_REG (mode, gregno);
9845 else
9846 return NULL_RTX;
9850 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9851 structure between cum->intoffset and bitpos to integer registers. */
9853 static void
9854 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9855 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9857 enum machine_mode mode;
9858 unsigned int regno;
9859 unsigned int startbit, endbit;
9860 int this_regno, intregs, intoffset;
9861 rtx reg;
9863 if (cum->intoffset == -1)
9864 return;
9866 intoffset = cum->intoffset;
9867 cum->intoffset = -1;
9869 /* If this is the trailing part of a word, try to only load that
9870 much into the register. Otherwise load the whole register. Note
9871 that in the latter case we may pick up unwanted bits. It's not a
9872 problem at the moment, but we may wish to revisit this. */
9874 if (intoffset % BITS_PER_WORD != 0)
9876 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9877 MODE_INT, 0);
9878 if (mode == BLKmode)
9880 /* We couldn't find an appropriate mode, which happens,
9881 e.g., in packed structs when there are 3 bytes to load.
9882 Move intoffset back to the beginning of the word in this
9883 case. */
9884 intoffset = intoffset & -BITS_PER_WORD;
9885 mode = word_mode;
9888 else
9889 mode = word_mode;
9891 startbit = intoffset & -BITS_PER_WORD;
9892 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9893 intregs = (endbit - startbit) / BITS_PER_WORD;
9894 this_regno = cum->words + intoffset / BITS_PER_WORD;
9896 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9897 cum->use_stack = 1;
9899 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9900 if (intregs <= 0)
9901 return;
9903 intoffset /= BITS_PER_UNIT;
9906 regno = GP_ARG_MIN_REG + this_regno;
9907 reg = gen_rtx_REG (mode, regno);
9908 rvec[(*k)++] =
9909 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9911 this_regno += 1;
9912 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9913 mode = word_mode;
9914 intregs -= 1;
9916 while (intregs > 0);
9919 /* Recursive workhorse for the following. */
9921 static void
9922 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9923 HOST_WIDE_INT startbitpos, rtx rvec[],
9924 int *k)
9926 tree f;
9928 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9929 if (TREE_CODE (f) == FIELD_DECL)
9931 HOST_WIDE_INT bitpos = startbitpos;
9932 tree ftype = TREE_TYPE (f);
9933 enum machine_mode mode;
9934 if (ftype == error_mark_node)
9935 continue;
9936 mode = TYPE_MODE (ftype);
9938 if (DECL_SIZE (f) != 0
9939 && tree_fits_uhwi_p (bit_position (f)))
9940 bitpos += int_bit_position (f);
9942 /* ??? FIXME: else assume zero offset. */
9944 if (TREE_CODE (ftype) == RECORD_TYPE)
9945 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9946 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
9948 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9949 #if 0
9950 switch (mode)
9952 case SCmode: mode = SFmode; break;
9953 case DCmode: mode = DFmode; break;
9954 case TCmode: mode = TFmode; break;
9955 default: break;
9957 #endif
9958 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9959 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9961 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9962 && (mode == TFmode || mode == TDmode));
9963 /* Long double or _Decimal128 split over regs and memory. */
9964 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9965 cum->use_stack = 1;
9967 rvec[(*k)++]
9968 = gen_rtx_EXPR_LIST (VOIDmode,
9969 gen_rtx_REG (mode, cum->fregno++),
9970 GEN_INT (bitpos / BITS_PER_UNIT));
9971 if (mode == TFmode || mode == TDmode)
9972 cum->fregno++;
9974 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9976 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9977 rvec[(*k)++]
9978 = gen_rtx_EXPR_LIST (VOIDmode,
9979 gen_rtx_REG (mode, cum->vregno++),
9980 GEN_INT (bitpos / BITS_PER_UNIT));
9982 else if (cum->intoffset == -1)
9983 cum->intoffset = bitpos;
9987 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9988 the register(s) to be used for each field and subfield of a struct
9989 being passed by value, along with the offset of where the
9990 register's value may be found in the block. FP fields go in FP
9991 registers, vector fields go in vector registers, and everything
9992 else goes in int registers, packed as in memory.
9994 This code is also used for function return values. RETVAL indicates
9995 whether this is the case.
9997 Much of this is taken from the SPARC V9 port, which has a similar
9998 calling convention. */
10000 static rtx
10001 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
10002 bool named, bool retval)
10004 rtx rvec[FIRST_PSEUDO_REGISTER];
10005 int k = 1, kbase = 1;
10006 HOST_WIDE_INT typesize = int_size_in_bytes (type);
10007 /* This is a copy; modifications are not visible to our caller. */
10008 CUMULATIVE_ARGS copy_cum = *orig_cum;
10009 CUMULATIVE_ARGS *cum = &copy_cum;
10011 /* Pad to a 16-byte boundary if needed. */
10012 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10013 && (cum->words % 2) != 0)
10014 cum->words++;
10016 cum->intoffset = 0;
10017 cum->use_stack = 0;
10018 cum->named = named;
10020 /* Put entries into rvec[] for individual FP and vector fields, and
10021 for the chunks of memory that go in int regs. Note we start at
10022 element 1; 0 is reserved for an indication of using memory, and
10023 may or may not be filled in below. */
10024 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
10025 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
10027 /* If any part of the struct went on the stack put all of it there.
10028 This hack is because the generic code for
10029 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
10030 parts of the struct are not at the beginning. */
10031 if (cum->use_stack)
10033 if (retval)
10034 return NULL_RTX; /* doesn't go in registers at all */
10035 kbase = 0;
10036 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10038 if (k > 1 || cum->use_stack)
10039 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
10040 else
10041 return NULL_RTX;
10044 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
10046 static rtx
10047 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
10048 int align_words)
10050 int n_units;
10051 int i, k;
10052 rtx rvec[GP_ARG_NUM_REG + 1];
10054 if (align_words >= GP_ARG_NUM_REG)
10055 return NULL_RTX;
10057 n_units = rs6000_arg_size (mode, type);
10059 /* Optimize the simple case where the arg fits in one gpr, except in
10060 the case of BLKmode due to assign_parms assuming that registers are
10061 BITS_PER_WORD wide. */
10062 if (n_units == 0
10063 || (n_units == 1 && mode != BLKmode))
10064 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10066 k = 0;
10067 if (align_words + n_units > GP_ARG_NUM_REG)
10068 /* Not all of the arg fits in gprs. Say that it goes in memory too,
10069 using a magic NULL_RTX component.
10070 This is not strictly correct. Only some of the arg belongs in
10071 memory, not all of it. However, the normal scheme using
10072 function_arg_partial_nregs can result in unusual subregs, e.g.
10073 (subreg:SI (reg:DF) 4), which are not handled well. The code to
10074 store the whole arg to memory is often more efficient than code
10075 to store pieces, and we know that space is available in the right
10076 place for the whole arg. */
10077 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10079 i = 0;
10082 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
10083 rtx off = GEN_INT (i++ * 4);
10084 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10086 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
10088 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
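/* Worked example (illustrative): a 12-byte BLKmode struct whose
   first word lands at GPR slot 6 (r9) needs n_units = 3, but only
   two GPR slots remain.  The PARALLEL therefore starts with the
   magic NULL_RTX element (the whole value is also stored to memory,
   per the comment above), followed by r9 (SImode) at offset 0 and
   r10 at offset 4; the final 4 bytes live only in the parameter
   save area.  */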
10091 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
10092 but must also be copied into the parameter save area starting at
10093 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
10094 to the GPRs and/or memory. Return the number of elements used. */
10096 static int
10097 rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
10098 int align_words, rtx *rvec)
10100 int k = 0;
10102 if (align_words < GP_ARG_NUM_REG)
10104 int n_words = rs6000_arg_size (mode, type);
10106 if (align_words + n_words > GP_ARG_NUM_REG
10107 || mode == BLKmode
10108 || (TARGET_32BIT && TARGET_POWERPC64))
10110 /* If this is partially on the stack, then we only
10111 include the portion actually in registers here. */
10112 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10113 int i = 0;
10115 if (align_words + n_words > GP_ARG_NUM_REG)
10117 /* Not all of the arg fits in gprs. Say that it goes in memory
10118 too, using a magic NULL_RTX component. Also see comment in
10119 rs6000_mixed_function_arg for why the normal
10120 function_arg_partial_nregs scheme doesn't work in this case. */
10121 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10126 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10127 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
10128 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10130 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10132 else
10134 /* The whole arg fits in gprs. */
10135 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10136 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
10139 else
10141 /* It's entirely in memory. */
10142 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10145 return k;
10148 /* RVEC is a vector of K components of an argument of mode MODE.
10149 Construct the final function_arg return value from it. */
10151 static rtx
10152 rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
10154 gcc_assert (k >= 1);
10156 /* Avoid returning a PARALLEL in the trivial cases. */
10157 if (k == 1)
10159 if (XEXP (rvec[0], 0) == NULL_RTX)
10160 return NULL_RTX;
10162 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
10163 return XEXP (rvec[0], 0);
10166 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
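/* Illustrative note: the unwrapping above means a single-element
   vector like { (expr_list (reg:DF 33) (const_int 0)) } for a DFmode
   argument comes back as the bare (reg:DF 33) (i.e. f1), while a
   lone NULL_RTX element -- "entirely in memory" -- collapses to
   NULL_RTX.  */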
10169 /* Determine where to put an argument to a function.
10170 Value is zero to push the argument on the stack,
10171 or a hard register in which to store the argument.
10173 MODE is the argument's machine mode.
10174 TYPE is the data type of the argument (as a tree).
10175 This is null for libcalls where that information may
10176 not be available.
10177 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10178 the preceding args and about the function being called. It is
10179 not modified in this routine.
10180 NAMED is nonzero if this argument is a named parameter
10181 (otherwise it is an extra parameter matching an ellipsis).
10183 On RS/6000 the first eight words of non-FP are normally in registers
10184 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10185 Under V.4, the first 8 FP args are in registers.
10187 If this is floating-point and no prototype is specified, we use
10188 both an FP and integer register (or possibly FP reg and stack). Library
10189 functions (when CALL_LIBCALL is set) always have the proper types for args,
10190 so we can pass the FP value just in one register. emit_library_function
10191 doesn't support PARALLEL anyway.
10193 Note that for args passed by reference, function_arg will be called
10194 with MODE and TYPE set to that of the pointer to the arg, not the arg
10195 itself. */
10197 static rtx
10198 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
10199 const_tree type, bool named)
10201 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10202 enum rs6000_abi abi = DEFAULT_ABI;
10203 enum machine_mode elt_mode;
10204 int n_elts;
10206 /* Return a marker to indicate whether CR1 needs to set or clear the
10207 bit that V.4 uses to say fp args were passed in registers.
10208 Assume that we don't need the marker for software floating point,
10209 or compiler generated library calls. */
10210 if (mode == VOIDmode)
10212 if (abi == ABI_V4
10213 && (cum->call_cookie & CALL_LIBCALL) == 0
10214 && (cum->stdarg
10215 || (cum->nargs_prototype < 0
10216 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10218 /* For the SPE, we need to crxor CR6 always. */
10219 if (TARGET_SPE_ABI)
10220 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10221 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10222 return GEN_INT (cum->call_cookie
10223 | ((cum->fregno == FP_ARG_MIN_REG)
10224 ? CALL_V4_SET_FP_ARGS
10225 : CALL_V4_CLEAR_FP_ARGS));
10228 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10231 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10233 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10235 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
10236 if (rslt != NULL_RTX)
10237 return rslt;
10238 /* Else fall through to usual handling. */
10241 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10243 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10244 rtx r, off;
10245 int i, k = 0;
10247 /* Do we also need to pass this argument in the parameter
10248 save area? */
10249 if (TARGET_64BIT && ! cum->prototype)
10251 int align_words = (cum->words + 1) & ~1;
10252 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10255 /* Describe where this argument goes in the vector registers. */
10256 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10258 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10259 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10260 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10263 return rs6000_finish_function_arg (mode, rvec, k);
10265 else if (TARGET_ALTIVEC_ABI
10266 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10267 || (type && TREE_CODE (type) == VECTOR_TYPE
10268 && int_size_in_bytes (type) == 16)))
10270 if (named || abi == ABI_V4)
10271 return NULL_RTX;
10272 else
10274 /* Vector parameters to varargs functions under AIX or Darwin
10275 get passed in memory and possibly also in GPRs. */
10276 int align, align_words, n_words;
10277 enum machine_mode part_mode;
10279 /* Vector parameters must be 16-byte aligned. In 32-bit
10280 mode this means we need to take into account the offset
10281 to the parameter save area. In 64-bit mode, they just
10282 have to start on an even word, since the parameter save
10283 area is 16-byte aligned. */
10284 if (TARGET_32BIT)
10285 align = -(rs6000_parm_offset () + cum->words) & 3;
10286 else
10287 align = cum->words & 1;
10288 align_words = cum->words + align;
10290 /* Out of registers? Memory, then. */
10291 if (align_words >= GP_ARG_NUM_REG)
10292 return NULL_RTX;
10294 if (TARGET_32BIT && TARGET_POWERPC64)
10295 return rs6000_mixed_function_arg (mode, type, align_words);
10297 /* The vector value goes in GPRs. Only the part of the
10298 value in GPRs is reported here. */
10299 part_mode = mode;
10300 n_words = rs6000_arg_size (mode, type);
10301 if (align_words + n_words > GP_ARG_NUM_REG)
10302 /* Fortunately, there are only two possibilities, the value
10303 is either wholly in GPRs or half in GPRs and half not. */
10304 part_mode = DImode;
10306 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10309 else if (TARGET_SPE_ABI && TARGET_SPE
10310 && (SPE_VECTOR_MODE (mode)
10311 || (TARGET_E500_DOUBLE && (mode == DFmode
10312 || mode == DCmode
10313 || mode == TFmode
10314 || mode == TCmode))))
10315 return rs6000_spe_function_arg (cum, mode, type);
10317 else if (abi == ABI_V4)
10319 if (TARGET_HARD_FLOAT && TARGET_FPRS
10320 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10321 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10322 || (mode == TFmode && !TARGET_IEEEQUAD)
10323 || mode == SDmode || mode == DDmode || mode == TDmode))
10325 /* _Decimal128 must use an even/odd register pair. This assumes
10326 that the register number is odd when fregno is odd. */
10327 if (mode == TDmode && (cum->fregno % 2) == 1)
10328 cum->fregno++;
10330 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10331 <= FP_ARG_V4_MAX_REG)
10332 return gen_rtx_REG (mode, cum->fregno);
10333 else
10334 return NULL_RTX;
10336 else
10338 int n_words = rs6000_arg_size (mode, type);
10339 int gregno = cum->sysv_gregno;
10341 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10342 (r7,r8) or (r9,r10). As does any other 2 word item such
10343 as complex int due to a historical mistake. */
10344 if (n_words == 2)
10345 gregno += (1 - gregno) & 1;
10347 /* Multi-reg args are not split between registers and stack. */
10348 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10349 return NULL_RTX;
10351 if (TARGET_32BIT && TARGET_POWERPC64)
10352 return rs6000_mixed_function_arg (mode, type,
10353 gregno - GP_ARG_MIN_REG);
10354 return gen_rtx_REG (mode, gregno);
10357 else
10359 int align_words = rs6000_parm_start (mode, type, cum->words);
10361 /* _Decimal128 must be passed in an even/odd float register pair.
10362 This assumes that the register number is odd when fregno is odd. */
10363 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10364 cum->fregno++;
10366 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10368 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10369 rtx r, off;
10370 int i, k = 0;
10371 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10372 int fpr_words;
10374 /* Do we also need to pass this argument in the parameter
10375 save area? */
10376 if (type && (cum->nargs_prototype <= 0
10377 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10378 && TARGET_XL_COMPAT
10379 && align_words >= GP_ARG_NUM_REG)))
10380 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10382 /* Describe where this argument goes in the fprs. */
10383 for (i = 0; i < n_elts
10384 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10386 /* Check if the argument is split over registers and memory.
10387 This can only ever happen for long double or _Decimal128;
10388 complex types are handled via split_complex_arg. */
10389 enum machine_mode fmode = elt_mode;
10390 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10392 gcc_assert (fmode == TFmode || fmode == TDmode);
10393 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10396 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10397 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10398 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10401 /* If there were not enough FPRs to hold the argument, the rest
10402 usually goes into memory. However, if the current position
10403 is still within the register parameter area, a portion may
10404 actually have to go into GPRs.
10406 Note that it may happen that the portion of the argument
10407 passed in the first "half" of the first GPR was already
10408 passed in the last FPR as well.
10410 For unnamed arguments, we already set up GPRs to cover the
10411 whole argument in rs6000_psave_function_arg, so there is
10412 nothing further to do at this point. */
10413 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
10414 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
10415 && cum->nargs_prototype > 0)
10417 static bool warned;
10419 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10420 int n_words = rs6000_arg_size (mode, type);
10422 align_words += fpr_words;
10423 n_words -= fpr_words;
10427 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10428 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
10429 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10431 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10433 if (!warned && warn_psabi)
10435 warned = true;
10436 inform (input_location,
10437 "the ABI of passing homogeneous float aggregates"
10438 " has changed in GCC 4.10");
10442 return rs6000_finish_function_arg (mode, rvec, k);
10444 else if (align_words < GP_ARG_NUM_REG)
10446 if (TARGET_32BIT && TARGET_POWERPC64)
10447 return rs6000_mixed_function_arg (mode, type, align_words);
10449 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10451 else
10452 return NULL_RTX;
10456 /* For an arg passed partly in registers and partly in memory, this is
10457 the number of bytes passed in registers. For args passed entirely in
10458 registers or entirely in memory, zero. When an arg is described by a
10459 PARALLEL, perhaps using more than one register type, this function
10460 returns the number of bytes used by the first element of the PARALLEL. */
10462 static int
10463 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
10464 tree type, bool named)
10466 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10467 bool passed_in_gprs = true;
10468 int ret = 0;
10469 int align_words;
10470 enum machine_mode elt_mode;
10471 int n_elts;
10473 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10475 if (DEFAULT_ABI == ABI_V4)
10476 return 0;
10478 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10480 /* If we are passing this arg in the fixed parameter save area
10481 (gprs or memory) as well as VRs, we do not use the partial
10482 bytes mechanism; instead, rs6000_function_arg will return a
10483 PARALLEL including a memory element as necessary. */
10484 if (TARGET_64BIT && ! cum->prototype)
10485 return 0;
10487 /* Otherwise, we pass in VRs only. Check for partial copies. */
10488 passed_in_gprs = false;
10489 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10490 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10493 /* In this complicated case we just disable the partial_nregs code. */
10494 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10495 return 0;
10497 align_words = rs6000_parm_start (mode, type, cum->words);
10499 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10501 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10503 /* If we are passing this arg in the fixed parameter save area
10504 (gprs or memory) as well as FPRs, we do not use the partial
10505 bytes mechanism; instead, rs6000_function_arg will return a
10506 PARALLEL including a memory element as necessary. */
10507 if (type
10508 && (cum->nargs_prototype <= 0
10509 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10510 && TARGET_XL_COMPAT
10511 && align_words >= GP_ARG_NUM_REG)))
10512 return 0;
10514 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10515 passed_in_gprs = false;
10516 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10518 /* Compute number of bytes / words passed in FPRs. If there
10519 is still space available in the register parameter area
10520 *after* that amount, a part of the argument will be passed
10521 in GPRs. In that case, the total amount passed in any
10522 registers is equal to the amount that would have been passed
10523 in GPRs if everything were passed there, so we fall back to
10524 the GPR code below to compute the appropriate value. */
10525 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10526 * MIN (8, GET_MODE_SIZE (elt_mode)));
10527 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
10529 if (align_words + fpr_words < GP_ARG_NUM_REG)
10530 passed_in_gprs = true;
10531 else
10532 ret = fpr;
10536 if (passed_in_gprs
10537 && align_words < GP_ARG_NUM_REG
10538 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10539 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10541 if (ret != 0 && TARGET_DEBUG_ARG)
10542 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10544 return ret;
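/* Worked example (illustrative): under the 64-bit AIX ABI, a 16-byte
   non-homogeneous aggregate whose first word falls at GPR slot 7 has
   only one slot left, so 8 bytes ride in r10 and the rest goes to
   the parameter save area.  The GPR computation, isolated:  */
#if 0
static int
example_partial_bytes (int align_words, int arg_words)
{
  /* Assumes GP_ARG_NUM_REG == 8 and 8-byte words (64-bit).  */
  if (align_words < 8 && 8 < align_words + arg_words)
    return (8 - align_words) * 8;  /* example_partial_bytes (7, 2) == 8 */
  return 0;
}
#endif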
10547 /* A C expression that indicates when an argument must be passed by
10548 reference. If nonzero for an argument, a copy of that argument is
10549 made in memory and a pointer to the argument is passed instead of
10550 the argument itself. The pointer is passed in whatever way is
10551 appropriate for passing a pointer to that type.
10553 Under V.4, aggregates and long double are passed by reference.
10555 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10556 reference unless the AltiVec vector extension ABI is in force.
10558 As an extension to all ABIs, variable sized types are passed by
10559 reference. */
10561 static bool
10562 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10563 enum machine_mode mode, const_tree type,
10564 bool named ATTRIBUTE_UNUSED)
10566 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10568 if (TARGET_DEBUG_ARG)
10569 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10570 return 1;
10573 if (!type)
10574 return 0;
10576 #if HAVE_UPC_PTS_STRUCT_REP
10577 if (DEFAULT_ABI == ABI_V4 && POINTER_TYPE_P (type)
10578 && upc_shared_type_p (TREE_TYPE (type)))
10580 if (TARGET_DEBUG_ARG)
10581 fprintf (stderr,
10582 "function_arg_pass_by_reference: V4 UPC ptr to shared\n");
10583 return 1;
10585 #endif
10587 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10589 if (TARGET_DEBUG_ARG)
10590 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10591 return 1;
10594 if (int_size_in_bytes (type) < 0)
10596 if (TARGET_DEBUG_ARG)
10597 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10598 return 1;
10601 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10602 modes only exist for GCC vector types if -maltivec. */
10603 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10605 if (TARGET_DEBUG_ARG)
10606 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10607 return 1;
10610 /* Pass synthetic vectors in memory. */
10611 if (TREE_CODE (type) == VECTOR_TYPE
10612 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10614 static bool warned_for_pass_big_vectors = false;
10615 if (TARGET_DEBUG_ARG)
10616 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10617 if (!warned_for_pass_big_vectors)
10619 warning (0, "GCC vector passed by reference: "
10620 "non-standard ABI extension with no compatibility guarantee");
10621 warned_for_pass_big_vectors = true;
10623 return 1;
10626 return 0;
10629 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
10630 already processed. Return true if the parameter must be passed
10631 (fully or partially) on the stack. */
10633 static bool
10634 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10636 enum machine_mode mode;
10637 int unsignedp;
10638 rtx entry_parm;
10640 /* Catch errors. */
10641 if (type == NULL || type == error_mark_node)
10642 return true;
10644 /* Handle types with no storage requirement. */
10645 if (TYPE_MODE (type) == VOIDmode)
10646 return false;
10648 /* Handle complex types. */
10649 if (TREE_CODE (type) == COMPLEX_TYPE)
10650 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10651 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10653 /* Handle transparent aggregates. */
10654 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10655 && TYPE_TRANSPARENT_AGGR (type))
10656 type = TREE_TYPE (first_field (type));
10658 /* See if this arg was passed by invisible reference. */
10659 if (pass_by_reference (get_cumulative_args (args_so_far),
10660 TYPE_MODE (type), type, true))
10661 type = build_pointer_type (type);
10663 /* Find mode as it is passed by the ABI. */
10664 unsignedp = TYPE_UNSIGNED (type);
10665 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10667 /* If we must pass in stack, we need a stack. */
10668 if (rs6000_must_pass_in_stack (mode, type))
10669 return true;
10671 /* If there is no incoming register, we need a stack. */
10672 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10673 if (entry_parm == NULL)
10674 return true;
10676 /* Likewise if we need to pass both in registers and on the stack. */
10677 if (GET_CODE (entry_parm) == PARALLEL
10678 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10679 return true;
10681 /* Also true if we're partially in registers and partially not. */
10682 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10683 return true;
10685 /* Update info on where next arg arrives in registers. */
10686 rs6000_function_arg_advance (args_so_far, mode, type, true);
10687 return false;
10690 /* Return true if FUN has no prototype, has a variable argument
10691 list, or passes any parameter in memory. */
10693 static bool
10694 rs6000_function_parms_need_stack (tree fun, bool incoming)
10696 tree fntype, result;
10697 CUMULATIVE_ARGS args_so_far_v;
10698 cumulative_args_t args_so_far;
10700 if (!fun)
10701 /* Must be a libcall, all of which only use reg parms. */
10702 return false;
10704 fntype = fun;
10705 if (!TYPE_P (fun))
10706 fntype = TREE_TYPE (fun);
10708 /* Varargs functions need the parameter save area. */
10709 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
10710 return true;
10712 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
10713 args_so_far = pack_cumulative_args (&args_so_far_v);
10715 /* When incoming, we will have been passed the function decl.
10716 It is necessary to use the decl to handle K&R style functions,
10717 where TYPE_ARG_TYPES may not be available. */
10718 if (incoming)
10720 gcc_assert (DECL_P (fun));
10721 result = DECL_RESULT (fun);
10723 else
10724 result = TREE_TYPE (fntype);
10726 if (result && aggregate_value_p (result, fntype))
10728 if (!TYPE_P (result))
10729 result = TREE_TYPE (result);
10730 result = build_pointer_type (result);
10731 rs6000_parm_needs_stack (args_so_far, result);
10734 if (incoming)
10736 tree parm;
10738 for (parm = DECL_ARGUMENTS (fun);
10739 parm && parm != void_list_node;
10740 parm = TREE_CHAIN (parm))
10741 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
10742 return true;
10744 else
10746 function_args_iterator args_iter;
10747 tree arg_type;
10749 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
10750 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10751 return true;
10754 return false;
10757 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10758 usually a constant depending on the ABI. However, in the ELFv2 ABI
10759 the register parameter area is optional when calling a function that
10760 has a prototype in scope, has no variable argument list, and passes
10761 all parameters in registers. */
10763 int
10764 rs6000_reg_parm_stack_space (tree fun, bool incoming)
10766 int reg_parm_stack_space;
10768 switch (DEFAULT_ABI)
10770 default:
10771 reg_parm_stack_space = 0;
10772 break;
10774 case ABI_AIX:
10775 case ABI_DARWIN:
10776 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10777 break;
10779 case ABI_ELFv2:
10780 /* ??? Recomputing this every time is a bit expensive. Is there
10781 a place to cache this information? */
10782 if (rs6000_function_parms_need_stack (fun, incoming))
10783 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10784 else
10785 reg_parm_stack_space = 0;
10786 break;
10789 return reg_parm_stack_space;
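/* Illustrative summary (editor's note): AIX and Darwin always
   reserve the register parameter area (64 bytes for 64-bit, 32 for
   32-bit); V.4 never reserves it; ELFv2 reserves it only when the
   walk in rs6000_function_parms_need_stack finds a parameter (or the
   hidden aggregate-return pointer) that actually needs memory.  */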
10792 static void
10793 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
10795 int i;
10796 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
10798 if (nregs == 0)
10799 return;
10801 for (i = 0; i < nregs; i++)
10803 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
10804 if (reload_completed)
10806 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
10807 tem = NULL_RTX;
10808 else
10809 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
10810 i * GET_MODE_SIZE (reg_mode));
10812 else
10813 tem = replace_equiv_address (tem, XEXP (tem, 0));
10815 gcc_assert (tem);
10817 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
10821 /* Perform any actions needed for a function that is receiving a
10822 variable number of arguments.
10824 CUM is as above.
10826 MODE and TYPE are the mode and type of the current parameter.
10828 PRETEND_SIZE is a variable that should be set to the amount of stack
10829 that must be pushed by the prolog to pretend that our caller pushed
10832 Normally, this macro will push all remaining incoming registers on the
10833 stack and set PRETEND_SIZE to the length of the registers pushed. */
10835 static void
10836 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
10837 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10838 int no_rtl)
10840 CUMULATIVE_ARGS next_cum;
10841 int reg_size = TARGET_32BIT ? 4 : 8;
10842 rtx save_area = NULL_RTX, mem;
10843 int first_reg_offset;
10844 alias_set_type set;
10846 /* Skip the last named argument. */
10847 next_cum = *get_cumulative_args (cum);
10848 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
10850 if (DEFAULT_ABI == ABI_V4)
10852 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
10854 if (! no_rtl)
10856 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
10857 HOST_WIDE_INT offset = 0;
10859 /* Try to optimize the size of the varargs save area.
10860 The ABI requires that ap.reg_save_area is doubleword
10861 aligned, but we don't need to allocate space for all
10862 the bytes, only those to which we actually will save
10863 anything. */
10864 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
10865 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
10866 if (TARGET_HARD_FLOAT && TARGET_FPRS
10867 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10868 && cfun->va_list_fpr_size)
10870 if (gpr_reg_num)
10871 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
10872 * UNITS_PER_FP_WORD;
10873 if (cfun->va_list_fpr_size
10874 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10875 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
10876 else
10877 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10878 * UNITS_PER_FP_WORD;
10880 if (gpr_reg_num)
10882 offset = -((first_reg_offset * reg_size) & ~7);
10883 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
10885 gpr_reg_num = cfun->va_list_gpr_size;
10886 if (reg_size == 4 && (first_reg_offset & 1))
10887 gpr_reg_num++;
10889 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
10891 else if (fpr_size)
10892 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
10893 * UNITS_PER_FP_WORD
10894 - (int) (GP_ARG_NUM_REG * reg_size);
10896 if (gpr_size + fpr_size)
10898 rtx reg_save_area
10899 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
10900 gcc_assert (GET_CODE (reg_save_area) == MEM);
10901 reg_save_area = XEXP (reg_save_area, 0);
10902 if (GET_CODE (reg_save_area) == PLUS)
10904 gcc_assert (XEXP (reg_save_area, 0)
10905 == virtual_stack_vars_rtx);
10906 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
10907 offset += INTVAL (XEXP (reg_save_area, 1));
10909 else
10910 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
10913 cfun->machine->varargs_save_offset = offset;
10914 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
10917 else
10919 first_reg_offset = next_cum.words;
10920 save_area = virtual_incoming_args_rtx;
10922 if (targetm.calls.must_pass_in_stack (mode, type))
10923 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
10926 set = get_varargs_alias_set ();
10927 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
10928 && cfun->va_list_gpr_size)
10930 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
10932 if (va_list_gpr_counter_field)
10933 /* V4 va_list_gpr_size counts number of registers needed. */
10934 n_gpr = cfun->va_list_gpr_size;
10935 else
10936 /* char * va_list instead counts number of bytes needed. */
10937 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
10939 if (nregs > n_gpr)
10940 nregs = n_gpr;
10942 mem = gen_rtx_MEM (BLKmode,
10943 plus_constant (Pmode, save_area,
10944 first_reg_offset * reg_size));
10945 MEM_NOTRAP_P (mem) = 1;
10946 set_mem_alias_set (mem, set);
10947 set_mem_align (mem, BITS_PER_WORD);
10949 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
10950 nregs);
10953 /* Save FP registers if needed. */
10954 if (DEFAULT_ABI == ABI_V4
10955 && TARGET_HARD_FLOAT && TARGET_FPRS
10956 && ! no_rtl
10957 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10958 && cfun->va_list_fpr_size)
10960 int fregno = next_cum.fregno, nregs;
10961 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
10962 rtx lab = gen_label_rtx ();
10963 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
10964 * UNITS_PER_FP_WORD);
10966 emit_jump_insn
10967 (gen_rtx_SET (VOIDmode,
10968 pc_rtx,
10969 gen_rtx_IF_THEN_ELSE (VOIDmode,
10970 gen_rtx_NE (VOIDmode, cr1,
10971 const0_rtx),
10972 gen_rtx_LABEL_REF (VOIDmode, lab),
10973 pc_rtx)));
10975 for (nregs = 0;
10976 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
10977 fregno++, off += UNITS_PER_FP_WORD, nregs++)
10979 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10980 ? DFmode : SFmode,
10981 plus_constant (Pmode, save_area, off));
10982 MEM_NOTRAP_P (mem) = 1;
10983 set_mem_alias_set (mem, set);
10984 set_mem_align (mem, GET_MODE_ALIGNMENT (
10985 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10986 ? DFmode : SFmode));
10987 emit_move_insn (mem, gen_rtx_REG (
10988 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10989 ? DFmode : SFmode, fregno));
10992 emit_label (lab);
10996 /* Create the va_list data type. */
10998 static tree
10999 rs6000_build_builtin_va_list (void)
11001 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
11003 /* For AIX, prefer 'char *' because that's what the system
11004 header files like. */
11005 if (DEFAULT_ABI != ABI_V4)
11006 return build_pointer_type (char_type_node);
11008 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
11009 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
11010 get_identifier ("__va_list_tag"), record);
11012 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
11013 unsigned_char_type_node);
11014 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
11015 unsigned_char_type_node);
11016 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
11017 every user file. */
11018 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11019 get_identifier ("reserved"), short_unsigned_type_node);
11020 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11021 get_identifier ("overflow_arg_area"),
11022 ptr_type_node);
11023 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11024 get_identifier ("reg_save_area"),
11025 ptr_type_node);
11027 va_list_gpr_counter_field = f_gpr;
11028 va_list_fpr_counter_field = f_fpr;
11030 DECL_FIELD_CONTEXT (f_gpr) = record;
11031 DECL_FIELD_CONTEXT (f_fpr) = record;
11032 DECL_FIELD_CONTEXT (f_res) = record;
11033 DECL_FIELD_CONTEXT (f_ovf) = record;
11034 DECL_FIELD_CONTEXT (f_sav) = record;
11036 TYPE_STUB_DECL (record) = type_decl;
11037 TYPE_NAME (record) = type_decl;
11038 TYPE_FIELDS (record) = f_gpr;
11039 DECL_CHAIN (f_gpr) = f_fpr;
11040 DECL_CHAIN (f_fpr) = f_res;
11041 DECL_CHAIN (f_res) = f_ovf;
11042 DECL_CHAIN (f_ovf) = f_sav;
11044 layout_type (record);
11046 /* The correct type is an array type of one element. */
11047 return build_array_type (record, build_index_type (size_zero_node));
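/* For reference (editor's sketch): on V4 the record built above matches
   the declaration below; every other ABI just uses a plain 'char *'.

       typedef struct __va_list_tag
       {
         unsigned char gpr;           // index of next GP register argument
         unsigned char fpr;           // index of next FP register argument
         unsigned short reserved;     // named padding, silences -Wpadded
         void *overflow_arg_area;     // arguments that spilled to the stack
         void *reg_save_area;         // block where the prologue saved arg regs
       } __va_list_tag;

       typedef __va_list_tag __builtin_va_list[1];  // array of one element
*/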
11050 /* Implement va_start. */
11052 static void
11053 rs6000_va_start (tree valist, rtx nextarg)
11055 HOST_WIDE_INT words, n_gpr, n_fpr;
11056 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11057 tree gpr, fpr, ovf, sav, t;
11059 /* Only SVR4 needs something special. */
11060 if (DEFAULT_ABI != ABI_V4)
11062 std_expand_builtin_va_start (valist, nextarg);
11063 return;
11066 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11067 f_fpr = DECL_CHAIN (f_gpr);
11068 f_res = DECL_CHAIN (f_fpr);
11069 f_ovf = DECL_CHAIN (f_res);
11070 f_sav = DECL_CHAIN (f_ovf);
11072 valist = build_simple_mem_ref (valist);
11073 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11074 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11075 f_fpr, NULL_TREE);
11076 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11077 f_ovf, NULL_TREE);
11078 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11079 f_sav, NULL_TREE);
11081 /* Count number of gp and fp argument registers used. */
11082 words = crtl->args.info.words;
11083 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
11084 GP_ARG_NUM_REG);
11085 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
11086 FP_ARG_NUM_REG);
11088 if (TARGET_DEBUG_ARG)
11089 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
11090 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
11091 words, n_gpr, n_fpr);
11093 if (cfun->va_list_gpr_size)
11095 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11096 build_int_cst (NULL_TREE, n_gpr));
11097 TREE_SIDE_EFFECTS (t) = 1;
11098 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11101 if (cfun->va_list_fpr_size)
11103 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11104 build_int_cst (NULL_TREE, n_fpr));
11105 TREE_SIDE_EFFECTS (t) = 1;
11106 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11108 #ifdef HAVE_AS_GNU_ATTRIBUTE
11109 if (call_ABI_of_interest (cfun->decl))
11110 rs6000_passes_float = true;
11111 #endif
11114 /* Find the overflow area. */
11115 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11116 if (words != 0)
11117 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
11118 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11119 TREE_SIDE_EFFECTS (t) = 1;
11120 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11122 /* If there were no va_arg invocations, don't set up the register
11123 save area. */
11124 if (!cfun->va_list_gpr_size
11125 && !cfun->va_list_fpr_size
11126 && n_gpr < GP_ARG_NUM_REG
11127 && n_fpr < FP_ARG_V4_MAX_REG)
11128 return;
11130 /* Find the register save area. */
11131 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
11132 if (cfun->machine->varargs_save_offset)
11133 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
11134 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11135 TREE_SIDE_EFFECTS (t) = 1;
11136 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
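/* Rough C-level equivalent (editor's sketch) of the initialization emitted
   above for the V4 ABI.  Here words/n_gpr/n_fpr are the counts computed
   from crtl->args.info, and incoming_args/frame_base stand in for
   virtual_incoming_args_rtx and virtual_stack_vars_rtx:

       ap->gpr = n_gpr;                           // GP arg regs already used
       ap->fpr = n_fpr;                           // FP arg regs already used
       ap->overflow_arg_area
         = (char *) incoming_args + words * UNITS_PER_WORD;
       ap->reg_save_area
         = (char *) frame_base + cfun->machine->varargs_save_offset;
*/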
11139 /* Implement va_arg. */
11141 static tree
11142 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11143 gimple_seq *post_p)
11145 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11146 tree gpr, fpr, ovf, sav, reg, t, u;
11147 int size, rsize, n_reg, sav_ofs, sav_scale;
11148 tree lab_false, lab_over, addr;
11149 int align;
11150 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
11151 int regalign = 0;
11152 gimple stmt;
11154 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11156 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
11157 return build_va_arg_indirect_ref (t);
11160 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
11161 earlier version of gcc, with the property that it always applied alignment
11162 adjustments to the va-args (even for zero-sized types). The cheapest way
11163 to deal with this is to replicate the effect of the part of
11164 std_gimplify_va_arg_expr that carries out the align adjust, for the case
11165 of relevance.
11166 We don't need to check for pass-by-reference because of the test above.
11167 We can return a simplified answer, since we know there's no offset to add. */
11169 if (((TARGET_MACHO
11170 && rs6000_darwin64_abi)
11171 || DEFAULT_ABI == ABI_ELFv2
11172 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
11173 && integer_zerop (TYPE_SIZE (type)))
11175 unsigned HOST_WIDE_INT align, boundary;
11176 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
11177 align = PARM_BOUNDARY / BITS_PER_UNIT;
11178 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
11179 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
11180 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
11181 boundary /= BITS_PER_UNIT;
11182 if (boundary > align)
11184 tree t;
11185 /* This updates arg ptr by the amount that would be necessary
11186 to align the zero-sized (but not zero-alignment) item. */
11187 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11188 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
11189 gimplify_and_add (t, pre_p);
11191 t = fold_convert (sizetype, valist_tmp);
11192 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11193 fold_convert (TREE_TYPE (valist),
11194 fold_build2 (BIT_AND_EXPR, sizetype, t,
11195 size_int (-boundary))));
11196 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
11197 gimplify_and_add (t, pre_p);
11199 /* Since it is zero-sized there's no increment for the item itself. */
11200 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
11201 return build_va_arg_indirect_ref (valist_tmp);
11204 if (DEFAULT_ABI != ABI_V4)
11206 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
11208 tree elem_type = TREE_TYPE (type);
11209 enum machine_mode elem_mode = TYPE_MODE (elem_type);
11210 int elem_size = GET_MODE_SIZE (elem_mode);
11212 if (elem_size < UNITS_PER_WORD)
11214 tree real_part, imag_part;
11215 gimple_seq post = NULL;
11217 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11218 &post);
11219 /* Copy the value into a temporary, lest the formal temporary
11220 be reused out from under us. */
11221 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
11222 gimple_seq_add_seq (pre_p, post);
11224 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11225 post_p);
11227 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
11231 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
11234 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11235 f_fpr = DECL_CHAIN (f_gpr);
11236 f_res = DECL_CHAIN (f_fpr);
11237 f_ovf = DECL_CHAIN (f_res);
11238 f_sav = DECL_CHAIN (f_ovf);
11240 valist = build_va_arg_indirect_ref (valist);
11241 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11242 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11243 f_fpr, NULL_TREE);
11244 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11245 f_ovf, NULL_TREE);
11246 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11247 f_sav, NULL_TREE);
11249 size = int_size_in_bytes (type);
11250 rsize = (size + 3) / 4;
11251 align = 1;
11253 if (TARGET_HARD_FLOAT && TARGET_FPRS
11254 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
11255 || (TARGET_DOUBLE_FLOAT
11256 && (TYPE_MODE (type) == DFmode
11257 || TYPE_MODE (type) == TFmode
11258 || TYPE_MODE (type) == SDmode
11259 || TYPE_MODE (type) == DDmode
11260 || TYPE_MODE (type) == TDmode))))
11262 /* FP args go in FP registers, if present. */
11263 reg = fpr;
11264 n_reg = (size + 7) / 8;
11265 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
11266 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
11267 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
11268 align = 8;
11270 else
11272 /* Otherwise into GP registers. */
11273 reg = gpr;
11274 n_reg = rsize;
11275 sav_ofs = 0;
11276 sav_scale = 4;
11277 if (n_reg == 2)
11278 align = 8;
11281 /* Pull the value out of the saved registers.... */
11283 lab_over = NULL;
11284 addr = create_tmp_var (ptr_type_node, "addr");
11286 /* AltiVec vectors never go in registers when -mabi=altivec. */
11287 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11288 align = 16;
11289 else
11291 lab_false = create_artificial_label (input_location);
11292 lab_over = create_artificial_label (input_location);
11294 /* Long long and SPE vectors are aligned in the registers.
11295 As is any other 2-gpr item, such as complex int, due to a
11296 historical mistake. */
11297 u = reg;
11298 if (n_reg == 2 && reg == gpr)
11300 regalign = 1;
11301 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11302 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11303 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11304 unshare_expr (reg), u);
11306 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11307 reg number is 0 for f1, so we want to make it odd. */
11308 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11310 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11311 build_int_cst (TREE_TYPE (reg), 1));
11312 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11315 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11316 t = build2 (GE_EXPR, boolean_type_node, u, t);
11317 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11318 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11319 gimplify_and_add (t, pre_p);
11321 t = sav;
11322 if (sav_ofs)
11323 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11325 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11326 build_int_cst (TREE_TYPE (reg), n_reg));
11327 u = fold_convert (sizetype, u);
11328 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11329 t = fold_build_pointer_plus (t, u);
11331 /* _Decimal32 varargs are located in the second word of the 64-bit
11332 FP register for 32-bit binaries. */
11333 if (!TARGET_POWERPC64
11334 && TARGET_HARD_FLOAT && TARGET_FPRS
11335 && TYPE_MODE (type) == SDmode)
11336 t = fold_build_pointer_plus_hwi (t, size);
11338 gimplify_assign (addr, t, pre_p);
11340 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11342 stmt = gimple_build_label (lab_false);
11343 gimple_seq_add_stmt (pre_p, stmt);
11345 if ((n_reg == 2 && !regalign) || n_reg > 2)
11347 /* Ensure that we don't find any more args in regs.
11348 Alignment has taken care of the special cases. */
11349 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11353 /* ... otherwise out of the overflow area. */
11355 /* Care for on-stack alignment if needed. */
11356 t = ovf;
11357 if (align != 1)
11359 t = fold_build_pointer_plus_hwi (t, align - 1);
11360 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11361 build_int_cst (TREE_TYPE (t), -align));
11363 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11365 gimplify_assign (unshare_expr (addr), t, pre_p);
11367 t = fold_build_pointer_plus_hwi (t, size);
11368 gimplify_assign (unshare_expr (ovf), t, pre_p);
11370 if (lab_over)
11372 stmt = gimple_build_label (lab_over);
11373 gimple_seq_add_stmt (pre_p, stmt);
11376 if (STRICT_ALIGNMENT
11377 && (TYPE_ALIGN (type)
11378 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11380 /* The value (of type complex double, for example) may not be
11381 aligned in memory in the saved registers, so copy via a
11382 temporary. (This is the same code as used for SPARC.) */
11383 tree tmp = create_tmp_var (type, "va_arg_tmp");
11384 tree dest_addr = build_fold_addr_expr (tmp);
11386 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11387 3, dest_addr, addr, size_int (rsize * 4));
11389 gimplify_and_add (copy, pre_p);
11390 addr = dest_addr;
11393 addr = fold_convert (ptrtype, addr);
11394 return build_va_arg_indirect_ref (addr);
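/* Shape of the code generated above for an ordinary V4 scalar argument
   (editor's sketch; 8 is the number of argument registers of either kind,
   and the TDmode/regalign special cases are omitted):

       if (reg > 8 - n_reg)                  // not enough arg regs left
         goto on_stack;
       addr = sav + sav_ofs + reg * sav_scale;
       reg += n_reg;                         // consume the registers
       goto done;
     on_stack:
       reg = 8;                              // no more args in registers
       ovf = (ovf + align - 1) & -align;     // honor on-stack alignment
       addr = ovf;
       ovf += size;
     done:
       result = *(type *) addr;
*/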
11397 /* Builtins. */
11399 static void
11400 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11402 tree t;
11403 unsigned classify = rs6000_builtin_info[(int)code].attr;
11404 const char *attr_string = "";
11406 gcc_assert (name != NULL);
11407 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11409 if (rs6000_builtin_decls[(int)code])
11410 fatal_error ("internal error: builtin function %s already processed", name);
11412 rs6000_builtin_decls[(int)code] = t =
11413 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11415 /* Set any special attributes. */
11416 if ((classify & RS6000_BTC_CONST) != 0)
11418 /* const function, function only depends on the inputs. */
11419 TREE_READONLY (t) = 1;
11420 TREE_NOTHROW (t) = 1;
11421 attr_string = ", const";
11423 else if ((classify & RS6000_BTC_PURE) != 0)
11425 /* pure function, function can read global memory, but does not set any
11426 external state. */
11427 DECL_PURE_P (t) = 1;
11428 TREE_NOTHROW (t) = 1;
11429 attr_string = ", pure";
11431 else if ((classify & RS6000_BTC_FP) != 0)
11433 /* Function is a math function. If rounding mode is on, then treat the
11434 function as not reading global memory, but it can have arbitrary side
11435 effects. If it is off, then assume the function is a const function.
11436 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11437 builtin-attrs.def that is used for the math functions. */
11438 TREE_NOTHROW (t) = 1;
11439 if (flag_rounding_math)
11441 DECL_PURE_P (t) = 1;
11442 DECL_IS_NOVOPS (t) = 1;
11443 attr_string = ", fp, pure";
11445 else
11447 TREE_READONLY (t) = 1;
11448 attr_string = ", fp, const";
11451 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11452 gcc_unreachable ();
11454 if (TARGET_DEBUG_BUILTIN)
11455 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11456 (int)code, name, attr_string);
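/* Typical call site for def_builtin elsewhere in this file (editor's
   example; the function-type node name is illustrative):

       def_builtin ("__builtin_altivec_vspltisb",
                    v16qi_ftype_int,            // illustrative type node
                    ALTIVEC_BUILTIN_VSPLTISB);
*/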
11459 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11461 #undef RS6000_BUILTIN_1
11462 #undef RS6000_BUILTIN_2
11463 #undef RS6000_BUILTIN_3
11464 #undef RS6000_BUILTIN_A
11465 #undef RS6000_BUILTIN_D
11466 #undef RS6000_BUILTIN_E
11467 #undef RS6000_BUILTIN_H
11468 #undef RS6000_BUILTIN_P
11469 #undef RS6000_BUILTIN_Q
11470 #undef RS6000_BUILTIN_S
11471 #undef RS6000_BUILTIN_X
11473 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11474 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11475 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11476 { MASK, ICODE, NAME, ENUM },
11478 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11479 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11480 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11481 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11482 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11483 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11484 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11485 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11487 static const struct builtin_description bdesc_3arg[] =
11489 #include "rs6000-builtin.def"
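/* The table above is built with the classic "X macro" idiom: every
   RS6000_BUILTIN_* macro is redefined so that exactly one builtin class
   expands to a table entry while all the others expand to nothing, and
   then the master list in rs6000-builtin.def is included again.  A
   minimal standalone sketch of the same technique (editor's illustration,
   hypothetical names):

       #define BUILTIN_LIST \
         DEF_UNARY  (MY_NEG, "neg") \
         DEF_BINARY (MY_ADD, "add")

       #define DEF_UNARY(ENUM, NAME)              // discard unary entries
       #define DEF_BINARY(ENUM, NAME) { NAME },
       static const struct { const char *name; } my_binary_ops[] =
       {
         BUILTIN_LIST                             // expands to { "add" },
       };
       #undef DEF_UNARY
       #undef DEF_BINARY
*/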
11492 /* DST operations: void foo (void *, const int, const char). */
11494 #undef RS6000_BUILTIN_1
11495 #undef RS6000_BUILTIN_2
11496 #undef RS6000_BUILTIN_3
11497 #undef RS6000_BUILTIN_A
11498 #undef RS6000_BUILTIN_D
11499 #undef RS6000_BUILTIN_E
11500 #undef RS6000_BUILTIN_H
11501 #undef RS6000_BUILTIN_P
11502 #undef RS6000_BUILTIN_Q
11503 #undef RS6000_BUILTIN_S
11504 #undef RS6000_BUILTIN_X
11506 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11507 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11508 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11509 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11510 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11511 { MASK, ICODE, NAME, ENUM },
11513 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11514 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11515 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11516 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11517 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11518 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11520 static const struct builtin_description bdesc_dst[] =
11522 #include "rs6000-builtin.def"
11525 /* Simple binary operations: VECc = foo (VECa, VECb). */
11527 #undef RS6000_BUILTIN_1
11528 #undef RS6000_BUILTIN_2
11529 #undef RS6000_BUILTIN_3
11530 #undef RS6000_BUILTIN_A
11531 #undef RS6000_BUILTIN_D
11532 #undef RS6000_BUILTIN_E
11533 #undef RS6000_BUILTIN_H
11534 #undef RS6000_BUILTIN_P
11535 #undef RS6000_BUILTIN_Q
11536 #undef RS6000_BUILTIN_S
11537 #undef RS6000_BUILTIN_X
11539 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11540 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11541 { MASK, ICODE, NAME, ENUM },
11543 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11544 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11545 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11546 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11547 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11548 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11549 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11550 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11551 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11553 static const struct builtin_description bdesc_2arg[] =
11555 #include "rs6000-builtin.def"
11558 #undef RS6000_BUILTIN_1
11559 #undef RS6000_BUILTIN_2
11560 #undef RS6000_BUILTIN_3
11561 #undef RS6000_BUILTIN_A
11562 #undef RS6000_BUILTIN_D
11563 #undef RS6000_BUILTIN_E
11564 #undef RS6000_BUILTIN_H
11565 #undef RS6000_BUILTIN_P
11566 #undef RS6000_BUILTIN_Q
11567 #undef RS6000_BUILTIN_S
11568 #undef RS6000_BUILTIN_X
11570 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11571 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11572 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11573 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11574 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11575 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11576 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11577 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11578 { MASK, ICODE, NAME, ENUM },
11580 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11581 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11582 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11584 /* AltiVec predicates. */
11586 static const struct builtin_description bdesc_altivec_preds[] =
11588 #include "rs6000-builtin.def"
11591 /* SPE predicates. */
11592 #undef RS6000_BUILTIN_1
11593 #undef RS6000_BUILTIN_2
11594 #undef RS6000_BUILTIN_3
11595 #undef RS6000_BUILTIN_A
11596 #undef RS6000_BUILTIN_D
11597 #undef RS6000_BUILTIN_E
11598 #undef RS6000_BUILTIN_H
11599 #undef RS6000_BUILTIN_P
11600 #undef RS6000_BUILTIN_Q
11601 #undef RS6000_BUILTIN_S
11602 #undef RS6000_BUILTIN_X
11604 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11605 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11606 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11607 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11608 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11609 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11610 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11611 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11612 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11613 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11614 { MASK, ICODE, NAME, ENUM },
11616 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11618 static const struct builtin_description bdesc_spe_predicates[] =
11620 #include "rs6000-builtin.def"
11623 /* SPE evsel predicates. */
11624 #undef RS6000_BUILTIN_1
11625 #undef RS6000_BUILTIN_2
11626 #undef RS6000_BUILTIN_3
11627 #undef RS6000_BUILTIN_A
11628 #undef RS6000_BUILTIN_D
11629 #undef RS6000_BUILTIN_E
11630 #undef RS6000_BUILTIN_H
11631 #undef RS6000_BUILTIN_P
11632 #undef RS6000_BUILTIN_Q
11633 #undef RS6000_BUILTIN_S
11634 #undef RS6000_BUILTIN_X
11636 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11637 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11638 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11639 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11640 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11641 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11642 { MASK, ICODE, NAME, ENUM },
11644 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11645 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11646 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11647 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11648 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11650 static const struct builtin_description bdesc_spe_evsel[] =
11652 #include "rs6000-builtin.def"
11655 /* PAIRED predicates. */
11656 #undef RS6000_BUILTIN_1
11657 #undef RS6000_BUILTIN_2
11658 #undef RS6000_BUILTIN_3
11659 #undef RS6000_BUILTIN_A
11660 #undef RS6000_BUILTIN_D
11661 #undef RS6000_BUILTIN_E
11662 #undef RS6000_BUILTIN_H
11663 #undef RS6000_BUILTIN_P
11664 #undef RS6000_BUILTIN_Q
11665 #undef RS6000_BUILTIN_S
11666 #undef RS6000_BUILTIN_X
11668 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11669 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11670 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11671 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11672 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11673 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11674 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11675 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11676 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11677 { MASK, ICODE, NAME, ENUM },
11679 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11680 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11682 static const struct builtin_description bdesc_paired_preds[] =
11684 #include "rs6000-builtin.def"
11687 /* ABS* operations. */
11689 #undef RS6000_BUILTIN_1
11690 #undef RS6000_BUILTIN_2
11691 #undef RS6000_BUILTIN_3
11692 #undef RS6000_BUILTIN_A
11693 #undef RS6000_BUILTIN_D
11694 #undef RS6000_BUILTIN_E
11695 #undef RS6000_BUILTIN_H
11696 #undef RS6000_BUILTIN_P
11697 #undef RS6000_BUILTIN_Q
11698 #undef RS6000_BUILTIN_S
11699 #undef RS6000_BUILTIN_X
11701 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11702 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11703 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11704 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11705 { MASK, ICODE, NAME, ENUM },
11707 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11708 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11709 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11710 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11711 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11712 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11713 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11715 static const struct builtin_description bdesc_abs[] =
11717 #include "rs6000-builtin.def"
11720 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11721 foo (VECa). */
11723 #undef RS6000_BUILTIN_1
11724 #undef RS6000_BUILTIN_2
11725 #undef RS6000_BUILTIN_3
11726 #undef RS6000_BUILTIN_A
11727 #undef RS6000_BUILTIN_D
11728 #undef RS6000_BUILTIN_E
11729 #undef RS6000_BUILTIN_H
11730 #undef RS6000_BUILTIN_P
11731 #undef RS6000_BUILTIN_Q
11732 #undef RS6000_BUILTIN_S
11733 #undef RS6000_BUILTIN_X
11735 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11736 { MASK, ICODE, NAME, ENUM },
11738 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11739 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11740 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11741 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11742 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11743 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11744 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11745 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11746 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11747 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11749 static const struct builtin_description bdesc_1arg[] =
11751 #include "rs6000-builtin.def"
11754 /* HTM builtins. */
11755 #undef RS6000_BUILTIN_1
11756 #undef RS6000_BUILTIN_2
11757 #undef RS6000_BUILTIN_3
11758 #undef RS6000_BUILTIN_A
11759 #undef RS6000_BUILTIN_D
11760 #undef RS6000_BUILTIN_E
11761 #undef RS6000_BUILTIN_H
11762 #undef RS6000_BUILTIN_P
11763 #undef RS6000_BUILTIN_Q
11764 #undef RS6000_BUILTIN_S
11765 #undef RS6000_BUILTIN_X
11767 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11768 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11769 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11770 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11771 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11772 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11773 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11774 { MASK, ICODE, NAME, ENUM },
11776 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11777 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11778 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11779 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11781 static const struct builtin_description bdesc_htm[] =
11783 #include "rs6000-builtin.def"
11786 #undef RS6000_BUILTIN_1
11787 #undef RS6000_BUILTIN_2
11788 #undef RS6000_BUILTIN_3
11789 #undef RS6000_BUILTIN_A
11790 #undef RS6000_BUILTIN_D
11791 #undef RS6000_BUILTIN_E
11792 #undef RS6000_BUILTIN_H
11793 #undef RS6000_BUILTIN_P
11794 #undef RS6000_BUILTIN_Q
11795 #undef RS6000_BUILTIN_S
11796 #undef RS6000_BUILTIN_X
11797 /* Return true if a builtin function is overloaded. */
11798 bool
11799 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
11801 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
11804 /* Expand an expression EXP that calls a builtin without arguments. */
11805 static rtx
11806 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
11808 rtx pat;
11809 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11811 if (icode == CODE_FOR_nothing)
11812 /* Builtin not supported on this processor. */
11813 return 0;
11815 if (target == 0
11816 || GET_MODE (target) != tmode
11817 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11818 target = gen_reg_rtx (tmode);
11820 pat = GEN_FCN (icode) (target);
11821 if (! pat)
11822 return 0;
11823 emit_insn (pat);
11825 return target;
11829 static rtx
11830 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
11832 rtx pat;
11833 tree arg0 = CALL_EXPR_ARG (exp, 0);
11834 tree arg1 = CALL_EXPR_ARG (exp, 1);
11835 rtx op0 = expand_normal (arg0);
11836 rtx op1 = expand_normal (arg1);
11837 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11838 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11840 if (icode == CODE_FOR_nothing)
11841 /* Builtin not supported on this processor. */
11842 return 0;
11844 /* If we got invalid arguments bail out before generating bad rtl. */
11845 if (arg0 == error_mark_node || arg1 == error_mark_node)
11846 return const0_rtx;
11848 if (GET_CODE (op0) != CONST_INT
11849 || INTVAL (op0) > 255
11850 || INTVAL (op0) < 0)
11852 error ("argument 1 must be an 8-bit field value");
11853 return const0_rtx;
11856 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11857 op0 = copy_to_mode_reg (mode0, op0);
11859 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11860 op1 = copy_to_mode_reg (mode1, op1);
11862 pat = GEN_FCN (icode) (op0, op1);
11863 if (! pat)
11864 return const0_rtx;
11865 emit_insn (pat);
11867 return NULL_RTX;
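/* Source-level view of the check above (editor's example): the FPSCR
   field mask passed to __builtin_mtfsf must be a literal in 0..255.

       void set_fpscr (double d)   { __builtin_mtfsf (0xff, d); }  // accepted
       void bad (int m, double d)  { __builtin_mtfsf (m, d); }     // rejected:
                                     // "argument 1 must be an 8-bit field value"
*/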
11871 static rtx
11872 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
11874 rtx pat;
11875 tree arg0 = CALL_EXPR_ARG (exp, 0);
11876 rtx op0 = expand_normal (arg0);
11877 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11878 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11880 if (icode == CODE_FOR_nothing)
11881 /* Builtin not supported on this processor. */
11882 return 0;
11884 /* If we got invalid arguments bail out before generating bad rtl. */
11885 if (arg0 == error_mark_node)
11886 return const0_rtx;
11888 if (icode == CODE_FOR_altivec_vspltisb
11889 || icode == CODE_FOR_altivec_vspltish
11890 || icode == CODE_FOR_altivec_vspltisw
11891 || icode == CODE_FOR_spe_evsplatfi
11892 || icode == CODE_FOR_spe_evsplati)
11894 /* Only allow 5-bit *signed* literals. */
11895 if (GET_CODE (op0) != CONST_INT
11896 || INTVAL (op0) > 15
11897 || INTVAL (op0) < -16)
11899 error ("argument 1 must be a 5-bit signed literal");
11900 return const0_rtx;
11904 if (target == 0
11905 || GET_MODE (target) != tmode
11906 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11907 target = gen_reg_rtx (tmode);
11909 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11910 op0 = copy_to_mode_reg (mode0, op0);
11912 pat = GEN_FCN (icode) (target, op0);
11913 if (! pat)
11914 return 0;
11915 emit_insn (pat);
11917 return target;
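/* Effect of the 5-bit signed literal check above at the source level
   (editor's example):

       vector signed char a = __builtin_altivec_vspltisb (-16);  // ok: in -16..15
       vector signed char b = __builtin_altivec_vspltisb (31);   // rejected above
*/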
11920 static rtx
11921 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
11923 rtx pat, scratch1, scratch2;
11924 tree arg0 = CALL_EXPR_ARG (exp, 0);
11925 rtx op0 = expand_normal (arg0);
11926 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11927 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11929 /* If we have invalid arguments, bail out before generating bad rtl. */
11930 if (arg0 == error_mark_node)
11931 return const0_rtx;
11933 if (target == 0
11934 || GET_MODE (target) != tmode
11935 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11936 target = gen_reg_rtx (tmode);
11938 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11939 op0 = copy_to_mode_reg (mode0, op0);
11941 scratch1 = gen_reg_rtx (mode0);
11942 scratch2 = gen_reg_rtx (mode0);
11944 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
11945 if (! pat)
11946 return 0;
11947 emit_insn (pat);
11949 return target;
11952 static rtx
11953 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
11955 rtx pat;
11956 tree arg0 = CALL_EXPR_ARG (exp, 0);
11957 tree arg1 = CALL_EXPR_ARG (exp, 1);
11958 rtx op0 = expand_normal (arg0);
11959 rtx op1 = expand_normal (arg1);
11960 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11961 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11962 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11964 if (icode == CODE_FOR_nothing)
11965 /* Builtin not supported on this processor. */
11966 return 0;
11968 /* If we got invalid arguments bail out before generating bad rtl. */
11969 if (arg0 == error_mark_node || arg1 == error_mark_node)
11970 return const0_rtx;
11972 if (icode == CODE_FOR_altivec_vcfux
11973 || icode == CODE_FOR_altivec_vcfsx
11974 || icode == CODE_FOR_altivec_vctsxs
11975 || icode == CODE_FOR_altivec_vctuxs
11976 || icode == CODE_FOR_altivec_vspltb
11977 || icode == CODE_FOR_altivec_vsplth
11978 || icode == CODE_FOR_altivec_vspltw
11979 || icode == CODE_FOR_spe_evaddiw
11980 || icode == CODE_FOR_spe_evldd
11981 || icode == CODE_FOR_spe_evldh
11982 || icode == CODE_FOR_spe_evldw
11983 || icode == CODE_FOR_spe_evlhhesplat
11984 || icode == CODE_FOR_spe_evlhhossplat
11985 || icode == CODE_FOR_spe_evlhhousplat
11986 || icode == CODE_FOR_spe_evlwhe
11987 || icode == CODE_FOR_spe_evlwhos
11988 || icode == CODE_FOR_spe_evlwhou
11989 || icode == CODE_FOR_spe_evlwhsplat
11990 || icode == CODE_FOR_spe_evlwwsplat
11991 || icode == CODE_FOR_spe_evrlwi
11992 || icode == CODE_FOR_spe_evslwi
11993 || icode == CODE_FOR_spe_evsrwis
11994 || icode == CODE_FOR_spe_evsubifw
11995 || icode == CODE_FOR_spe_evsrwiu)
11997 /* Only allow 5-bit unsigned literals. */
11998 STRIP_NOPS (arg1);
11999 if (TREE_CODE (arg1) != INTEGER_CST
12000 || TREE_INT_CST_LOW (arg1) & ~0x1f)
12002 error ("argument 2 must be a 5-bit unsigned literal");
12003 return const0_rtx;
12007 if (target == 0
12008 || GET_MODE (target) != tmode
12009 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12010 target = gen_reg_rtx (tmode);
12012 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12013 op0 = copy_to_mode_reg (mode0, op0);
12014 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12015 op1 = copy_to_mode_reg (mode1, op1);
12017 pat = GEN_FCN (icode) (target, op0, op1);
12018 if (! pat)
12019 return 0;
12020 emit_insn (pat);
12022 return target;
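/* Source-level effect of the 5-bit unsigned literal check above
   (editor's example):

       vector int v = { 1, 2, 3, 4 };
       vector int s = __builtin_altivec_vspltw (v, 1);   // ok: literal in 0..31
       int n = 1;
       vector int t = __builtin_altivec_vspltw (v, n);   // error: not a literal
*/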
12025 static rtx
12026 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
12028 rtx pat, scratch;
12029 tree cr6_form = CALL_EXPR_ARG (exp, 0);
12030 tree arg0 = CALL_EXPR_ARG (exp, 1);
12031 tree arg1 = CALL_EXPR_ARG (exp, 2);
12032 rtx op0 = expand_normal (arg0);
12033 rtx op1 = expand_normal (arg1);
12034 enum machine_mode tmode = SImode;
12035 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12036 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12037 int cr6_form_int;
12039 if (TREE_CODE (cr6_form) != INTEGER_CST)
12041 error ("argument 1 of __builtin_altivec_predicate must be a constant");
12042 return const0_rtx;
12044 else
12045 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
12047 gcc_assert (mode0 == mode1);
12049 /* If we have invalid arguments, bail out before generating bad rtl. */
12050 if (arg0 == error_mark_node || arg1 == error_mark_node)
12051 return const0_rtx;
12053 if (target == 0
12054 || GET_MODE (target) != tmode
12055 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12056 target = gen_reg_rtx (tmode);
12058 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12059 op0 = copy_to_mode_reg (mode0, op0);
12060 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12061 op1 = copy_to_mode_reg (mode1, op1);
12063 scratch = gen_reg_rtx (mode0);
12065 pat = GEN_FCN (icode) (scratch, op0, op1);
12066 if (! pat)
12067 return 0;
12068 emit_insn (pat);
12070 /* The vec_any* and vec_all* predicates use the same opcodes for two
12071 different operations, but the bits in CR6 will be different
12072 depending on what information we want. So we have to play tricks
12073 with CR6 to get the right bits out.
12075 If you think this is disgusting, look at the specs for the
12076 AltiVec predicates. */
12078 switch (cr6_form_int)
12080 case 0:
12081 emit_insn (gen_cr6_test_for_zero (target));
12082 break;
12083 case 1:
12084 emit_insn (gen_cr6_test_for_zero_reverse (target));
12085 break;
12086 case 2:
12087 emit_insn (gen_cr6_test_for_lt (target));
12088 break;
12089 case 3:
12090 emit_insn (gen_cr6_test_for_lt_reverse (target));
12091 break;
12092 default:
12093 error ("argument 1 of __builtin_altivec_predicate is out of range");
12094 break;
12097 return target;
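/* Example of the CR6 trickery at the source level (editor's sketch): with
   altivec.h included, vec_all_eq and vec_any_eq compile to the very same
   vcmpequw. instruction; only the cr6_form operand, and hence which of
   the gen_cr6_test_* patterns is emitted above, differs:

       int all = vec_all_eq (a, b);   // tests the "all elements" CR6 sense
       int any = vec_any_eq (a, b);   // tests the "any element" CR6 sense
*/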
12100 static rtx
12101 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
12103 rtx pat, addr;
12104 tree arg0 = CALL_EXPR_ARG (exp, 0);
12105 tree arg1 = CALL_EXPR_ARG (exp, 1);
12106 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12107 enum machine_mode mode0 = Pmode;
12108 enum machine_mode mode1 = Pmode;
12109 rtx op0 = expand_normal (arg0);
12110 rtx op1 = expand_normal (arg1);
12112 if (icode == CODE_FOR_nothing)
12113 /* Builtin not supported on this processor. */
12114 return 0;
12116 /* If we got invalid arguments bail out before generating bad rtl. */
12117 if (arg0 == error_mark_node || arg1 == error_mark_node)
12118 return const0_rtx;
12120 if (target == 0
12121 || GET_MODE (target) != tmode
12122 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12123 target = gen_reg_rtx (tmode);
12125 op1 = copy_to_mode_reg (mode1, op1);
12127 if (op0 == const0_rtx)
12129 addr = gen_rtx_MEM (tmode, op1);
12131 else
12133 op0 = copy_to_mode_reg (mode0, op0);
12134 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
12137 pat = GEN_FCN (icode) (target, addr);
12139 if (! pat)
12140 return 0;
12141 emit_insn (pat);
12143 return target;
12146 /* Return a constant vector for use as a little-endian permute control vector
12147 to reverse the order of elements of the given vector mode. */
12148 static rtx
12149 swap_selector_for_mode (enum machine_mode mode)
12151 /* These are little endian vectors, so their elements are reversed
12152 from what you would normally expect for a permute control vector. */
12153 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
12154 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
12155 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
12156 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
12157 unsigned int *swaparray, i;
12158 rtx perm[16];
12160 switch (mode)
12162 case V2DFmode:
12163 case V2DImode:
12164 swaparray = swap2;
12165 break;
12166 case V4SFmode:
12167 case V4SImode:
12168 swaparray = swap4;
12169 break;
12170 case V8HImode:
12171 swaparray = swap8;
12172 break;
12173 case V16QImode:
12174 swaparray = swap16;
12175 break;
12176 default:
12177 gcc_unreachable ();
12180 for (i = 0; i < 16; ++i)
12181 perm[i] = GEN_INT (swaparray[i]);
12183 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
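/* Worked example (editor's note): for V4SImode the selector bytes are
   swap4 = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.  As the comment
   above says, vperm numbers selector bytes in big-endian order while the
   little-endian register image holds the data reversed, so the net effect
   of a vperm with this selector is to reverse the four 32-bit elements:

       // { e0, e1, e2, e3 }  -->  { e3, e2, e1, e0 }
*/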
12186 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
12187 with -maltivec=be specified. Issue the load followed by an element-reversing
12188 permute. */
12189 void
12190 altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12192 rtx tmp = gen_reg_rtx (mode);
12193 rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
12194 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12195 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
12196 rtx sel = swap_selector_for_mode (mode);
12197 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
12199 gcc_assert (REG_P (op0));
12200 emit_insn (par);
12201 emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
12204 /* Generate code for a "stvx" or "stvxl" built-in for a little endian target
12205 with -maltivec=be specified. Issue the store preceded by an element-reversing
12206 permute. */
12207 void
12208 altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12210 rtx tmp = gen_reg_rtx (mode);
12211 rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
12212 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12213 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
12214 rtx sel = swap_selector_for_mode (mode);
12215 rtx vperm;
12217 gcc_assert (REG_P (op1));
12218 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12219 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12220 emit_insn (par);
12223 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
12224 specified. Issue the store preceded by an element-reversing permute. */
12225 void
12226 altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12228 enum machine_mode inner_mode = GET_MODE_INNER (mode);
12229 rtx tmp = gen_reg_rtx (mode);
12230 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
12231 rtx sel = swap_selector_for_mode (mode);
12232 rtx vperm;
12234 gcc_assert (REG_P (op1));
12235 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12236 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12237 emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
12240 static rtx
12241 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
12243 rtx pat, addr;
12244 tree arg0 = CALL_EXPR_ARG (exp, 0);
12245 tree arg1 = CALL_EXPR_ARG (exp, 1);
12246 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12247 enum machine_mode mode0 = Pmode;
12248 enum machine_mode mode1 = Pmode;
12249 rtx op0 = expand_normal (arg0);
12250 rtx op1 = expand_normal (arg1);
12252 if (icode == CODE_FOR_nothing)
12253 /* Builtin not supported on this processor. */
12254 return 0;
12256 /* If we got invalid arguments bail out before generating bad rtl. */
12257 if (arg0 == error_mark_node || arg1 == error_mark_node)
12258 return const0_rtx;
12260 if (target == 0
12261 || GET_MODE (target) != tmode
12262 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12263 target = gen_reg_rtx (tmode);
12265 op1 = copy_to_mode_reg (mode1, op1);
12267 if (op0 == const0_rtx)
12269 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
12271 else
12273 op0 = copy_to_mode_reg (mode0, op0);
12274 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
12277 pat = GEN_FCN (icode) (target, addr);
12279 if (! pat)
12280 return 0;
12281 emit_insn (pat);
12283 return target;
12286 static rtx
12287 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12289 tree arg0 = CALL_EXPR_ARG (exp, 0);
12290 tree arg1 = CALL_EXPR_ARG (exp, 1);
12291 tree arg2 = CALL_EXPR_ARG (exp, 2);
12292 rtx op0 = expand_normal (arg0);
12293 rtx op1 = expand_normal (arg1);
12294 rtx op2 = expand_normal (arg2);
12295 rtx pat;
12296 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12297 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12298 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
12300 /* Invalid arguments. Bail before doing anything stoopid! */
12301 if (arg0 == error_mark_node
12302 || arg1 == error_mark_node
12303 || arg2 == error_mark_node)
12304 return const0_rtx;
12306 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12307 op0 = copy_to_mode_reg (mode2, op0);
12308 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12309 op1 = copy_to_mode_reg (mode0, op1);
12310 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12311 op2 = copy_to_mode_reg (mode1, op2);
12313 pat = GEN_FCN (icode) (op1, op2, op0);
12314 if (pat)
12315 emit_insn (pat);
12316 return NULL_RTX;
12319 static rtx
12320 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12322 tree arg0 = CALL_EXPR_ARG (exp, 0);
12323 tree arg1 = CALL_EXPR_ARG (exp, 1);
12324 tree arg2 = CALL_EXPR_ARG (exp, 2);
12325 rtx op0 = expand_normal (arg0);
12326 rtx op1 = expand_normal (arg1);
12327 rtx op2 = expand_normal (arg2);
12328 rtx pat, addr;
12329 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12330 enum machine_mode mode1 = Pmode;
12331 enum machine_mode mode2 = Pmode;
12333 /* Invalid arguments. Bail before doing anything stoopid! */
12334 if (arg0 == error_mark_node
12335 || arg1 == error_mark_node
12336 || arg2 == error_mark_node)
12337 return const0_rtx;
12339 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12340 op0 = copy_to_mode_reg (tmode, op0);
12342 op2 = copy_to_mode_reg (mode2, op2);
12344 if (op1 == const0_rtx)
12346 addr = gen_rtx_MEM (tmode, op2);
12348 else
12350 op1 = copy_to_mode_reg (mode1, op1);
12351 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12354 pat = GEN_FCN (icode) (addr, op0);
12355 if (pat)
12356 emit_insn (pat);
12357 return NULL_RTX;
12360 static rtx
12361 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12363 tree arg0 = CALL_EXPR_ARG (exp, 0);
12364 tree arg1 = CALL_EXPR_ARG (exp, 1);
12365 tree arg2 = CALL_EXPR_ARG (exp, 2);
12366 rtx op0 = expand_normal (arg0);
12367 rtx op1 = expand_normal (arg1);
12368 rtx op2 = expand_normal (arg2);
12369 rtx pat, addr;
12370 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12371 enum machine_mode smode = insn_data[icode].operand[1].mode;
12372 enum machine_mode mode1 = Pmode;
12373 enum machine_mode mode2 = Pmode;
12375 /* Invalid arguments. Bail before doing anything stoopid! */
12376 if (arg0 == error_mark_node
12377 || arg1 == error_mark_node
12378 || arg2 == error_mark_node)
12379 return const0_rtx;
12381 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12382 op0 = copy_to_mode_reg (smode, op0);
12384 op2 = copy_to_mode_reg (mode2, op2);
12386 if (op1 == const0_rtx)
12388 addr = gen_rtx_MEM (tmode, op2);
12390 else
12392 op1 = copy_to_mode_reg (mode1, op1);
12393 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12396 pat = GEN_FCN (icode) (addr, op0);
12397 if (pat)
12398 emit_insn (pat);
12399 return NULL_RTX;
12402 /* Return the appropriate SPR number associated with the given builtin. */
12403 static inline HOST_WIDE_INT
12404 htm_spr_num (enum rs6000_builtins code)
12406 if (code == HTM_BUILTIN_GET_TFHAR
12407 || code == HTM_BUILTIN_SET_TFHAR)
12408 return TFHAR_SPR;
12409 else if (code == HTM_BUILTIN_GET_TFIAR
12410 || code == HTM_BUILTIN_SET_TFIAR)
12411 return TFIAR_SPR;
12412 else if (code == HTM_BUILTIN_GET_TEXASR
12413 || code == HTM_BUILTIN_SET_TEXASR)
12414 return TEXASR_SPR;
12415 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12416 || code == HTM_BUILTIN_SET_TEXASRU);
12417 return TEXASRU_SPR;
12420 /* Return the appropriate SPR regno associated with the given builtin. */
12421 static inline HOST_WIDE_INT
12422 htm_spr_regno (enum rs6000_builtins code)
12424 if (code == HTM_BUILTIN_GET_TFHAR
12425 || code == HTM_BUILTIN_SET_TFHAR)
12426 return TFHAR_REGNO;
12427 else if (code == HTM_BUILTIN_GET_TFIAR
12428 || code == HTM_BUILTIN_SET_TFIAR)
12429 return TFIAR_REGNO;
12430 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12431 || code == HTM_BUILTIN_SET_TEXASR
12432 || code == HTM_BUILTIN_GET_TEXASRU
12433 || code == HTM_BUILTIN_SET_TEXASRU);
12434 return TEXASR_REGNO;
12437 /* Return the correct ICODE value depending on whether we are
12438 setting or reading the HTM SPRs. */
12439 static inline enum insn_code
12440 rs6000_htm_spr_icode (bool nonvoid)
12442 if (nonvoid)
12443 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12444 else
12445 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
12448 /* Expand the HTM builtin in EXP and store the result in TARGET.
12449 Store true in *EXPANDEDP if we found a builtin to expand. */
12450 static rtx
12451 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12453 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12454 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12455 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12456 const struct builtin_description *d;
12457 size_t i;
12459 *expandedp = false;
12461 /* Expand the HTM builtins. */
12462 d = bdesc_htm;
12463 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12464 if (d->code == fcode)
12466 rtx op[MAX_HTM_OPERANDS], pat;
12467 int nopnds = 0;
12468 tree arg;
12469 call_expr_arg_iterator iter;
12470 unsigned attr = rs6000_builtin_info[fcode].attr;
12471 enum insn_code icode = d->icode;
12473 if (attr & RS6000_BTC_SPR)
12474 icode = rs6000_htm_spr_icode (nonvoid);
12476 if (nonvoid)
12478 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12479 if (!target
12480 || GET_MODE (target) != tmode
12481 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
12482 target = gen_reg_rtx (tmode);
12483 op[nopnds++] = target;
12486 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12488 const struct insn_operand_data *insn_op;
12490 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12491 return NULL_RTX;
12493 insn_op = &insn_data[icode].operand[nopnds];
12495 op[nopnds] = expand_normal (arg);
12497 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12499 if (!strcmp (insn_op->constraint, "n"))
12501 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12502 if (!CONST_INT_P (op[nopnds]))
12503 error ("argument %d must be an unsigned literal", arg_num);
12504 else
12505 error ("argument %d is an unsigned literal that is "
12506 "out of range", arg_num);
12507 return const0_rtx;
12509 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12512 nopnds++;
12515 /* Handle the builtins for extended mnemonics. These accept
12516 no arguments, but map to builtins that take arguments. */
12517 switch (fcode)
12519 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12520 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12521 op[nopnds++] = GEN_INT (1);
12522 #ifdef ENABLE_CHECKING
12523 attr |= RS6000_BTC_UNARY;
12524 #endif
12525 break;
12526 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12527 op[nopnds++] = GEN_INT (0);
12528 #ifdef ENABLE_CHECKING
12529 attr |= RS6000_BTC_UNARY;
12530 #endif
12531 break;
12532 default:
12533 break;
12536 /* If this builtin accesses SPRs, then pass in the appropriate
12537 SPR number and SPR regno as the last two operands. */
12538 if (attr & RS6000_BTC_SPR)
12540 op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
12541 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
12544 #ifdef ENABLE_CHECKING
12545 int expected_nopnds = 0;
12546 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12547 expected_nopnds = 1;
12548 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12549 expected_nopnds = 2;
12550 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12551 expected_nopnds = 3;
12552 if (!(attr & RS6000_BTC_VOID))
12553 expected_nopnds += 1;
12554 if (attr & RS6000_BTC_SPR)
12555 expected_nopnds += 2;
12557 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12558 #endif
12560 switch (nopnds)
12562 case 1:
12563 pat = GEN_FCN (icode) (op[0]);
12564 break;
12565 case 2:
12566 pat = GEN_FCN (icode) (op[0], op[1]);
12567 break;
12568 case 3:
12569 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12570 break;
12571 case 4:
12572 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12573 break;
12574 default:
12575 gcc_unreachable ();
12577 if (!pat)
12578 return NULL_RTX;
12579 emit_insn (pat);
12581 *expandedp = true;
12582 if (nonvoid)
12583 return target;
12584 return const0_rtx;
12587 return NULL_RTX;
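/* Typical source-level use of the HTM builtins expanded above, following
   the pattern in the GCC manual (editor's example; fallback_with_lock is
   a hypothetical failure path):

       static long counter;

       void
       increment (void)
       {
         if (__builtin_tbegin (0))
           {
             counter++;                // runs transactionally
             __builtin_tend (0);
           }
         else
           fallback_with_lock ();      // hypothetical: retry or take a lock
       }
*/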
12590 static rtx
12591 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12593 rtx pat;
12594 tree arg0 = CALL_EXPR_ARG (exp, 0);
12595 tree arg1 = CALL_EXPR_ARG (exp, 1);
12596 tree arg2 = CALL_EXPR_ARG (exp, 2);
12597 rtx op0 = expand_normal (arg0);
12598 rtx op1 = expand_normal (arg1);
12599 rtx op2 = expand_normal (arg2);
12600 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12601 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12602 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12603 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
12605 if (icode == CODE_FOR_nothing)
12606 /* Builtin not supported on this processor. */
12607 return 0;
12609 /* If we got invalid arguments bail out before generating bad rtl. */
12610 if (arg0 == error_mark_node
12611 || arg1 == error_mark_node
12612 || arg2 == error_mark_node)
12613 return const0_rtx;
12615 /* Check and prepare argument depending on the instruction code.
12617 Note that a switch statement instead of the sequence of tests
12618 would be incorrect as many of the CODE_FOR values could be
12619 CODE_FOR_nothing and that would yield multiple alternatives
12620 with identical values. We'd never reach here at runtime in
12621 this case. */
12622 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12623 || icode == CODE_FOR_altivec_vsldoi_v4si
12624 || icode == CODE_FOR_altivec_vsldoi_v8hi
12625 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12627 /* Only allow 4-bit unsigned literals. */
12628 STRIP_NOPS (arg2);
12629 if (TREE_CODE (arg2) != INTEGER_CST
12630 || TREE_INT_CST_LOW (arg2) & ~0xf)
12632 error ("argument 3 must be a 4-bit unsigned literal");
12633 return const0_rtx;
12636 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12637 || icode == CODE_FOR_vsx_xxpermdi_v2di
12638 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12639 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12640 || icode == CODE_FOR_vsx_xxsldwi_v4si
12641 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12642 || icode == CODE_FOR_vsx_xxsldwi_v2di
12643 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12645 /* Only allow 2-bit unsigned literals. */
12646 STRIP_NOPS (arg2);
12647 if (TREE_CODE (arg2) != INTEGER_CST
12648 || TREE_INT_CST_LOW (arg2) & ~0x3)
12650 error ("argument 3 must be a 2-bit unsigned literal");
12651 return const0_rtx;
12654 else if (icode == CODE_FOR_vsx_set_v2df
12655 || icode == CODE_FOR_vsx_set_v2di
12656 || icode == CODE_FOR_bcdadd
12657 || icode == CODE_FOR_bcdadd_lt
12658 || icode == CODE_FOR_bcdadd_eq
12659 || icode == CODE_FOR_bcdadd_gt
12660 || icode == CODE_FOR_bcdsub
12661 || icode == CODE_FOR_bcdsub_lt
12662 || icode == CODE_FOR_bcdsub_eq
12663 || icode == CODE_FOR_bcdsub_gt)
12665 /* Only allow 1-bit unsigned literals. */
12666 STRIP_NOPS (arg2);
12667 if (TREE_CODE (arg2) != INTEGER_CST
12668 || TREE_INT_CST_LOW (arg2) & ~0x1)
12670 error ("argument 3 must be a 1-bit unsigned literal");
12671 return const0_rtx;
12674 else if (icode == CODE_FOR_dfp_ddedpd_dd
12675 || icode == CODE_FOR_dfp_ddedpd_td)
12677 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
12678 STRIP_NOPS (arg0);
12679 if (TREE_CODE (arg0) != INTEGER_CST
12681 	  || TREE_INT_CST_LOW (arg0) & ~0x3)
12682 error ("argument 1 must be 0 or 2");
12683 return const0_rtx;
12686 else if (icode == CODE_FOR_dfp_denbcd_dd
12687 || icode == CODE_FOR_dfp_denbcd_td)
12689 /* Only allow 1-bit unsigned literals. */
12690 STRIP_NOPS (arg0);
12691 if (TREE_CODE (arg0) != INTEGER_CST
12692 || TREE_INT_CST_LOW (arg0) & ~0x1)
12694 error ("argument 1 must be a 1-bit unsigned literal");
12695 return const0_rtx;
12698 else if (icode == CODE_FOR_dfp_dscli_dd
12699 || icode == CODE_FOR_dfp_dscli_td
12700 || icode == CODE_FOR_dfp_dscri_dd
12701 || icode == CODE_FOR_dfp_dscri_td)
12703 /* Only allow 6-bit unsigned literals. */
12704 STRIP_NOPS (arg1);
12705 if (TREE_CODE (arg1) != INTEGER_CST
12706 || TREE_INT_CST_LOW (arg1) & ~0x3f)
12708 error ("argument 2 must be a 6-bit unsigned literal");
12709 return const0_rtx;
12712 else if (icode == CODE_FOR_crypto_vshasigmaw
12713 || icode == CODE_FOR_crypto_vshasigmad)
12715 /* Check whether the 2nd and 3rd arguments are integer constants and in
12716 range and prepare arguments. */
12717 STRIP_NOPS (arg1);
12718 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
12720 error ("argument 2 must be 0 or 1");
12721 return const0_rtx;
12724 STRIP_NOPS (arg2);
12725 	  if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
12727 error ("argument 3 must be in the range 0..15");
12728 return const0_rtx;
12732 if (target == 0
12733 || GET_MODE (target) != tmode
12734 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12735 target = gen_reg_rtx (tmode);
12737 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12738 op0 = copy_to_mode_reg (mode0, op0);
12739 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12740 op1 = copy_to_mode_reg (mode1, op1);
12741 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12742 op2 = copy_to_mode_reg (mode2, op2);
12744 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
12745 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
12746 else
12747 pat = GEN_FCN (icode) (target, op0, op1, op2);
12748 if (! pat)
12749 return 0;
12750 emit_insn (pat);
12752 return target;
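/* Usage sketch (illustrative, not taken from this file): a ternary
   builtin such as

     vector float r = __builtin_altivec_vmaddfp (a, b, c);

   flows through the code above: each operand is forced into a register
   accepted by its insn predicate, the single pattern selected by ICODE
   is emitted, and the result lands in TARGET.  */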
12755 /* Expand the lvx builtins. */
12756 static rtx
12757 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
12759 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12760 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12761 tree arg0;
12762 enum machine_mode tmode, mode0;
12763 rtx pat, op0;
12764 enum insn_code icode;
12766 switch (fcode)
12768 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
12769 icode = CODE_FOR_vector_altivec_load_v16qi;
12770 break;
12771 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
12772 icode = CODE_FOR_vector_altivec_load_v8hi;
12773 break;
12774 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
12775 icode = CODE_FOR_vector_altivec_load_v4si;
12776 break;
12777 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
12778 icode = CODE_FOR_vector_altivec_load_v4sf;
12779 break;
12780 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
12781 icode = CODE_FOR_vector_altivec_load_v2df;
12782 break;
12783 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
12784 	      icode = CODE_FOR_vector_altivec_load_v2di;
	      break;
12785 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
12786 icode = CODE_FOR_vector_altivec_load_v1ti;
12787 break;
12788 default:
12789 *expandedp = false;
12790 return NULL_RTX;
12793 *expandedp = true;
12795 arg0 = CALL_EXPR_ARG (exp, 0);
12796 op0 = expand_normal (arg0);
12797 tmode = insn_data[icode].operand[0].mode;
12798 mode0 = insn_data[icode].operand[1].mode;
12800 if (target == 0
12801 || GET_MODE (target) != tmode
12802 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12803 target = gen_reg_rtx (tmode);
12805 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12806 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12808 pat = GEN_FCN (icode) (target, op0);
12809 if (! pat)
12810 return 0;
12811 emit_insn (pat);
12812 return target;
12815 /* Expand the stvx builtins. */
12816 static rtx
12817 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12818 bool *expandedp)
12820 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12821 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12822 tree arg0, arg1;
12823 enum machine_mode mode0, mode1;
12824 rtx pat, op0, op1;
12825 enum insn_code icode;
12827 switch (fcode)
12829 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
12830 icode = CODE_FOR_vector_altivec_store_v16qi;
12831 break;
12832 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
12833 icode = CODE_FOR_vector_altivec_store_v8hi;
12834 break;
12835 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
12836 icode = CODE_FOR_vector_altivec_store_v4si;
12837 break;
12838 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
12839 icode = CODE_FOR_vector_altivec_store_v4sf;
12840 break;
12841 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
12842 icode = CODE_FOR_vector_altivec_store_v2df;
12843 break;
12844 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
12845 	      icode = CODE_FOR_vector_altivec_store_v2di;
	      break;
12846 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
12847 icode = CODE_FOR_vector_altivec_store_v1ti;
12848 break;
12849 default:
12850 *expandedp = false;
12851 return NULL_RTX;
12854 arg0 = CALL_EXPR_ARG (exp, 0);
12855 arg1 = CALL_EXPR_ARG (exp, 1);
12856 op0 = expand_normal (arg0);
12857 op1 = expand_normal (arg1);
12858 mode0 = insn_data[icode].operand[0].mode;
12859 mode1 = insn_data[icode].operand[1].mode;
12861 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12862 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12863 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12864 op1 = copy_to_mode_reg (mode1, op1);
12866 pat = GEN_FCN (icode) (op0, op1);
12867 if (pat)
12868 emit_insn (pat);
12870 *expandedp = true;
12871 return NULL_RTX;
12874 /* Expand the dst builtins. */
12875 static rtx
12876 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12877 bool *expandedp)
12879 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12880 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12881 tree arg0, arg1, arg2;
12882 enum machine_mode mode0, mode1;
12883 rtx pat, op0, op1, op2;
12884 const struct builtin_description *d;
12885 size_t i;
12887 *expandedp = false;
12889 /* Handle DST variants. */
12890 d = bdesc_dst;
12891 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12892 if (d->code == fcode)
12894 arg0 = CALL_EXPR_ARG (exp, 0);
12895 arg1 = CALL_EXPR_ARG (exp, 1);
12896 arg2 = CALL_EXPR_ARG (exp, 2);
12897 op0 = expand_normal (arg0);
12898 op1 = expand_normal (arg1);
12899 op2 = expand_normal (arg2);
12900 mode0 = insn_data[d->icode].operand[0].mode;
12901 mode1 = insn_data[d->icode].operand[1].mode;
12903 /* Invalid arguments, bail out before generating bad rtl. */
12904 if (arg0 == error_mark_node
12905 || arg1 == error_mark_node
12906 || arg2 == error_mark_node)
12907 return const0_rtx;
12909 *expandedp = true;
12910 STRIP_NOPS (arg2);
12911 if (TREE_CODE (arg2) != INTEGER_CST
12912 || TREE_INT_CST_LOW (arg2) & ~0x3)
12914 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
12915 return const0_rtx;
12918 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
12919 op0 = copy_to_mode_reg (Pmode, op0);
12920 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
12921 op1 = copy_to_mode_reg (mode1, op1);
12923 pat = GEN_FCN (d->icode) (op0, op1, op2);
12924 if (pat != 0)
12925 emit_insn (pat);
12927 return NULL_RTX;
12930 return NULL_RTX;
12933 /* Expand vec_init builtin. */
12934 static rtx
12935 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
12937 enum machine_mode tmode = TYPE_MODE (type);
12938 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
12939 int i, n_elt = GET_MODE_NUNITS (tmode);
12941 gcc_assert (VECTOR_MODE_P (tmode));
12942 gcc_assert (n_elt == call_expr_nargs (exp));
12944 if (!target || !register_operand (target, tmode))
12945 target = gen_reg_rtx (tmode);
12947   /* If we have a vector comprised of a single element, such as V1TImode, do
12948 the initialization directly. */
12949 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
12951 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
12952 emit_move_insn (target, gen_lowpart (tmode, x));
12954 else
12956 rtvec v = rtvec_alloc (n_elt);
12958 for (i = 0; i < n_elt; ++i)
12960 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
12961 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
12964 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
12967 return target;
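/* Illustrative example (the builtin spelling is assumed from the case
   list in altivec_expand_builtin below): a call such as

     __builtin_vec_init_v4si (a, b, c, d)

   takes the PARALLEL path above, whereas the V1TI variant has a single
   lane the size of the whole vector and reduces to a gen_lowpart
   move.  */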
12970 /* Return the integer constant in ARG. Constrain it to be in the range
12971 of the subparts of VEC_TYPE; issue an error if not. */
12973 static int
12974 get_element_number (tree vec_type, tree arg)
12976 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
12978 if (!tree_fits_uhwi_p (arg)
12979 || (elt = tree_to_uhwi (arg), elt > max))
12981 error ("selector must be an integer constant in the range 0..%wi", max);
12982 return 0;
12985 return elt;
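/* For instance, for a V4SI argument TYPE_VECTOR_SUBPARTS is 4, so MAX
   is 3 and only selectors 0..3 are accepted; anything else raises the
   error above and quietly falls back to element 0.  */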
12988 /* Expand vec_set builtin. */
12989 static rtx
12990 altivec_expand_vec_set_builtin (tree exp)
12992 enum machine_mode tmode, mode1;
12993 tree arg0, arg1, arg2;
12994 int elt;
12995 rtx op0, op1;
12997 arg0 = CALL_EXPR_ARG (exp, 0);
12998 arg1 = CALL_EXPR_ARG (exp, 1);
12999 arg2 = CALL_EXPR_ARG (exp, 2);
13001 tmode = TYPE_MODE (TREE_TYPE (arg0));
13002 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
13003 gcc_assert (VECTOR_MODE_P (tmode));
13005 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
13006 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
13007 elt = get_element_number (TREE_TYPE (arg0), arg2);
13009 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
13010 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
13012 op0 = force_reg (tmode, op0);
13013 op1 = force_reg (mode1, op1);
13015 rs6000_expand_vector_set (op0, op1, elt);
13017 return op0;
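/* Illustrative use: __builtin_vec_set_v4si (v, x, 2) stores X into
   lane 2 of V.  Note that the updated vector is returned in OP0 itself
   rather than in a fresh TARGET register.  */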
13020 /* Expand vec_ext builtin. */
13021 static rtx
13022 altivec_expand_vec_ext_builtin (tree exp, rtx target)
13024 enum machine_mode tmode, mode0;
13025 tree arg0, arg1;
13026 int elt;
13027 rtx op0;
13029 arg0 = CALL_EXPR_ARG (exp, 0);
13030 arg1 = CALL_EXPR_ARG (exp, 1);
13032 op0 = expand_normal (arg0);
13033 elt = get_element_number (TREE_TYPE (arg0), arg1);
13035 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
13036 mode0 = TYPE_MODE (TREE_TYPE (arg0));
13037 gcc_assert (VECTOR_MODE_P (mode0));
13039 op0 = force_reg (mode0, op0);
13041 if (optimize || !target || !register_operand (target, tmode))
13042 target = gen_reg_rtx (tmode);
13044 rs6000_expand_vector_extract (target, op0, elt);
13046 return target;
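/* Illustrative use: __builtin_vec_ext_v4si (v, 3) reads lane 3 of V
   into a scalar TARGET of the element mode.  */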
13049 /* Expand the builtin in EXP and store the result in TARGET. Store
13050 true in *EXPANDEDP if we found a builtin to expand. */
13051 static rtx
13052 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
13054 const struct builtin_description *d;
13055 size_t i;
13056 enum insn_code icode;
13057 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13058 tree arg0;
13059 rtx op0, pat;
13060 enum machine_mode tmode, mode0;
13061 enum rs6000_builtins fcode
13062 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13064 if (rs6000_overloaded_builtin_p (fcode))
13066 *expandedp = true;
13067 error ("unresolved overload for Altivec builtin %qF", fndecl);
13069 /* Given it is invalid, just generate a normal call. */
13070 return expand_call (exp, target, false);
13073 target = altivec_expand_ld_builtin (exp, target, expandedp);
13074 if (*expandedp)
13075 return target;
13077 target = altivec_expand_st_builtin (exp, target, expandedp);
13078 if (*expandedp)
13079 return target;
13081 target = altivec_expand_dst_builtin (exp, target, expandedp);
13082 if (*expandedp)
13083 return target;
13085 *expandedp = true;
13087 switch (fcode)
13089 case ALTIVEC_BUILTIN_STVX_V2DF:
13090 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
13091 case ALTIVEC_BUILTIN_STVX_V2DI:
13092 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
13093 case ALTIVEC_BUILTIN_STVX_V4SF:
13094 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
13095 case ALTIVEC_BUILTIN_STVX:
13096 case ALTIVEC_BUILTIN_STVX_V4SI:
13097 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
13098 case ALTIVEC_BUILTIN_STVX_V8HI:
13099 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
13100 case ALTIVEC_BUILTIN_STVX_V16QI:
13101 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
13102 case ALTIVEC_BUILTIN_STVEBX:
13103 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
13104 case ALTIVEC_BUILTIN_STVEHX:
13105 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
13106 case ALTIVEC_BUILTIN_STVEWX:
13107 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
13108 case ALTIVEC_BUILTIN_STVXL_V2DF:
13109 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
13110 case ALTIVEC_BUILTIN_STVXL_V2DI:
13111 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
13112 case ALTIVEC_BUILTIN_STVXL_V4SF:
13113 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
13114 case ALTIVEC_BUILTIN_STVXL:
13115 case ALTIVEC_BUILTIN_STVXL_V4SI:
13116 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
13117 case ALTIVEC_BUILTIN_STVXL_V8HI:
13118 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
13119 case ALTIVEC_BUILTIN_STVXL_V16QI:
13120 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
13122 case ALTIVEC_BUILTIN_STVLX:
13123 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
13124 case ALTIVEC_BUILTIN_STVLXL:
13125 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
13126 case ALTIVEC_BUILTIN_STVRX:
13127 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
13128 case ALTIVEC_BUILTIN_STVRXL:
13129 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
13131 case VSX_BUILTIN_STXVD2X_V1TI:
13132 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
13133 case VSX_BUILTIN_STXVD2X_V2DF:
13134 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
13135 case VSX_BUILTIN_STXVD2X_V2DI:
13136 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
13137 case VSX_BUILTIN_STXVW4X_V4SF:
13138 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
13139 case VSX_BUILTIN_STXVW4X_V4SI:
13140 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
13141 case VSX_BUILTIN_STXVW4X_V8HI:
13142 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
13143 case VSX_BUILTIN_STXVW4X_V16QI:
13144 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
13146 case ALTIVEC_BUILTIN_MFVSCR:
13147 icode = CODE_FOR_altivec_mfvscr;
13148 tmode = insn_data[icode].operand[0].mode;
13150 if (target == 0
13151 || GET_MODE (target) != tmode
13152 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13153 target = gen_reg_rtx (tmode);
13155 pat = GEN_FCN (icode) (target);
13156 if (! pat)
13157 return 0;
13158 emit_insn (pat);
13159 return target;
13161 case ALTIVEC_BUILTIN_MTVSCR:
13162 icode = CODE_FOR_altivec_mtvscr;
13163 arg0 = CALL_EXPR_ARG (exp, 0);
13164 op0 = expand_normal (arg0);
13165 mode0 = insn_data[icode].operand[0].mode;
13167 /* If we got invalid arguments bail out before generating bad rtl. */
13168 if (arg0 == error_mark_node)
13169 return const0_rtx;
13171 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13172 op0 = copy_to_mode_reg (mode0, op0);
13174 pat = GEN_FCN (icode) (op0);
13175 if (pat)
13176 emit_insn (pat);
13177 return NULL_RTX;
13179 case ALTIVEC_BUILTIN_DSSALL:
13180 emit_insn (gen_altivec_dssall ());
13181 return NULL_RTX;
13183 case ALTIVEC_BUILTIN_DSS:
13184 icode = CODE_FOR_altivec_dss;
13185 arg0 = CALL_EXPR_ARG (exp, 0);
13186 STRIP_NOPS (arg0);
13187 op0 = expand_normal (arg0);
13188 mode0 = insn_data[icode].operand[0].mode;
13190 /* If we got invalid arguments bail out before generating bad rtl. */
13191 if (arg0 == error_mark_node)
13192 return const0_rtx;
13194 if (TREE_CODE (arg0) != INTEGER_CST
13195 || TREE_INT_CST_LOW (arg0) & ~0x3)
13197 error ("argument to dss must be a 2-bit unsigned literal");
13198 return const0_rtx;
13201 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13202 op0 = copy_to_mode_reg (mode0, op0);
13204 emit_insn (gen_altivec_dss (op0));
13205 return NULL_RTX;
13207 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
13208 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
13209 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
13210 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
13211 case VSX_BUILTIN_VEC_INIT_V2DF:
13212 case VSX_BUILTIN_VEC_INIT_V2DI:
13213 case VSX_BUILTIN_VEC_INIT_V1TI:
13214 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
13216 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
13217 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
13218 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
13219 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
13220 case VSX_BUILTIN_VEC_SET_V2DF:
13221 case VSX_BUILTIN_VEC_SET_V2DI:
13222 case VSX_BUILTIN_VEC_SET_V1TI:
13223 return altivec_expand_vec_set_builtin (exp);
13225 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
13226 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
13227 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
13228 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
13229 case VSX_BUILTIN_VEC_EXT_V2DF:
13230 case VSX_BUILTIN_VEC_EXT_V2DI:
13231 case VSX_BUILTIN_VEC_EXT_V1TI:
13232 return altivec_expand_vec_ext_builtin (exp, target);
13234 default:
13235 break;
13236 /* Fall through. */
13239 /* Expand abs* operations. */
13240 d = bdesc_abs;
13241 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13242 if (d->code == fcode)
13243 return altivec_expand_abs_builtin (d->icode, exp, target);
13245 /* Expand the AltiVec predicates. */
13246 d = bdesc_altivec_preds;
13247 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13248 if (d->code == fcode)
13249 return altivec_expand_predicate_builtin (d->icode, exp, target);
13251 /* LV* are funky. We initialized them differently. */
13252 switch (fcode)
13254 case ALTIVEC_BUILTIN_LVSL:
13255 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
13256 exp, target, false);
13257 case ALTIVEC_BUILTIN_LVSR:
13258 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
13259 exp, target, false);
13260 case ALTIVEC_BUILTIN_LVEBX:
13261 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
13262 exp, target, false);
13263 case ALTIVEC_BUILTIN_LVEHX:
13264 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
13265 exp, target, false);
13266 case ALTIVEC_BUILTIN_LVEWX:
13267 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
13268 exp, target, false);
13269 case ALTIVEC_BUILTIN_LVXL_V2DF:
13270 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
13271 exp, target, false);
13272 case ALTIVEC_BUILTIN_LVXL_V2DI:
13273 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
13274 exp, target, false);
13275 case ALTIVEC_BUILTIN_LVXL_V4SF:
13276 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
13277 exp, target, false);
13278 case ALTIVEC_BUILTIN_LVXL:
13279 case ALTIVEC_BUILTIN_LVXL_V4SI:
13280 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
13281 exp, target, false);
13282 case ALTIVEC_BUILTIN_LVXL_V8HI:
13283 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
13284 exp, target, false);
13285 case ALTIVEC_BUILTIN_LVXL_V16QI:
13286 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
13287 exp, target, false);
13288 case ALTIVEC_BUILTIN_LVX_V2DF:
13289 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
13290 exp, target, false);
13291 case ALTIVEC_BUILTIN_LVX_V2DI:
13292 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
13293 exp, target, false);
13294 case ALTIVEC_BUILTIN_LVX_V4SF:
13295 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
13296 exp, target, false);
13297 case ALTIVEC_BUILTIN_LVX:
13298 case ALTIVEC_BUILTIN_LVX_V4SI:
13299 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
13300 exp, target, false);
13301 case ALTIVEC_BUILTIN_LVX_V8HI:
13302 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
13303 exp, target, false);
13304 case ALTIVEC_BUILTIN_LVX_V16QI:
13305 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
13306 exp, target, false);
13307 case ALTIVEC_BUILTIN_LVLX:
13308 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
13309 exp, target, true);
13310 case ALTIVEC_BUILTIN_LVLXL:
13311 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
13312 exp, target, true);
13313 case ALTIVEC_BUILTIN_LVRX:
13314 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
13315 exp, target, true);
13316 case ALTIVEC_BUILTIN_LVRXL:
13317 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
13318 exp, target, true);
13319 case VSX_BUILTIN_LXVD2X_V1TI:
13320 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13321 exp, target, false);
13322 case VSX_BUILTIN_LXVD2X_V2DF:
13323 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13324 exp, target, false);
13325 case VSX_BUILTIN_LXVD2X_V2DI:
13326 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13327 exp, target, false);
13328 case VSX_BUILTIN_LXVW4X_V4SF:
13329 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13330 exp, target, false);
13331 case VSX_BUILTIN_LXVW4X_V4SI:
13332 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13333 exp, target, false);
13334 case VSX_BUILTIN_LXVW4X_V8HI:
13335 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13336 exp, target, false);
13337 case VSX_BUILTIN_LXVW4X_V16QI:
13338 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13339 exp, target, false);
13340 break;
13341 default:
13342 break;
13343 /* Fall through. */
13346 *expandedp = false;
13347 return NULL_RTX;
13350 /* Expand the builtin in EXP and store the result in TARGET. Store
13351 true in *EXPANDEDP if we found a builtin to expand. */
13352 static rtx
13353 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13355 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13356 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13357 const struct builtin_description *d;
13358 size_t i;
13360 *expandedp = true;
13362 switch (fcode)
13364 case PAIRED_BUILTIN_STX:
13365 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13366 case PAIRED_BUILTIN_LX:
13367 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
13368 default:
13369 break;
13370 /* Fall through. */
13373 /* Expand the paired predicates. */
13374 d = bdesc_paired_preds;
13375 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13376 if (d->code == fcode)
13377 return paired_expand_predicate_builtin (d->icode, exp, target);
13379 *expandedp = false;
13380 return NULL_RTX;
13383 /* Binops that need to be initialized manually, but can be expanded
13384 automagically by rs6000_expand_binop_builtin. */
13385 static const struct builtin_description bdesc_2arg_spe[] =
13387 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13388 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13389 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13390 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13391 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13392 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13393 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13394 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13395 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13396 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13397 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13398 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13399 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13400 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13401 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13402 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13403 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13404 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13405 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13406 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13407 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13408 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13411 /* Expand the builtin in EXP and store the result in TARGET. Store
13412 true in *EXPANDEDP if we found a builtin to expand.
13414 This expands the SPE builtins that are not simple unary and binary
13415 operations. */
13416 static rtx
13417 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13419 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13420 tree arg1, arg0;
13421 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13422 enum insn_code icode;
13423 enum machine_mode tmode, mode0;
13424 rtx pat, op0;
13425 const struct builtin_description *d;
13426 size_t i;
13428 *expandedp = true;
13430 /* Syntax check for a 5-bit unsigned immediate. */
13431 switch (fcode)
13433 case SPE_BUILTIN_EVSTDD:
13434 case SPE_BUILTIN_EVSTDH:
13435 case SPE_BUILTIN_EVSTDW:
13436 case SPE_BUILTIN_EVSTWHE:
13437 case SPE_BUILTIN_EVSTWHO:
13438 case SPE_BUILTIN_EVSTWWE:
13439 case SPE_BUILTIN_EVSTWWO:
13440 arg1 = CALL_EXPR_ARG (exp, 2);
13441 if (TREE_CODE (arg1) != INTEGER_CST
13442 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13444 error ("argument 2 must be a 5-bit unsigned literal");
13445 return const0_rtx;
13447 break;
13448 default:
13449 break;
13452 /* The evsplat*i instructions are not quite generic. */
13453 switch (fcode)
13455 case SPE_BUILTIN_EVSPLATFI:
13456 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13457 exp, target);
13458 case SPE_BUILTIN_EVSPLATI:
13459 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13460 exp, target);
13461 default:
13462 break;
13465 d = bdesc_2arg_spe;
13466 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13467 if (d->code == fcode)
13468 return rs6000_expand_binop_builtin (d->icode, exp, target);
13470 d = bdesc_spe_predicates;
13471 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13472 if (d->code == fcode)
13473 return spe_expand_predicate_builtin (d->icode, exp, target);
13475 d = bdesc_spe_evsel;
13476 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13477 if (d->code == fcode)
13478 return spe_expand_evsel_builtin (d->icode, exp, target);
13480 switch (fcode)
13482 case SPE_BUILTIN_EVSTDDX:
13483 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13484 case SPE_BUILTIN_EVSTDHX:
13485 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13486 case SPE_BUILTIN_EVSTDWX:
13487 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13488 case SPE_BUILTIN_EVSTWHEX:
13489 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13490 case SPE_BUILTIN_EVSTWHOX:
13491 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13492 case SPE_BUILTIN_EVSTWWEX:
13493 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13494 case SPE_BUILTIN_EVSTWWOX:
13495 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13496 case SPE_BUILTIN_EVSTDD:
13497 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13498 case SPE_BUILTIN_EVSTDH:
13499 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13500 case SPE_BUILTIN_EVSTDW:
13501 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13502 case SPE_BUILTIN_EVSTWHE:
13503 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13504 case SPE_BUILTIN_EVSTWHO:
13505 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13506 case SPE_BUILTIN_EVSTWWE:
13507 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13508 case SPE_BUILTIN_EVSTWWO:
13509 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13510 case SPE_BUILTIN_MFSPEFSCR:
13511 icode = CODE_FOR_spe_mfspefscr;
13512 tmode = insn_data[icode].operand[0].mode;
13514 if (target == 0
13515 || GET_MODE (target) != tmode
13516 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13517 target = gen_reg_rtx (tmode);
13519 pat = GEN_FCN (icode) (target);
13520 if (! pat)
13521 return 0;
13522 emit_insn (pat);
13523 return target;
13524 case SPE_BUILTIN_MTSPEFSCR:
13525 icode = CODE_FOR_spe_mtspefscr;
13526 arg0 = CALL_EXPR_ARG (exp, 0);
13527 op0 = expand_normal (arg0);
13528 mode0 = insn_data[icode].operand[0].mode;
13530 if (arg0 == error_mark_node)
13531 return const0_rtx;
13533 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13534 op0 = copy_to_mode_reg (mode0, op0);
13536 pat = GEN_FCN (icode) (op0);
13537 if (pat)
13538 emit_insn (pat);
13539 return NULL_RTX;
13540 default:
13541 break;
13544 *expandedp = false;
13545 return NULL_RTX;
13548 static rtx
13549 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13551 rtx pat, scratch, tmp;
13552 tree form = CALL_EXPR_ARG (exp, 0);
13553 tree arg0 = CALL_EXPR_ARG (exp, 1);
13554 tree arg1 = CALL_EXPR_ARG (exp, 2);
13555 rtx op0 = expand_normal (arg0);
13556 rtx op1 = expand_normal (arg1);
13557 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13558 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13559 int form_int;
13560 enum rtx_code code;
13562 if (TREE_CODE (form) != INTEGER_CST)
13564 error ("argument 1 of __builtin_paired_predicate must be a constant");
13565 return const0_rtx;
13567 else
13568 form_int = TREE_INT_CST_LOW (form);
13570 gcc_assert (mode0 == mode1);
13572 if (arg0 == error_mark_node || arg1 == error_mark_node)
13573 return const0_rtx;
13575 if (target == 0
13576 || GET_MODE (target) != SImode
13577 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13578 target = gen_reg_rtx (SImode);
13579 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13580 op0 = copy_to_mode_reg (mode0, op0);
13581 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13582 op1 = copy_to_mode_reg (mode1, op1);
13584 scratch = gen_reg_rtx (CCFPmode);
13586 pat = GEN_FCN (icode) (scratch, op0, op1);
13587 if (!pat)
13588 return const0_rtx;
13590 emit_insn (pat);
13592 switch (form_int)
13594 /* LT bit. */
13595 case 0:
13596 code = LT;
13597 break;
13598 /* GT bit. */
13599 case 1:
13600 code = GT;
13601 break;
13602 /* EQ bit. */
13603 case 2:
13604 code = EQ;
13605 break;
13606 /* UN bit. */
13607 case 3:
13608 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13609 return target;
13610 default:
13611 error ("argument 1 of __builtin_paired_predicate is out of range");
13612 return const0_rtx;
13615 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13616 emit_move_insn (target, tmp);
13617 return target;
13620 static rtx
13621 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13623 rtx pat, scratch, tmp;
13624 tree form = CALL_EXPR_ARG (exp, 0);
13625 tree arg0 = CALL_EXPR_ARG (exp, 1);
13626 tree arg1 = CALL_EXPR_ARG (exp, 2);
13627 rtx op0 = expand_normal (arg0);
13628 rtx op1 = expand_normal (arg1);
13629 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13630 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13631 int form_int;
13632 enum rtx_code code;
13634 if (TREE_CODE (form) != INTEGER_CST)
13636 error ("argument 1 of __builtin_spe_predicate must be a constant");
13637 return const0_rtx;
13639 else
13640 form_int = TREE_INT_CST_LOW (form);
13642 gcc_assert (mode0 == mode1);
13644 if (arg0 == error_mark_node || arg1 == error_mark_node)
13645 return const0_rtx;
13647 if (target == 0
13648 || GET_MODE (target) != SImode
13649 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13650 target = gen_reg_rtx (SImode);
13652 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13653 op0 = copy_to_mode_reg (mode0, op0);
13654 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13655 op1 = copy_to_mode_reg (mode1, op1);
13657 scratch = gen_reg_rtx (CCmode);
13659 pat = GEN_FCN (icode) (scratch, op0, op1);
13660 if (! pat)
13661 return const0_rtx;
13662 emit_insn (pat);
13664 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13665 _lower_. We use one compare, but look in different bits of the
13666 CR for each variant.
13668 There are 2 elements in each SPE simd type (upper/lower). The CR
13669 bits are set as follows:
13671 BIT0 | BIT 1 | BIT 2 | BIT 3
13672 U | L | (U | L) | (U & L)
13674 So, for an "all" relationship, BIT 3 would be set.
13675 For an "any" relationship, BIT 2 would be set. Etc.
13677 Following traditional nomenclature, these bits map to:
13679 BIT0 | BIT 1 | BIT 2 | BIT 3
13680 LT | GT | EQ | OV
13682      Later, we will generate rtl to look in the OV/EQ/LT/GT bits, for
     the all/any/upper/lower variants respectively.
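  /* Worked example from the table above: the "all" variant
     (FORM_INT == 0) reads BIT 3, i.e. (U & L), so it is true only when
     both the upper and the lower element satisfy the comparison, while
     the "any" variant reads BIT 2, (U | L).  */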
13685 switch (form_int)
13687 /* All variant. OV bit. */
13688 case 0:
13689 /* We need to get to the OV bit, which is the ORDERED bit. We
13690 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13691 that's ugly and will make validate_condition_mode die.
13692 So let's just use another pattern. */
13693 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13694 return target;
13695 /* Any variant. EQ bit. */
13696 case 1:
13697 code = EQ;
13698 break;
13699 /* Upper variant. LT bit. */
13700 case 2:
13701 code = LT;
13702 break;
13703 /* Lower variant. GT bit. */
13704 case 3:
13705 code = GT;
13706 break;
13707 default:
13708 error ("argument 1 of __builtin_spe_predicate is out of range");
13709 return const0_rtx;
13712 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13713 emit_move_insn (target, tmp);
13715 return target;
13718 /* The evsel builtins look like this:
13720 e = __builtin_spe_evsel_OP (a, b, c, d);
13722 and work like this:
13724 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
13725 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
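/* Concrete example (the builtin name is assumed from the SPE naming
   convention): e = __builtin_spe_evsel_gts (a, b, c, d) compares
   a > b independently in the upper and lower halves and forwards the
   matching half of c where the comparison holds, of d where it does
   not.  */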
13728 static rtx
13729 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
13731 rtx pat, scratch;
13732 tree arg0 = CALL_EXPR_ARG (exp, 0);
13733 tree arg1 = CALL_EXPR_ARG (exp, 1);
13734 tree arg2 = CALL_EXPR_ARG (exp, 2);
13735 tree arg3 = CALL_EXPR_ARG (exp, 3);
13736 rtx op0 = expand_normal (arg0);
13737 rtx op1 = expand_normal (arg1);
13738 rtx op2 = expand_normal (arg2);
13739 rtx op3 = expand_normal (arg3);
13740 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13741 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13743 gcc_assert (mode0 == mode1);
13745 if (arg0 == error_mark_node || arg1 == error_mark_node
13746 || arg2 == error_mark_node || arg3 == error_mark_node)
13747 return const0_rtx;
13749 if (target == 0
13750 || GET_MODE (target) != mode0
13751 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
13752 target = gen_reg_rtx (mode0);
13754 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13755 op0 = copy_to_mode_reg (mode0, op0);
13756 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13757 op1 = copy_to_mode_reg (mode0, op1);
13758 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13759 op2 = copy_to_mode_reg (mode0, op2);
13760 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
13761 op3 = copy_to_mode_reg (mode0, op3);
13763 /* Generate the compare. */
13764 scratch = gen_reg_rtx (CCmode);
13765 pat = GEN_FCN (icode) (scratch, op0, op1);
13766 if (! pat)
13767 return const0_rtx;
13768 emit_insn (pat);
13770 if (mode0 == V2SImode)
13771 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
13772 else
13773 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
13775 return target;
13778 /* Raise an error message for a builtin function that is called without the
13779 appropriate target options being set. */
13781 static void
13782 rs6000_invalid_builtin (enum rs6000_builtins fncode)
13784 size_t uns_fncode = (size_t)fncode;
13785 const char *name = rs6000_builtin_info[uns_fncode].name;
13786 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
13788 gcc_assert (name != NULL);
13789 if ((fnmask & RS6000_BTM_CELL) != 0)
13790 error ("Builtin function %s is only valid for the cell processor", name);
13791 else if ((fnmask & RS6000_BTM_VSX) != 0)
13792 error ("Builtin function %s requires the -mvsx option", name);
13793 else if ((fnmask & RS6000_BTM_HTM) != 0)
13794 error ("Builtin function %s requires the -mhtm option", name);
13795 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
13796 error ("Builtin function %s requires the -maltivec option", name);
13797 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
13798 error ("Builtin function %s requires the -mpaired option", name);
13799 else if ((fnmask & RS6000_BTM_SPE) != 0)
13800 error ("Builtin function %s requires the -mspe option", name);
13801 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13802 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13803 error ("Builtin function %s requires the -mhard-dfp and"
13804 " -mpower8-vector options", name);
13805 else if ((fnmask & RS6000_BTM_DFP) != 0)
13806 error ("Builtin function %s requires the -mhard-dfp option", name);
13807 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
13808 error ("Builtin function %s requires the -mpower8-vector option", name);
13809 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13810 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13811 error ("Builtin function %s requires the -mhard-float and"
13812 " -mlong-double-128 options", name);
13813 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
13814 error ("Builtin function %s requires the -mhard-float option", name);
13815 else
13816 error ("Builtin function %s is not supported with the current options",
13817 name);
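/* Illustrative diagnostic: compiling a call to, say,
   __builtin_altivec_vaddubm without -maltivec reaches the
   RS6000_BTM_ALTIVEC arm above and reports "builtin function
   __builtin_altivec_vaddubm requires the -maltivec option".  */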
13820 /* Expand an expression EXP that calls a built-in function,
13821 with result going to TARGET if that's convenient
13822 (and in mode MODE if that's convenient).
13823 SUBTARGET may be used as the target for computing one of EXP's operands.
13824 IGNORE is nonzero if the value is to be ignored. */
13826 static rtx
13827 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13828 enum machine_mode mode ATTRIBUTE_UNUSED,
13829 int ignore ATTRIBUTE_UNUSED)
13831 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13832 enum rs6000_builtins fcode
13833 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
13834 size_t uns_fcode = (size_t)fcode;
13835 const struct builtin_description *d;
13836 size_t i;
13837 rtx ret;
13838 bool success;
13839 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
13840 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
13842 if (TARGET_DEBUG_BUILTIN)
13844 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
13845 const char *name1 = rs6000_builtin_info[uns_fcode].name;
13846 const char *name2 = ((icode != CODE_FOR_nothing)
13847 ? get_insn_name ((int)icode)
13848 : "nothing");
13849 const char *name3;
13851 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
13853 default: name3 = "unknown"; break;
13854 case RS6000_BTC_SPECIAL: name3 = "special"; break;
13855 case RS6000_BTC_UNARY: name3 = "unary"; break;
13856 case RS6000_BTC_BINARY: name3 = "binary"; break;
13857 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
13858 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
13859 case RS6000_BTC_ABS: name3 = "abs"; break;
13860 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
13861 case RS6000_BTC_DST: name3 = "dst"; break;
13865 fprintf (stderr,
13866 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13867 (name1) ? name1 : "---", fcode,
13868 (name2) ? name2 : "---", (int)icode,
13869 name3,
13870 func_valid_p ? "" : ", not valid");
13873 if (!func_valid_p)
13875 rs6000_invalid_builtin (fcode);
13877 /* Given it is invalid, just generate a normal call. */
13878 return expand_call (exp, target, ignore);
13881 switch (fcode)
13883 case RS6000_BUILTIN_RECIP:
13884 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
13886 case RS6000_BUILTIN_RECIPF:
13887 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
13889 case RS6000_BUILTIN_RSQRTF:
13890 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
13892 case RS6000_BUILTIN_RSQRT:
13893 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
13895 case POWER7_BUILTIN_BPERMD:
13896 return rs6000_expand_binop_builtin (((TARGET_64BIT)
13897 ? CODE_FOR_bpermd_di
13898 : CODE_FOR_bpermd_si), exp, target);
13900 case RS6000_BUILTIN_GET_TB:
13901 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
13902 target);
13904 case RS6000_BUILTIN_MFTB:
13905 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
13906 ? CODE_FOR_rs6000_mftb_di
13907 : CODE_FOR_rs6000_mftb_si),
13908 target);
13910 case RS6000_BUILTIN_MFFS:
13911 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
13913 case RS6000_BUILTIN_MTFSF:
13914 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
13916 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
13917 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
13919 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
13920 : (int) CODE_FOR_altivec_lvsl);
13921 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13922 enum machine_mode mode = insn_data[icode].operand[1].mode;
13923 tree arg;
13924 rtx op, addr, pat;
13926 gcc_assert (TARGET_ALTIVEC);
13928 arg = CALL_EXPR_ARG (exp, 0);
13929 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
13930 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
13931 addr = memory_address (mode, op);
13932 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
13933 op = addr;
13934 else
13936 	    /* For the load case we need to negate the address.  */
13937 op = gen_reg_rtx (GET_MODE (addr));
13938 emit_insn (gen_rtx_SET (VOIDmode, op,
13939 gen_rtx_NEG (GET_MODE (addr), addr)));
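	    /* Hedged side note: lvsl/lvsr derive a permute control
	       vector from the low four address bits, and applying lvsr
	       to the negated address is the usual way to obtain the
	       realignment mask that vperm uses to stitch two aligned
	       loads into one unaligned access; the store case uses the
	       address as is.  */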
13941 op = gen_rtx_MEM (mode, op);
13943 if (target == 0
13944 || GET_MODE (target) != tmode
13945 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13946 target = gen_reg_rtx (tmode);
13948 /*pat = gen_altivec_lvsr (target, op);*/
13949 pat = GEN_FCN (icode) (target, op);
13950 if (!pat)
13951 return 0;
13952 emit_insn (pat);
13954 return target;
13957 case ALTIVEC_BUILTIN_VCFUX:
13958 case ALTIVEC_BUILTIN_VCFSX:
13959 case ALTIVEC_BUILTIN_VCTUXS:
13960 case ALTIVEC_BUILTIN_VCTSXS:
13961 /* FIXME: There's got to be a nicer way to handle this case than
13962 constructing a new CALL_EXPR. */
13963 if (call_expr_nargs (exp) == 1)
13965 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
13966 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
13968 break;
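      /* In other words (illustrative): a one-argument call such as
	 __builtin_altivec_vcfux (v) is rebuilt here as
	 __builtin_altivec_vcfux (v, 0), defaulting the scale operand
	 to zero before the normal two-operand expansion runs.  */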
13970 default:
13971 break;
13974 if (TARGET_ALTIVEC)
13976 ret = altivec_expand_builtin (exp, target, &success);
13978 if (success)
13979 return ret;
13981 if (TARGET_SPE)
13983 ret = spe_expand_builtin (exp, target, &success);
13985 if (success)
13986 return ret;
13988 if (TARGET_PAIRED_FLOAT)
13990 ret = paired_expand_builtin (exp, target, &success);
13992 if (success)
13993 return ret;
13995 if (TARGET_HTM)
13997 ret = htm_expand_builtin (exp, target, &success);
13999 if (success)
14000 return ret;
14003 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
14004 gcc_assert (attr == RS6000_BTC_UNARY
14005 || attr == RS6000_BTC_BINARY
14006 || attr == RS6000_BTC_TERNARY);
14008 /* Handle simple unary operations. */
14009 d = bdesc_1arg;
14010 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14011 if (d->code == fcode)
14012 return rs6000_expand_unop_builtin (d->icode, exp, target);
14014 /* Handle simple binary operations. */
14015 d = bdesc_2arg;
14016 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14017 if (d->code == fcode)
14018 return rs6000_expand_binop_builtin (d->icode, exp, target);
14020 /* Handle simple ternary operations. */
14021 d = bdesc_3arg;
14022 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
14023 if (d->code == fcode)
14024 return rs6000_expand_ternop_builtin (d->icode, exp, target);
14026 gcc_unreachable ();
14029 static void
14030 rs6000_init_builtins (void)
14032 tree tdecl;
14033 tree ftype;
14034 enum machine_mode mode;
14036 if (TARGET_DEBUG_BUILTIN)
14037 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
14038 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
14039 (TARGET_SPE) ? ", spe" : "",
14040 (TARGET_ALTIVEC) ? ", altivec" : "",
14041 (TARGET_VSX) ? ", vsx" : "");
14043 V2SI_type_node = build_vector_type (intSI_type_node, 2);
14044 V2SF_type_node = build_vector_type (float_type_node, 2);
14045 V2DI_type_node = build_vector_type (intDI_type_node, 2);
14046 V2DF_type_node = build_vector_type (double_type_node, 2);
14047 V4HI_type_node = build_vector_type (intHI_type_node, 4);
14048 V4SI_type_node = build_vector_type (intSI_type_node, 4);
14049 V4SF_type_node = build_vector_type (float_type_node, 4);
14050 V8HI_type_node = build_vector_type (intHI_type_node, 8);
14051 V16QI_type_node = build_vector_type (intQI_type_node, 16);
14053 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
14054 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
14055 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
14056 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
14058 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
14059 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
14060 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
14061 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
14063 /* We use V1TI mode as a special container to hold __int128_t items that
14064 must live in VSX registers. */
14065 if (intTI_type_node)
14067 V1TI_type_node = build_vector_type (intTI_type_node, 1);
14068 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
14071 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
14072 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
14073 'vector unsigned short'. */
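  /* A hedged C++ example of why this matters: with distinct type
     nodes,

       vector bool char b;
       vector unsigned char u;

     overload resolution and name mangling can tell B and U apart, as
     the AltiVec programming interface requires; sharing one node would
     silently merge such overloads.  */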
14075 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
14076 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
14077 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
14078 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
14079 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
14081 long_integer_type_internal_node = long_integer_type_node;
14082 long_unsigned_type_internal_node = long_unsigned_type_node;
14083 long_long_integer_type_internal_node = long_long_integer_type_node;
14084 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
14085 intQI_type_internal_node = intQI_type_node;
14086 uintQI_type_internal_node = unsigned_intQI_type_node;
14087 intHI_type_internal_node = intHI_type_node;
14088 uintHI_type_internal_node = unsigned_intHI_type_node;
14089 intSI_type_internal_node = intSI_type_node;
14090 uintSI_type_internal_node = unsigned_intSI_type_node;
14091 intDI_type_internal_node = intDI_type_node;
14092 uintDI_type_internal_node = unsigned_intDI_type_node;
14093 intTI_type_internal_node = intTI_type_node;
14094 uintTI_type_internal_node = unsigned_intTI_type_node;
14095 float_type_internal_node = float_type_node;
14096 double_type_internal_node = double_type_node;
14097 long_double_type_internal_node = long_double_type_node;
14098 dfloat64_type_internal_node = dfloat64_type_node;
14099 dfloat128_type_internal_node = dfloat128_type_node;
14100 void_type_internal_node = void_type_node;
14102 /* Initialize the modes for builtin_function_type, mapping a machine mode to
14103 tree type node. */
14104 builtin_mode_to_type[QImode][0] = integer_type_node;
14105 builtin_mode_to_type[HImode][0] = integer_type_node;
14106 builtin_mode_to_type[SImode][0] = intSI_type_node;
14107 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
14108 builtin_mode_to_type[DImode][0] = intDI_type_node;
14109 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
14110 builtin_mode_to_type[TImode][0] = intTI_type_node;
14111 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
14112 builtin_mode_to_type[SFmode][0] = float_type_node;
14113 builtin_mode_to_type[DFmode][0] = double_type_node;
14114 builtin_mode_to_type[TFmode][0] = long_double_type_node;
14115 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
14116 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
14117 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
14118 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
14119 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
14120 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
14121 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
14122 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
14123 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
14124 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
14125 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
14126 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
14127 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
14128 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
14129 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
14130 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
14131 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
14133 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
14134 TYPE_NAME (bool_char_type_node) = tdecl;
14136 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
14137 TYPE_NAME (bool_short_type_node) = tdecl;
14139 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
14140 TYPE_NAME (bool_int_type_node) = tdecl;
14142 tdecl = add_builtin_type ("__pixel", pixel_type_node);
14143 TYPE_NAME (pixel_type_node) = tdecl;
14145 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
14146 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
14147 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
14148 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
14149 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
14151 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
14152 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
14154 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
14155 TYPE_NAME (V16QI_type_node) = tdecl;
14157 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
14158 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
14160 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
14161 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
14163 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  if (TARGET_POWERPC64)
    {
      tdecl = add_builtin_type ("__vector long", V2DI_type_node);
      TYPE_NAME (V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned long",
				unsigned_V2DI_type_node);
      TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
      TYPE_NAME (bool_V2DI_type_node) = tdecl;
    }
  else
    {
      tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
      TYPE_NAME (V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned long long",
				unsigned_V2DI_type_node);
      TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector __bool long long",
				bool_V2DI_type_node);
      TYPE_NAME (bool_V2DI_type_node) = tdecl;
    }

  if (V1TI_type_node)
    {
      tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
      TYPE_NAME (V1TI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned __int128",
				unsigned_V1TI_type_node);
      TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
				    intSI_type_node, double_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
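
/* Create the builtin functions for the PowerPC SPE (Signal Processing
   Engine) instructions, together with the opaque V2SI/V2SF function
   types they use.  */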
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
				pushort_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
				signed_char_type_node,
				NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
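
/* Create the builtins for the paired single-precision float load, store
   and predicate instructions.  */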
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
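
/* Create the Altivec, VSX and Cell builtins that need explicit function
   types, plus the overloaded __builtin_vec_* entry points and the
   vec_init/vec_set/vec_extract support builtins.  */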
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DI_type_node,
				V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V2DImode:
	  type = int_ftype_int_v2di_v2di;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V2DImode:
	  type = v2di_ftype_v2di;
	  break;
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);

  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
	= build_function_type_list (V1TI_type_node,
				    long_integer_type_node, pcvoid_type_node,
				    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
	= build_function_type_list (void_type_node,
				    V1TI_type_node, long_integer_type_node,
				    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
		   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
		   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
					NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
					intTI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
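
/* Create the HTM (Hardware Transactional Memory) builtins, deriving each
   prototype from the RS6000_BTC_* attribute bits in rs6000_builtin_info.  */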
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
					     : unsigned_type_node;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (d->name == 0)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
		     (long unsigned) i);
	  continue;
	}

      op[nopnds++] = (void_func) ? void_type_node : argtype;

      if (attr_args == RS6000_BTC_UNARY)
	op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}
      else if (attr_args == RS6000_BTC_TERNARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}

      switch (nopnds)
	{
	case 1:
	  type = build_function_type_list (op[0], NULL_TREE);
	  break;
	case 2:
	  type = build_function_type_list (op[0], op[1], NULL_TREE);
	  break;
	case 3:
	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
	  break;
	case 4:
	  type = build_function_type_list (op[0], op[1], op[2], op[3],
					   NULL_TREE);
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
static unsigned
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
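
/* For example, a builtin of type DF (DF, DF) hashes the mode vector
   { DFmode, DFmode, DFmode, VOIDmode } together with the four uns_p
   flags, so signed and unsigned variants of otherwise identical
   signatures occupy different hash slots.  */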
/* Compare builtin hash entries H1 and H2 for equivalence.  */
static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
		       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
					  builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
    case MISC_BUILTIN_CDTBCD:
    case MISC_BUILTIN_CBCDTD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
    case MISC_BUILTIN_ADDG6S:
    case MISC_BUILTIN_DIVWEU:
    case MISC_BUILTIN_DIVWEUO:
    case MISC_BUILTIN_DIVDEU:
    case MISC_BUILTIN_DIVDEUO:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
    case MISC_BUILTIN_UNPACK_TD:
    case MISC_BUILTIN_UNPACK_V1TI:
      h.uns_p[0] = 1;
      break;

      /* unsigned arguments for 128-bit pack instructions.  */
    case MISC_BUILTIN_PACK_TD:
    case MISC_BUILTIN_PACK_V1TI:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *h2 = h;
      *found = (void *)h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *)(*found))->type;
}
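
/* Create the builtins common to Altivec/VSX, SPE and paired float from
   the bdesc_1arg, bdesc_2arg and bdesc_3arg tables, deriving each
   prototype from the insn operand modes via builtin_function_type.  */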
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;
  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
			 (long unsigned)i);

	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
			 d->name);

	      continue;
	    }

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
			 (long unsigned)i);

	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
			 d->name);

	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
			 (long unsigned)i);

	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
			 d->name);

	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
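
/* Register the library routines used for 128-bit long double (TFmode)
   arithmetic, comparisons and conversions.  */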
static void
rs6000_init_libfuncs (void)
{
  if (!TARGET_IEEEQUAD)
    /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	  {
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	  }

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment  */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size clear, just call memset.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment.  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;
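
  /* For instance, with Altivec and 128-bit alignment clear_step is 16,
     so clears of up to 3*16 = 48 bytes are inlined when optimizing for
     size and up to 8*16 = 128 bytes otherwise; anything bigger falls
     back to memset.  */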
  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       && (align >= 64 || !STRICT_ALIGNMENT))
	{
	  clear_bytes = 8;
	  mode = DImode;
	  if (offset == 0 && align < 64)
	    {
	      rtx addr;

	      /* If the address form is reg+offset with offset not a
		 multiple of four, reload into reg indirect form here
		 rather than waiting for reload.  This way we get one
		 reload, not one per store.  */
	      addr = XEXP (orig_dest, 0);
	      if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && (INTVAL (XEXP (addr, 1)) & 3) != 0)
		{
		  addr = copy_addr_to_reg (addr);
		  orig_dest = replace_equiv_address (orig_dest, addr);
		}
	    }
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment  */

#define MAX_MOVE_REG 4
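
/* Loads are emitted eagerly below while the matching stores are queued
   in the STORES array and flushed in batches of up to MAX_MOVE_REG, so
   that each batch of loads can issue before its stores.  */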
15783 expand_block_move (rtx operands[])
15785 rtx orig_dest = operands[0];
15786 rtx orig_src = operands[1];
15787 rtx bytes_rtx = operands[2];
15788 rtx align_rtx = operands[3];
15789 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
15790 int align;
15791 int bytes;
15792 int offset;
15793 int move_bytes;
15794 rtx stores[MAX_MOVE_REG];
15795 int num_reg = 0;
15797 /* If this is not a fixed size move, just call memcpy */
15798 if (! constp)
15799 return 0;
15801 /* This must be a fixed size alignment */
15802 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15803 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15805 /* Anything to move? */
15806 bytes = INTVAL (bytes_rtx);
15807 if (bytes <= 0)
15808 return 1;
15810 if (bytes > rs6000_block_move_inline_limit)
15811 return 0;
15813 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
15815 union {
15816 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
15817 rtx (*mov) (rtx, rtx);
15818 } gen_func;
15819 enum machine_mode mode = BLKmode;
15820 rtx src, dest;
15822 /* Altivec first, since it will be faster than a string move
15823 when it applies, and usually not significantly larger. */
15824 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
15826 move_bytes = 16;
15827 mode = V4SImode;
15828 gen_func.mov = gen_movv4si;
15830 else if (TARGET_SPE && bytes >= 8 && align >= 64)
15832 move_bytes = 8;
15833 mode = V2SImode;
15834 gen_func.mov = gen_movv2si;
15836 else if (TARGET_STRING
15837 && bytes > 24 /* move up to 32 bytes at a time */
15838 && ! fixed_regs[5]
15839 && ! fixed_regs[6]
15840 && ! fixed_regs[7]
15841 && ! fixed_regs[8]
15842 && ! fixed_regs[9]
15843 && ! fixed_regs[10]
15844 && ! fixed_regs[11]
15845 && ! fixed_regs[12])
15847 move_bytes = (bytes > 32) ? 32 : bytes;
15848 gen_func.movmemsi = gen_movmemsi_8reg;
15850 else if (TARGET_STRING
15851 && bytes > 16 /* move up to 24 bytes at a time */
15852 && ! fixed_regs[5]
15853 && ! fixed_regs[6]
15854 && ! fixed_regs[7]
15855 && ! fixed_regs[8]
15856 && ! fixed_regs[9]
15857 && ! fixed_regs[10])
15859 move_bytes = (bytes > 24) ? 24 : bytes;
15860 gen_func.movmemsi = gen_movmemsi_6reg;
15862 else if (TARGET_STRING
15863 && bytes > 8 /* move up to 16 bytes at a time */
15864 && ! fixed_regs[5]
15865 && ! fixed_regs[6]
15866 && ! fixed_regs[7]
15867 && ! fixed_regs[8])
15869 move_bytes = (bytes > 16) ? 16 : bytes;
15870 gen_func.movmemsi = gen_movmemsi_4reg;
15872 else if (bytes >= 8 && TARGET_POWERPC64
15873 && (align >= 64 || !STRICT_ALIGNMENT))
15875 move_bytes = 8;
15876 mode = DImode;
15877 gen_func.mov = gen_movdi;
15878 if (offset == 0 && align < 64)
15880 rtx addr;
15882 /* If the address form is reg+offset with offset not a
15883 multiple of four, reload into reg indirect form here
15884 rather than waiting for reload. This way we get one
15885 reload, not one per load and/or store. */
15886 addr = XEXP (orig_dest, 0);
15887 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15888 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15889 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15891 addr = copy_addr_to_reg (addr);
15892 orig_dest = replace_equiv_address (orig_dest, addr);
15894 addr = XEXP (orig_src, 0);
15895 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15896 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15897 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15899 addr = copy_addr_to_reg (addr);
15900 orig_src = replace_equiv_address (orig_src, addr);
15904 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
15905 { /* move up to 8 bytes at a time */
15906 move_bytes = (bytes > 8) ? 8 : bytes;
15907 gen_func.movmemsi = gen_movmemsi_2reg;
15909 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15910 { /* move 4 bytes */
15911 move_bytes = 4;
15912 mode = SImode;
15913 gen_func.mov = gen_movsi;
15915 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15916 { /* move 2 bytes */
15917 move_bytes = 2;
15918 mode = HImode;
15919 gen_func.mov = gen_movhi;
15921 else if (TARGET_STRING && bytes > 1)
15922 { /* move up to 4 bytes at a time */
15923 move_bytes = (bytes > 4) ? 4 : bytes;
15924 gen_func.movmemsi = gen_movmemsi_1reg;
15926 else /* move 1 byte at a time */
15928 move_bytes = 1;
15929 mode = QImode;
15930 gen_func.mov = gen_movqi;
15933 src = adjust_address (orig_src, mode, offset);
15934 dest = adjust_address (orig_dest, mode, offset);
15936 if (mode != BLKmode)
15938 rtx tmp_reg = gen_reg_rtx (mode);
15940 emit_insn ((*gen_func.mov) (tmp_reg, src));
15941 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
15944 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
15946 int i;
15947 for (i = 0; i < num_reg; i++)
15948 emit_insn (stores[i]);
15949 num_reg = 0;
15952 if (mode == BLKmode)
15954 /* Move the address into scratch registers. The movmemsi
15955 patterns require zero offset. */
15956 if (!REG_P (XEXP (src, 0)))
15958 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
15959 src = replace_equiv_address (src, src_reg);
15961 set_mem_size (src, move_bytes);
15963 if (!REG_P (XEXP (dest, 0)))
15965 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
15966 dest = replace_equiv_address (dest, dest_reg);
15968 set_mem_size (dest, move_bytes);
15970 emit_insn ((*gen_func.movmemsi) (dest, src,
15971 GEN_INT (move_bytes & 31),
15972 align_rtx));
15976 return 1;
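/* Illustrative sketch (not part of this file) of the move loop's chunk
   selection above, largest first, restricted to the non-TARGET_STRING,
   non-SPE cases; the flag parameters are hypothetical stand-ins for
   TARGET_ALTIVEC, TARGET_POWERPC64 and STRICT_ALIGNMENT.  */
#if 0
static int
move_chunk_size (int bytes, int align_bits,
                 int have_altivec, int have_powerpc64, int strict_alignment)
{
  if (have_altivec && bytes >= 16 && align_bits >= 128)
    return 16;			/* V4SImode */
  if (have_powerpc64 && bytes >= 8 && (align_bits >= 64 || !strict_alignment))
    return 8;			/* DImode */
  if (bytes >= 4 && (align_bits >= 32 || !strict_alignment))
    return 4;			/* SImode */
  if (bytes >= 2 && (align_bits >= 16 || !strict_alignment))
    return 2;			/* HImode */
  return 1;			/* QImode */
}
#endif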
15980 /* Return a string to perform a load_multiple operation.
15981 operands[0] is the vector.
15982 operands[1] is the source address.
15983 operands[2] is the first destination register. */
15985 const char *
15986 rs6000_output_load_multiple (rtx operands[3])
15988 /* We have to handle the case where the pseudo used to contain the address
15989 is assigned to one of the output registers. */
15990 int i, j;
15991 int words = XVECLEN (operands[0], 0);
15992 rtx xop[10];
15994 if (XVECLEN (operands[0], 0) == 1)
15995 return "lwz %2,0(%1)";
15997 for (i = 0; i < words; i++)
15998 if (refers_to_regno_p (REGNO (operands[2]) + i,
15999 REGNO (operands[2]) + i + 1, operands[1], 0))
16001 if (i == words-1)
16003 xop[0] = GEN_INT (4 * (words-1));
16004 xop[1] = operands[1];
16005 xop[2] = operands[2];
16006 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
16007 return "";
16009 else if (i == 0)
16011 xop[0] = GEN_INT (4 * (words-1));
16012 xop[1] = operands[1];
16013 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
16014 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
16015 return "";
16017 else
16019 for (j = 0; j < words; j++)
16020 if (j != i)
16022 xop[0] = GEN_INT (j * 4);
16023 xop[1] = operands[1];
16024 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
16025 output_asm_insn ("lwz %2,%0(%1)", xop);
16027 xop[0] = GEN_INT (i * 4);
16028 xop[1] = operands[1];
16029 output_asm_insn ("lwz %1,%0(%1)", xop);
16030 return "";
16034 return "lswi %2,%1,%N0";
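/* Illustrative sketch (not part of this file) of the overlap analysis
   above: BASE is the number of the address register, FIRST..FIRST+WORDS-1
   the destination registers.  The enum and helper are hypothetical.  */
#if 0
enum lm_strategy
{
  LM_LSWI_ALL,		/* base is not a destination: one lswi */
  LM_BASE_IS_LAST,	/* lswi the first words-1 regs, then lwz into base */
  LM_BASE_IS_FIRST,	/* bump base by 4, lswi the rest, reload base */
  LM_BASE_IN_MIDDLE	/* word-by-word lwz, loading base's word last */
};

static enum lm_strategy
classify_load_multiple (int base, int first, int words)
{
  if (base < first || base >= first + words)
    return LM_LSWI_ALL;
  if (base == first + words - 1)
    return LM_BASE_IS_LAST;
  if (base == first)
    return LM_BASE_IS_FIRST;
  return LM_BASE_IN_MIDDLE;
}
#endif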
16038 /* A validation routine: say whether CODE, a condition code, and MODE
16039 match. The other alternatives either don't make sense or should
16040 never be generated. */
16042 void
16043 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
16045 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
16046 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
16047 && GET_MODE_CLASS (mode) == MODE_CC);
16049 /* These don't make sense. */
16050 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
16051 || mode != CCUNSmode);
16053 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
16054 || mode == CCUNSmode);
16056 gcc_assert (mode == CCFPmode
16057 || (code != ORDERED && code != UNORDERED
16058 && code != UNEQ && code != LTGT
16059 && code != UNGT && code != UNLT
16060 && code != UNGE && code != UNLE));
16062 /* These should never be generated except for
16063 flag_finite_math_only. */
16064 gcc_assert (mode != CCFPmode
16065 || flag_finite_math_only
16066 || (code != LE && code != GE
16067 && code != UNEQ && code != LTGT
16068 && code != UNGT && code != UNLT));
16070 /* These are invalid; the information is not there. */
16071 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
16075 /* Return 1 if ANDOP is a mask with no bits set that are outside the
16076 mask required to convert the result of a rotate insn into a shift
16077 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
16079 int
16080 includes_lshift_p (rtx shiftop, rtx andop)
16082 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16084 shift_mask <<= INTVAL (shiftop);
16086 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
16089 /* Similar, but for right shift. */
16091 int
16092 includes_rshift_p (rtx shiftop, rtx andop)
16094 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16096 shift_mask >>= INTVAL (shiftop);
16098 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
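/* Illustrative sketch with plain integers (not part of this file): both
   predicates above test that ANDOP has no 1 bits outside the mask a shift
   by SHIFTOP would leave behind.  The helper and sample values are
   hypothetical.  */
#if 0
static int
mask_ok_for_lshift (unsigned int andop, int shift)
{
  unsigned int shift_mask = ~0u << shift;	/* bits a left shift keeps */
  return (andop & ~shift_mask) == 0;
}
/* mask_ok_for_lshift (0xfffffff0u, 4) is 1: rotate-and-mask == slwi 4.
   mask_ok_for_lshift (0xfffffff8u, 4) is 0: bit 3 survives the mask.  */
#endif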
16101 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
16102 to perform a left shift. It must have exactly SHIFTOP least
16103 significant 0's, then one or more 1's, then zero or more 0's. */
16105 int
16106 includes_rldic_lshift_p (rtx shiftop, rtx andop)
16108 if (GET_CODE (andop) == CONST_INT)
16110 HOST_WIDE_INT c, lsb, shift_mask;
16112 c = INTVAL (andop);
16113 if (c == 0 || c == ~0)
16114 return 0;
16116 shift_mask = ~0;
16117 shift_mask <<= INTVAL (shiftop);
16119 /* Find the least significant one bit. */
16120 lsb = c & -c;
16122 /* It must coincide with the LSB of the shift mask. */
16123 if (-lsb != shift_mask)
16124 return 0;
16126 /* Invert to look for the next transition (if any). */
16127 c = ~c;
16129 /* Remove the low group of ones (originally low group of zeros). */
16130 c &= -lsb;
16132 /* Again find the lsb, and check we have all 1's above. */
16133 lsb = c & -c;
16134 return c == -lsb;
16136 else
16137 return 0;
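/* Illustrative sketch (not part of this file) of the (c & -c) idiom used
   above: it isolates the lowest set bit, first to check that the ones
   start exactly at bit SHIFT, then, after inverting, to check that the
   ones run unbroken toward the top.  The helper is hypothetical.  */
#if 0
static int
is_lshifted_ones_run (unsigned long long c, int shift)
{
  unsigned long long shift_mask = ~0ull << shift;
  unsigned long long lsb;

  if (c == 0 || c == ~0ull)
    return 0;
  lsb = c & -c;				/* lowest set bit */
  if (-lsb != shift_mask)		/* ones must start at bit SHIFT */
    return 0;
  c = ~c;				/* look for the end of the run */
  c &= -lsb;				/* drop the zeros already checked */
  lsb = c & -c;
  return c == -lsb;			/* no further transitions allowed */
}
#endif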
16140 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
16141 to perform a left shift. It must have SHIFTOP or more least
16142 significant 0's, with the remainder of the word 1's. */
16144 int
16145 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
16147 if (GET_CODE (andop) == CONST_INT)
16149 HOST_WIDE_INT c, lsb, shift_mask;
16151 shift_mask = ~0;
16152 shift_mask <<= INTVAL (shiftop);
16153 c = INTVAL (andop);
16155 /* Find the least significant one bit. */
16156 lsb = c & -c;
16158 /* It must be covered by the shift mask.
16159 This test also rejects c == 0. */
16160 if ((lsb & shift_mask) == 0)
16161 return 0;
16163 /* Check we have all 1's above the transition, and reject all 1's. */
16164 return c == -lsb && lsb != 1;
16166 else
16167 return 0;
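/* Illustrative companion to the rldicr test above (not part of this file):
   accept a single run of ones that reaches the MSB and starts at or above
   bit SHIFT.  The helper is hypothetical.  */
#if 0
static int
is_high_ones_run (unsigned long long c, int shift)
{
  unsigned long long shift_mask = ~0ull << shift;
  unsigned long long lsb = c & -c;	/* lowest set bit; 0 if c == 0 */

  if ((lsb & shift_mask) == 0)		/* starts below SHIFT, or c == 0 */
    return 0;
  return c == -lsb && lsb != 1;		/* contiguous to the top, not ~0 */
}
#endif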
16170 /* Return 1 if the operands will generate valid arguments to the rlwimi
16171 instruction for an insert with right shift in 64-bit mode. The mask may
16172 not start on the first bit or stop on the last bit because the wrap-around
16173 effects of the instruction do not correspond to the semantics of the RTL insn. */
16175 int
16176 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
16178 if (INTVAL (startop) > 32
16179 && INTVAL (startop) < 64
16180 && INTVAL (sizeop) > 1
16181 && INTVAL (sizeop) + INTVAL (startop) < 64
16182 && INTVAL (shiftop) > 0
16183 && INTVAL (sizeop) + INTVAL (shiftop) < 32
16184 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
16185 return 1;
16187 return 0;
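/* Illustrative restatement (not part of this file) of the range checks
   above with plain ints and a pair of sample triples; the helper is
   hypothetical.  */
#if 0
static int
insvdi_fields_ok (int size, int start, int shift)
{
  return (start > 32 && start < 64
          && size > 1
          && size + start < 64
          && shift > 0
          && size + shift < 32
          && 64 - (shift & 63) >= size);
}
/* insvdi_fields_ok (8, 40, 4) is 1; insvdi_fields_ok (8, 16, 4) is 0,
   because the field must start in the low 32 bits of the doubleword.  */
#endif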
16190 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
16191 for lfq and stfq insns iff the registers are hard registers. */
16193 int
16194 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
16196 /* We might have been passed a SUBREG. */
16197 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
16198 return 0;
16200 /* We might have been passed non floating point registers. */
16201 if (!FP_REGNO_P (REGNO (reg1))
16202 || !FP_REGNO_P (REGNO (reg2)))
16203 return 0;
16205 return (REGNO (reg1) == REGNO (reg2) - 1);
16208 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
16209 addr1 and addr2 must be in consecutive memory locations
16210 (addr2 == addr1 + 8). */
16212 int
16213 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
16215 rtx addr1, addr2;
16216 unsigned int reg1, reg2;
16217 int offset1, offset2;
16219 /* The mems cannot be volatile. */
16220 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
16221 return 0;
16223 addr1 = XEXP (mem1, 0);
16224 addr2 = XEXP (mem2, 0);
16226 /* Extract an offset (if used) from the first addr. */
16227 if (GET_CODE (addr1) == PLUS)
16229 /* If not a REG, return zero. */
16230 if (GET_CODE (XEXP (addr1, 0)) != REG)
16231 return 0;
16232 else
16234 reg1 = REGNO (XEXP (addr1, 0));
16235 /* The offset must be constant! */
16236 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
16237 return 0;
16238 offset1 = INTVAL (XEXP (addr1, 1));
16241 else if (GET_CODE (addr1) != REG)
16242 return 0;
16243 else
16245 reg1 = REGNO (addr1);
16246 /* This was a simple (mem (reg)) expression. Offset is 0. */
16247 offset1 = 0;
16250 /* And now for the second addr. */
16251 if (GET_CODE (addr2) == PLUS)
16253 /* If not a REG, return zero. */
16254 if (GET_CODE (XEXP (addr2, 0)) != REG)
16255 return 0;
16256 else
16258 reg2 = REGNO (XEXP (addr2, 0));
16259 /* The offset must be constant. */
16260 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
16261 return 0;
16262 offset2 = INTVAL (XEXP (addr2, 1));
16265 else if (GET_CODE (addr2) != REG)
16266 return 0;
16267 else
16269 reg2 = REGNO (addr2);
16270 /* This was a simple (mem (reg)) expression. Offset is 0. */
16271 offset2 = 0;
16274 /* Both of these must have the same base register. */
16275 if (reg1 != reg2)
16276 return 0;
16278 /* The offset for the second addr must be 8 more than the first addr. */
16279 if (offset2 != offset1 + 8)
16280 return 0;
16282 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16283 instructions. */
16284 return 1;
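/* Illustrative sketch (not part of this file): with the rtx plumbing
   stripped away, the test above is just "same base register, and the
   second offset is the first plus 8".  The struct and helper are
   hypothetical.  */
#if 0
struct simple_addr { int base_reg; int offset; };	/* (reg) or (reg+const) */

static int
addrs_ok_for_quad (struct simple_addr a1, struct simple_addr a2)
{
  return a1.base_reg == a2.base_reg && a2.offset == a1.offset + 8;
}
#endif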
16288 rtx
16289 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
16291 static bool eliminated = false;
16292 rtx ret;
16294 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
16295 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16296 else
16298 rtx mem = cfun->machine->sdmode_stack_slot;
16299 gcc_assert (mem != NULL_RTX);
16301 if (!eliminated)
16303 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
16304 cfun->machine->sdmode_stack_slot = mem;
16305 eliminated = true;
16307 ret = mem;
16310 if (TARGET_DEBUG_ADDR)
16312 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16313 GET_MODE_NAME (mode));
16314 if (!ret)
16315 fprintf (stderr, "\tNULL_RTX\n");
16316 else
16317 debug_rtx (ret);
16320 return ret;
16323 /* Return the mode to be used for memory when a secondary memory
16324 location is needed. For SDmode values we need to use DDmode; in
16325 all other cases we can use the same mode. */
16326 enum machine_mode
16327 rs6000_secondary_memory_needed_mode (enum machine_mode mode)
16329 if (lra_in_progress && mode == SDmode)
16330 return DDmode;
16331 return mode;
16334 static tree
16335 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
16337 /* Don't walk into types. */
16338 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
16340 *walk_subtrees = 0;
16341 return NULL_TREE;
16344 switch (TREE_CODE (*tp))
16346 case VAR_DECL:
16347 case PARM_DECL:
16348 case FIELD_DECL:
16349 case RESULT_DECL:
16350 case SSA_NAME:
16351 case REAL_CST:
16352 case MEM_REF:
16353 case VIEW_CONVERT_EXPR:
16354 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
16355 return *tp;
16356 break;
16357 default:
16358 break;
16361 return NULL_TREE;
16364 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16365 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16366 only work on the traditional altivec registers, note if an altivec register
16367 was chosen. */
16369 static enum rs6000_reg_type
16370 register_to_reg_type (rtx reg, bool *is_altivec)
16372 HOST_WIDE_INT regno;
16373 enum reg_class rclass;
16375 if (GET_CODE (reg) == SUBREG)
16376 reg = SUBREG_REG (reg);
16378 if (!REG_P (reg))
16379 return NO_REG_TYPE;
16381 regno = REGNO (reg);
16382 if (regno >= FIRST_PSEUDO_REGISTER)
16384 if (!lra_in_progress && !reload_in_progress && !reload_completed)
16385 return PSEUDO_REG_TYPE;
16387 regno = true_regnum (reg);
16388 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16389 return PSEUDO_REG_TYPE;
16392 gcc_assert (regno >= 0);
16394 if (is_altivec && ALTIVEC_REGNO_P (regno))
16395 *is_altivec = true;
16397 rclass = rs6000_regno_regclass[regno];
16398 return reg_class_to_reg_type[(int)rclass];
16401 /* Helper function for rs6000_secondary_reload to return true if a move to a
16402 different register class is really a simple move. */
16404 static bool
16405 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16406 enum rs6000_reg_type from_type,
16407 enum machine_mode mode)
16409 int size;
16411 /* Add support for various direct moves available. In this function, we only
16412 look at cases where we don't need any extra registers, and one or more
16413 simple move insns are issued. At present, 32-bit integers are not allowed
16414 in FPR/VSX registers. Single precision binary floating point is not a
16415 simple move because we need to convert to the single precision memory layout.
16416 The 4-byte SDmode can be moved. */
16417 size = GET_MODE_SIZE (mode);
16418 if (TARGET_DIRECT_MOVE
16419 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
16420 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16421 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
16422 return true;
16424 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
16425 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
16426 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16427 return true;
16429 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
16430 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
16431 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16432 return true;
16434 return false;
16437 /* Power8 helper function for rs6000_secondary_reload; handle all of the
16438 special direct moves that involve allocating an extra register. Return
16439 true if there is an applicable helper function, filling in SRI with its
16440 insn code and extra cost, and false if not. */
16442 static bool
16443 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
16444 enum rs6000_reg_type from_type,
16445 enum machine_mode mode,
16446 secondary_reload_info *sri,
16447 bool altivec_p)
16449 bool ret = false;
16450 enum insn_code icode = CODE_FOR_nothing;
16451 int cost = 0;
16452 int size = GET_MODE_SIZE (mode);
16454 if (TARGET_POWERPC64)
16456 if (size == 16)
16458 /* Handle moving 128-bit values from GPRs to VSX registers on
16459 power8 when running in 64-bit mode using XXPERMDI to glue the two
16460 64-bit values back together. */
16461 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16463 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16464 icode = reg_addr[mode].reload_vsx_gpr;
16467 /* Handle moving 128-bit values from VSX registers to GPRs on
16468 power8 when running in 64-bit mode using XXPERMDI to get access to the
16469 bottom 64-bit value. */
16470 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16472 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16473 icode = reg_addr[mode].reload_gpr_vsx;
16477 else if (mode == SFmode)
16479 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16481 cost = 3; /* xscvdpspn, mfvsrd, and. */
16482 icode = reg_addr[mode].reload_gpr_vsx;
16485 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16487 cost = 2; /* mtvsrz, xscvspdpn. */
16488 icode = reg_addr[mode].reload_vsx_gpr;
16493 if (TARGET_POWERPC64 && size == 16)
16495 /* Handle moving 128-bit values from GPRs to VSX registers on
16496 power8 when running in 64-bit mode using XXPERMDI to glue the two
16497 64-bit values back together. */
16498 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16500 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16501 icode = reg_addr[mode].reload_vsx_gpr;
16504 /* Handle moving 128-bit values from VSX registers to GPRs on
16505 power8 when running in 64-bit mode using XXPERMDI to get access to the
16506 bottom 64-bit value. */
16507 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16509 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16510 icode = reg_addr[mode].reload_gpr_vsx;
16514 else if (!TARGET_POWERPC64 && size == 8)
16516 /* Handle moving 64-bit values from GPRs to floating point registers on
16517 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16518 values back together. Altivec register classes must be handled
16519 specially since a different instruction is used, and the secondary
16520 reload support requires a single instruction class in the scratch
16521 register constraint. However, right now TFmode is not allowed in
16522 Altivec registers, so the pattern will never match. */
16523 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
16525 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
16526 icode = reg_addr[mode].reload_fpr_gpr;
16530 if (icode != CODE_FOR_nothing)
16532 ret = true;
16533 if (sri)
16535 sri->icode = icode;
16536 sri->extra_cost = cost;
16540 return ret;
16543 /* Return whether a move between two register classes can be done either
16544 directly (simple move) or via a pattern that uses a single extra temporary
16545 (using power8's direct move in this case). */
16547 static bool
16548 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
16549 enum rs6000_reg_type from_type,
16550 enum machine_mode mode,
16551 secondary_reload_info *sri,
16552 bool altivec_p)
16554 /* Fall back to load/store reloads if either type is not a register. */
16555 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
16556 return false;
16558 /* If we haven't allocated registers yet, assume the move can be done for the
16559 standard register types. */
16560 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
16561 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
16562 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
16563 return true;
16565 /* Moves to the same set of registers are simple moves for non-specialized
16566 registers. */
16567 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
16568 return true;
16570 /* Check whether a simple move can be done directly. */
16571 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
16573 if (sri)
16575 sri->icode = CODE_FOR_nothing;
16576 sri->extra_cost = 0;
16578 return true;
16581 /* Now check if we can do it in a few steps. */
16582 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
16583 altivec_p);
16586 /* Inform reload about cases where moving X with a mode MODE to a register in
16587 RCLASS requires an extra scratch or immediate register. Return the class
16588 needed for the immediate register.
16590 For VSX and Altivec, we may need a register to convert sp+offset into
16591 reg+sp.
16593 For misaligned 64-bit gpr loads and stores we need a register to
16594 convert an offset address to indirect. */
16596 static reg_class_t
16597 rs6000_secondary_reload (bool in_p,
16598 rtx x,
16599 reg_class_t rclass_i,
16600 enum machine_mode mode,
16601 secondary_reload_info *sri)
16603 enum reg_class rclass = (enum reg_class) rclass_i;
16604 reg_class_t ret = ALL_REGS;
16605 enum insn_code icode;
16606 bool default_p = false;
16608 sri->icode = CODE_FOR_nothing;
16609 icode = ((in_p)
16610 ? reg_addr[mode].reload_load
16611 : reg_addr[mode].reload_store);
16613 if (REG_P (x) || register_operand (x, mode))
16615 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
16616 bool altivec_p = (rclass == ALTIVEC_REGS);
16617 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
16619 if (!in_p)
16621 enum rs6000_reg_type exchange = to_type;
16622 to_type = from_type;
16623 from_type = exchange;
16626 /* Can we do a direct move of some sort? */
16627 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
16628 altivec_p))
16630 icode = (enum insn_code)sri->icode;
16631 default_p = false;
16632 ret = NO_REGS;
16636 /* Handle vector moves with reload helper functions. */
16637 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
16639 ret = NO_REGS;
16640 sri->icode = CODE_FOR_nothing;
16641 sri->extra_cost = 0;
16643 if (GET_CODE (x) == MEM)
16645 rtx addr = XEXP (x, 0);
16647 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
16648 an extra register in that case, but it would need an extra
16649 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16650 case load/store quad. */
16651 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
16653 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
16654 && GET_MODE_SIZE (mode) == 16
16655 && quad_memory_operand (x, mode))
16657 sri->icode = icode;
16658 sri->extra_cost = 2;
16661 else if (!legitimate_indirect_address_p (addr, false)
16662 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16663 false, true))
16665 sri->icode = icode;
16666 /* account for splitting the loads, and converting the
16667 address from reg+reg to reg. */
16668 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
16669 + ((GET_CODE (addr) == AND) ? 1 : 0));
16672 /* Allow scalar loads to/from the traditional floating point
16673 registers, even if VSX memory is set. */
16674 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
16675 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16676 && (legitimate_indirect_address_p (addr, false)
16677 || legitimate_indexed_address_p (addr, false)
16678 || rs6000_legitimate_offset_address_p (mode, addr,
16679 false, true)))
16682 /* Loads to and stores from vector registers can only do reg+reg
16683 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16684 scalar modes loading up the traditional floating point registers
16685 to use offset addresses. */
16686 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
16687 || rclass == FLOAT_REGS || rclass == NO_REGS)
16689 if (!VECTOR_MEM_ALTIVEC_P (mode)
16690 && GET_CODE (addr) == AND
16691 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16692 && INTVAL (XEXP (addr, 1)) == -16
16693 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16694 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
16696 sri->icode = icode;
16697 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
16698 ? 2 : 1);
16700 else if (!legitimate_indirect_address_p (addr, false)
16701 && (rclass == NO_REGS
16702 || !legitimate_indexed_address_p (addr, false)))
16704 sri->icode = icode;
16705 sri->extra_cost = 1;
16707 else
16708 icode = CODE_FOR_nothing;
16710 /* Any other loads, including to pseudo registers which haven't been
16711 assigned to a register yet, default to requiring a scratch
16712 register. */
16713 else
16715 sri->icode = icode;
16716 sri->extra_cost = 2;
16719 else if (REG_P (x))
16721 int regno = true_regnum (x);
16723 icode = CODE_FOR_nothing;
16724 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16725 default_p = true;
16726 else
16728 enum reg_class xclass = REGNO_REG_CLASS (regno);
16729 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
16730 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
16732 /* If memory is needed, use default_secondary_reload to create the
16733 stack slot. */
16734 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
16735 default_p = true;
16736 else
16737 ret = NO_REGS;
16740 else
16741 default_p = true;
16743 else if (TARGET_POWERPC64
16744 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16745 && MEM_P (x)
16746 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
16748 rtx addr = XEXP (x, 0);
16749 rtx off = address_offset (addr);
16751 if (off != NULL_RTX)
16753 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16754 unsigned HOST_WIDE_INT offset = INTVAL (off);
16756 /* We need a secondary reload when our legitimate_address_p
16757 says the address is good (as otherwise the entire address
16758 will be reloaded), and the offset is not a multiple of
16759 four or we have an address wrap. Address wrap will only
16760 occur for LO_SUMs since legitimate_offset_address_p
16761 rejects addresses for 16-byte mems that will wrap. */
16762 if (GET_CODE (addr) == LO_SUM
16763 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16764 && ((offset & 3) != 0
16765 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
16766 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
16767 && (offset & 3) != 0))
16769 if (in_p)
16770 sri->icode = CODE_FOR_reload_di_load;
16771 else
16772 sri->icode = CODE_FOR_reload_di_store;
16773 sri->extra_cost = 2;
16774 ret = NO_REGS;
16776 else
16777 default_p = true;
16779 else
16780 default_p = true;
16782 else if (!TARGET_POWERPC64
16783 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16784 && MEM_P (x)
16785 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
16787 rtx addr = XEXP (x, 0);
16788 rtx off = address_offset (addr);
16790 if (off != NULL_RTX)
16792 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16793 unsigned HOST_WIDE_INT offset = INTVAL (off);
16795 /* We need a secondary reload when our legitimate_address_p
16796 says the address is good (as otherwise the entire address
16797 will be reloaded), and we have a wrap.
16799 legitimate_lo_sum_address_p allows LO_SUM addresses to
16800 have any offset so test for wrap in the low 16 bits.
16802 legitimate_offset_address_p checks for the range
16803 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16804 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16805 [0x7ff4,0x7fff] respectively, so test for the
16806 intersection of these ranges, [0x7ffc,0x7fff] and
16807 [0x7ff4,0x7ff7] respectively.
16809 Note that the address we see here may have been
16810 manipulated by legitimize_reload_address. */
16811 if (GET_CODE (addr) == LO_SUM
16812 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
16813 : offset - (0x8000 - extra) < UNITS_PER_WORD)
16815 if (in_p)
16816 sri->icode = CODE_FOR_reload_si_load;
16817 else
16818 sri->icode = CODE_FOR_reload_si_store;
16819 sri->extra_cost = 2;
16820 ret = NO_REGS;
16822 else
16823 default_p = true;
16825 else
16826 default_p = true;
16828 else
16829 default_p = true;
16831 if (default_p)
16832 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
16834 gcc_assert (ret != ALL_REGS);
16836 if (TARGET_DEBUG_ADDR)
16838 fprintf (stderr,
16839 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16840 "mode = %s",
16841 reg_class_names[ret],
16842 in_p ? "true" : "false",
16843 reg_class_names[rclass],
16844 GET_MODE_NAME (mode));
16846 if (default_p)
16847 fprintf (stderr, ", default secondary reload");
16849 if (sri->icode != CODE_FOR_nothing)
16850 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
16851 insn_data[sri->icode].name, sri->extra_cost);
16852 else
16853 fprintf (stderr, "\n");
16855 debug_rtx (x);
16858 return ret;
16861 /* Better tracing for rs6000_secondary_reload_inner. */
16863 static void
16864 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
16865 bool store_p)
16867 rtx set, clobber;
16869 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
16871 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
16872 store_p ? "store" : "load");
16874 if (store_p)
16875 set = gen_rtx_SET (VOIDmode, mem, reg);
16876 else
16877 set = gen_rtx_SET (VOIDmode, reg, mem);
16879 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
16880 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
16883 static void
16884 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
16885 bool store_p)
16887 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
16888 gcc_unreachable ();
16891 /* Fix up reload addresses for Altivec or VSX loads/stores to change SP+offset
16892 to SP+reg addressing. */
16894 void
16895 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
16897 int regno = true_regnum (reg);
16898 enum machine_mode mode = GET_MODE (reg);
16899 enum reg_class rclass;
16900 rtx addr;
16901 rtx and_op2 = NULL_RTX;
16902 rtx addr_op1;
16903 rtx addr_op2;
16904 rtx scratch_or_premodify = scratch;
16905 rtx and_rtx;
16906 rtx cc_clobber;
16908 if (TARGET_DEBUG_ADDR)
16909 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
16911 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16912 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16914 if (GET_CODE (mem) != MEM)
16915 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16917 rclass = REGNO_REG_CLASS (regno);
16918 addr = find_replacement (&XEXP (mem, 0));
16920 switch (rclass)
16922 /* GPRs can handle reg + small constant, all other addresses need to use
16923 the scratch register. */
16924 case GENERAL_REGS:
16925 case BASE_REGS:
16926 if (GET_CODE (addr) == AND)
16928 and_op2 = XEXP (addr, 1);
16929 addr = find_replacement (&XEXP (addr, 0));
16932 if (GET_CODE (addr) == PRE_MODIFY)
16934 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16935 if (!REG_P (scratch_or_premodify))
16936 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16938 addr = find_replacement (&XEXP (addr, 1));
16939 if (GET_CODE (addr) != PLUS)
16940 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16943 if (GET_CODE (addr) == PLUS
16944 && (and_op2 != NULL_RTX
16945 || !rs6000_legitimate_offset_address_p (PTImode, addr,
16946 false, true)))
16948 /* find_replacement already recurses into both operands of
16949 PLUS so we don't need to call it here. */
16950 addr_op1 = XEXP (addr, 0);
16951 addr_op2 = XEXP (addr, 1);
16952 if (!legitimate_indirect_address_p (addr_op1, false))
16953 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16955 if (!REG_P (addr_op2)
16956 && (GET_CODE (addr_op2) != CONST_INT
16957 || !satisfies_constraint_I (addr_op2)))
16959 if (TARGET_DEBUG_ADDR)
16961 fprintf (stderr,
16962 "\nMove plus addr to register %s, mode = %s: ",
16963 rs6000_reg_names[REGNO (scratch)],
16964 GET_MODE_NAME (mode));
16965 debug_rtx (addr_op2);
16967 rs6000_emit_move (scratch, addr_op2, Pmode);
16968 addr_op2 = scratch;
16971 emit_insn (gen_rtx_SET (VOIDmode,
16972 scratch_or_premodify,
16973 gen_rtx_PLUS (Pmode,
16974 addr_op1,
16975 addr_op2)));
16977 addr = scratch_or_premodify;
16978 scratch_or_premodify = scratch;
16980 else if (!legitimate_indirect_address_p (addr, false)
16981 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16982 false, true))
16984 if (TARGET_DEBUG_ADDR)
16986 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16987 rs6000_reg_names[REGNO (scratch_or_premodify)],
16988 GET_MODE_NAME (mode));
16989 debug_rtx (addr);
16991 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16992 addr = scratch_or_premodify;
16993 scratch_or_premodify = scratch;
16995 break;
16997 /* Float registers can do offset+reg addressing for scalar types. */
16998 case FLOAT_REGS:
16999 if (legitimate_indirect_address_p (addr, false) /* reg */
17000 || legitimate_indexed_address_p (addr, false) /* reg+reg */
17001 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
17002 && and_op2 == NULL_RTX
17003 && scratch_or_premodify == scratch
17004 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
17005 break;
17007 /* If this isn't a legacy floating point load/store, fall through to the
17008 VSX defaults. */
17010 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
17011 addresses into a scratch register. */
17012 case VSX_REGS:
17013 case ALTIVEC_REGS:
17015 /* With float regs, we need to handle the AND ourselves, since we can't
17016 use the Altivec instruction with an implicit AND -16. Allow scalar
17017 loads to float registers to use reg+offset even if VSX. */
17018 if (GET_CODE (addr) == AND
17019 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
17020 || GET_CODE (XEXP (addr, 1)) != CONST_INT
17021 || INTVAL (XEXP (addr, 1)) != -16
17022 || !VECTOR_MEM_ALTIVEC_P (mode)))
17024 and_op2 = XEXP (addr, 1);
17025 addr = find_replacement (&XEXP (addr, 0));
17028 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
17029 as the address later. */
17030 if (GET_CODE (addr) == PRE_MODIFY
17031 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
17032 && (rclass != FLOAT_REGS
17033 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
17034 || and_op2 != NULL_RTX
17035 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
17037 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
17038 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
17039 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17041 addr = find_replacement (&XEXP (addr, 1));
17042 if (GET_CODE (addr) != PLUS)
17043 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17046 if (legitimate_indirect_address_p (addr, false) /* reg */
17047 || legitimate_indexed_address_p (addr, false) /* reg+reg */
17048 || (GET_CODE (addr) == AND /* Altivec memory */
17049 && rclass == ALTIVEC_REGS
17050 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17051 && INTVAL (XEXP (addr, 1)) == -16
17052 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
17053 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
17056 else if (GET_CODE (addr) == PLUS)
17058 addr_op1 = XEXP (addr, 0);
17059 addr_op2 = XEXP (addr, 1);
17060 if (!REG_P (addr_op1))
17061 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17063 if (TARGET_DEBUG_ADDR)
17065 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
17066 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
17067 debug_rtx (addr_op2);
17069 rs6000_emit_move (scratch, addr_op2, Pmode);
17070 emit_insn (gen_rtx_SET (VOIDmode,
17071 scratch_or_premodify,
17072 gen_rtx_PLUS (Pmode,
17073 addr_op1,
17074 scratch)));
17075 addr = scratch_or_premodify;
17076 scratch_or_premodify = scratch;
17079 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
17080 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
17081 || REG_P (addr))
17083 if (TARGET_DEBUG_ADDR)
17085 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
17086 rs6000_reg_names[REGNO (scratch_or_premodify)],
17087 GET_MODE_NAME (mode));
17088 debug_rtx (addr);
17091 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17092 addr = scratch_or_premodify;
17093 scratch_or_premodify = scratch;
17096 else
17097 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17099 break;
17101 default:
17102 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17105 /* If the original address involved a pre-modify that we couldn't use with the
17106 VSX memory instruction's update form, and we haven't already taken care of
17107 it, store the address in the pre-modify register and use that as the
17108 address. */
17109 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
17111 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
17112 addr = scratch_or_premodify;
17115 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
17116 memory instruction, recreate the AND now, including the clobber which is
17117 generated by the general ANDSI3/ANDDI3 patterns for the
17118 andi. instruction. */
17119 if (and_op2 != NULL_RTX)
17121 if (! legitimate_indirect_address_p (addr, false))
17123 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
17124 addr = scratch;
17127 if (TARGET_DEBUG_ADDR)
17129 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
17130 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
17131 debug_rtx (and_op2);
17134 and_rtx = gen_rtx_SET (VOIDmode,
17135 scratch,
17136 gen_rtx_AND (Pmode,
17137 addr,
17138 and_op2));
17140 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
17141 emit_insn (gen_rtx_PARALLEL (VOIDmode,
17142 gen_rtvec (2, and_rtx, cc_clobber)));
17143 addr = scratch;
17146 /* Adjust the address if it changed. */
17147 if (addr != XEXP (mem, 0))
17149 mem = replace_equiv_address_nv (mem, addr);
17150 if (TARGET_DEBUG_ADDR)
17151 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
17154 /* Now create the move. */
17155 if (store_p)
17156 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17157 else
17158 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17160 return;
17163 /* Convert reloads involving 64-bit gprs and misaligned offset
17164 addressing, or multiple 32-bit gprs and offsets that are too large,
17165 to use indirect addressing. */
17167 void
17168 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
17170 int regno = true_regnum (reg);
17171 enum reg_class rclass;
17172 rtx addr;
17173 rtx scratch_or_premodify = scratch;
17175 if (TARGET_DEBUG_ADDR)
17177 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
17178 store_p ? "store" : "load");
17179 fprintf (stderr, "reg:\n");
17180 debug_rtx (reg);
17181 fprintf (stderr, "mem:\n");
17182 debug_rtx (mem);
17183 fprintf (stderr, "scratch:\n");
17184 debug_rtx (scratch);
17187 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
17188 gcc_assert (GET_CODE (mem) == MEM);
17189 rclass = REGNO_REG_CLASS (regno);
17190 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
17191 addr = XEXP (mem, 0);
17193 if (GET_CODE (addr) == PRE_MODIFY)
17195 scratch_or_premodify = XEXP (addr, 0);
17196 gcc_assert (REG_P (scratch_or_premodify));
17197 addr = XEXP (addr, 1);
17199 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
17201 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17203 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
17205 /* Now create the move. */
17206 if (store_p)
17207 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17208 else
17209 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17211 return;
17214 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
17215 this function has any SDmode references. If we are on a power7 or later, we
17216 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
17217 can load/store the value. */
17219 static void
17220 rs6000_alloc_sdmode_stack_slot (void)
17222 tree t;
17223 basic_block bb;
17224 gimple_stmt_iterator gsi;
17226 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
17227 /* We use a different approach for dealing with the secondary
17228 memory in LRA. */
17229 if (ira_use_lra_p)
17230 return;
17232 if (TARGET_NO_SDMODE_STACK)
17233 return;
17235 FOR_EACH_BB_FN (bb, cfun)
17236 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
17238 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
17239 if (ret)
17241 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17242 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17243 SDmode, 0);
17244 return;
17248 /* Check for any SDmode parameters of the function. */
17249 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
17251 if (TREE_TYPE (t) == error_mark_node)
17252 continue;
17254 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
17255 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
17257 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17258 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17259 SDmode, 0);
17260 return;
17265 static void
17266 rs6000_instantiate_decls (void)
17268 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
17269 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
17272 /* Given an rtx X being reloaded into a reg required to be
17273 in class CLASS, return the class of reg to actually use.
17274 In general this is just CLASS; but on some machines
17275 in some cases it is preferable to use a more restrictive class.
17277 On the RS/6000, we have to return NO_REGS when we want to reload a
17278 floating-point CONST_DOUBLE to force it to be copied to memory.
17280 We also don't want to reload integer values into floating-point
17281 registers if we can at all help it. In fact, this can
17282 cause reload to die, if it tries to generate a reload of CTR
17283 into a FP register and discovers it doesn't have the memory location
17284 required.
17286 ??? Would it be a good idea to have reload do the converse, that is
17287 try to reload floating modes into FP registers if possible?
17290 static enum reg_class
17291 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
17293 enum machine_mode mode = GET_MODE (x);
17295 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
17296 return rclass;
17298 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17299 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
17300 && easy_vector_constant (x, mode))
17301 return ALTIVEC_REGS;
17303 if ((CONSTANT_P (x) || GET_CODE (x) == PLUS))
17305 if (reg_class_subset_p (GENERAL_REGS, rclass))
17306 return GENERAL_REGS;
17307 if (reg_class_subset_p (BASE_REGS, rclass))
17308 return BASE_REGS;
17309 return NO_REGS;
17312 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
17313 return GENERAL_REGS;
17315 /* For VSX, prefer the traditional registers for 64-bit values because we can
17316 use the non-VSX loads. Prefer the Altivec registers if Altivec is
17317 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
17318 prefer Altivec loads. */
17319 if (rclass == VSX_REGS)
17321 if (MEM_P (x) && reg_addr[mode].scalar_in_vmx_p)
17323 rtx addr = XEXP (x, 0);
17324 if (rs6000_legitimate_offset_address_p (mode, addr, false, true)
17325 || legitimate_lo_sum_address_p (mode, addr, false))
17326 return FLOAT_REGS;
17328 else if (GET_MODE_SIZE (mode) <= 8 && !reg_addr[mode].scalar_in_vmx_p)
17329 return FLOAT_REGS;
17331 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
17332 || mode == V1TImode)
17333 return ALTIVEC_REGS;
17335 return rclass;
17338 return rclass;
17341 /* Debug version of rs6000_preferred_reload_class. */
17342 static enum reg_class
17343 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
17345 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
17347 fprintf (stderr,
17348 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17349 "mode = %s, x:\n",
17350 reg_class_names[ret], reg_class_names[rclass],
17351 GET_MODE_NAME (GET_MODE (x)));
17352 debug_rtx (x);
17354 return ret;
17357 /* If we are copying between FP or AltiVec registers and anything else, we need
17358 a memory location. The exception is when we are targeting ppc64 and the
17359 direct fpr/gpr move instructions are available. Also, under VSX, you
17360 can copy vector registers from the FP register set to the Altivec register
17361 set and vice versa. */
17363 static bool
17364 rs6000_secondary_memory_needed (enum reg_class from_class,
17365 enum reg_class to_class,
17366 enum machine_mode mode)
17368 enum rs6000_reg_type from_type, to_type;
17369 bool altivec_p = ((from_class == ALTIVEC_REGS)
17370 || (to_class == ALTIVEC_REGS));
17372 /* If a simple/direct move is available, we don't need secondary memory. */
17373 from_type = reg_class_to_reg_type[(int)from_class];
17374 to_type = reg_class_to_reg_type[(int)to_class];
17376 if (rs6000_secondary_reload_move (to_type, from_type, mode,
17377 (secondary_reload_info *)0, altivec_p))
17378 return false;
17380 /* If we have a floating point or vector register class, we need to use
17381 memory to transfer the data. */
17382 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
17383 return true;
17385 return false;
17388 /* Debug version of rs6000_secondary_memory_needed. */
17389 static bool
17390 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
17391 enum reg_class to_class,
17392 enum machine_mode mode)
17394 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
17396 fprintf (stderr,
17397 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17398 "to_class = %s, mode = %s\n",
17399 ret ? "true" : "false",
17400 reg_class_names[from_class],
17401 reg_class_names[to_class],
17402 GET_MODE_NAME (mode));
17404 return ret;
17407 /* Return the register class of a scratch register needed to copy IN into
17408 or out of a register in RCLASS in MODE. If it can be done directly,
17409 NO_REGS is returned. */
17411 static enum reg_class
17412 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
17413 rtx in)
17415 int regno;
17417 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17418 #if TARGET_MACHO
17419 && MACHOPIC_INDIRECT
17420 #endif
17423 /* We cannot copy a symbolic operand directly into anything
17424 other than BASE_REGS for TARGET_ELF. So indicate that a
17425 register from BASE_REGS is needed as an intermediate
17426 register.
17428 On Darwin, pic addresses require a load from memory, which
17429 needs a base register. */
17430 if (rclass != BASE_REGS
17431 && (GET_CODE (in) == SYMBOL_REF
17432 || GET_CODE (in) == HIGH
17433 || GET_CODE (in) == LABEL_REF
17434 || GET_CODE (in) == CONST))
17435 return BASE_REGS;
17438 if (GET_CODE (in) == REG)
17440 regno = REGNO (in);
17441 if (regno >= FIRST_PSEUDO_REGISTER)
17443 regno = true_regnum (in);
17444 if (regno >= FIRST_PSEUDO_REGISTER)
17445 regno = -1;
17448 else if (GET_CODE (in) == SUBREG)
17450 regno = true_regnum (in);
17451 if (regno >= FIRST_PSEUDO_REGISTER)
17452 regno = -1;
17454 else
17455 regno = -1;
17457 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17458 into anything. */
17459 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17460 || (regno >= 0 && INT_REGNO_P (regno)))
17461 return NO_REGS;
17463 /* Constants, memory, and FP registers can go into FP registers. */
17464 if ((regno == -1 || FP_REGNO_P (regno))
17465 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17466 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17468 /* Memory and FP/altivec registers can go into fp/altivec registers under
17469 VSX. However, for scalar variables, use the traditional floating point
17470 registers so that we can use offset+register addressing. */
17471 if (TARGET_VSX
17472 && (regno == -1 || VSX_REGNO_P (regno))
17473 && VSX_REG_CLASS_P (rclass))
17475 if (GET_MODE_SIZE (mode) < 16)
17476 return FLOAT_REGS;
17478 return NO_REGS;
17481 /* Memory and AltiVec registers can go into AltiVec registers. */
17482 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17483 && rclass == ALTIVEC_REGS)
17484 return NO_REGS;
17486 /* We can copy among the CR registers. */
17487 if ((rclass == CR_REGS || rclass == CR0_REGS)
17488 && regno >= 0 && CR_REGNO_P (regno))
17489 return NO_REGS;
17491 /* Otherwise, we need GENERAL_REGS. */
17492 return GENERAL_REGS;
17495 /* Debug version of rs6000_secondary_reload_class. */
17496 static enum reg_class
17497 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17498 enum machine_mode mode, rtx in)
17500 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17501 fprintf (stderr,
17502 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17503 "mode = %s, input rtx:\n",
17504 reg_class_names[ret], reg_class_names[rclass],
17505 GET_MODE_NAME (mode));
17506 debug_rtx (in);
17508 return ret;
17511 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17513 static bool
17514 rs6000_cannot_change_mode_class (enum machine_mode from,
17515 enum machine_mode to,
17516 enum reg_class rclass)
17518 unsigned from_size = GET_MODE_SIZE (from);
17519 unsigned to_size = GET_MODE_SIZE (to);
17521 if (from_size != to_size)
17523 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
17525 if (reg_classes_intersect_p (xclass, rclass))
17527 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
17528 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
17530 /* Don't allow 64-bit types to overlap with 128-bit types that take a
17531 single register under VSX because the scalar part of the register
17532 is in the upper 64-bits, and not the lower 64-bits. Types like
17533 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
17534 IEEE floating point can't overlap, and neither can small
17535 values. */
17537 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
17538 return true;
17540 /* TDmode in floating-point registers must always go into a register
17541 pair with the most significant word in the even-numbered register
17542 to match ISA requirements. In little-endian mode, this does not
17543 match subreg numbering, so we cannot allow subregs. */
17544 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
17545 return true;
17547 if (from_size < 8 || to_size < 8)
17548 return true;
17550 if (from_size == 8 && (8 * to_nregs) != to_size)
17551 return true;
17553 if (to_size == 8 && (8 * from_nregs) != from_size)
17554 return true;
17556 return false;
17558 else
17559 return false;
17562 if (TARGET_E500_DOUBLE
17563 && ((((to) == DFmode) + ((from) == DFmode)) == 1
17564 || (((to) == TFmode) + ((from) == TFmode)) == 1
17565 || (((to) == DDmode) + ((from) == DDmode)) == 1
17566 || (((to) == TDmode) + ((from) == TDmode)) == 1
17567 || (((to) == DImode) + ((from) == DImode)) == 1))
17568 return true;
17570 /* Since the VSX register set includes traditional floating point registers
17571 and altivec registers, just check for the size being different instead of
17572 trying to check whether the modes are vector modes. Otherwise it won't
17573 allow, say, DF and DI to change classes. For types like TFmode and TDmode
17574 that take 2 64-bit registers, rather than a single 128-bit register, don't
17575 allow subregs of those types to other 128-bit types. */
17576 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
17578 unsigned num_regs = (from_size + 15) / 16;
17579 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
17580 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
17581 return true;
17583 return (from_size != 8 && from_size != 16);
17586 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
17587 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
17588 return true;
17590 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
17591 && reg_classes_intersect_p (GENERAL_REGS, rclass))
17592 return true;
17594 return false;
17597 /* Debug version of rs6000_cannot_change_mode_class. */
17598 static bool
17599 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
17600 enum machine_mode to,
17601 enum reg_class rclass)
17603 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
17605 fprintf (stderr,
17606 "rs6000_cannot_change_mode_class, return %s, from = %s, "
17607 "to = %s, rclass = %s\n",
17608 ret ? "true" : "false",
17609 GET_MODE_NAME (from), GET_MODE_NAME (to),
17610 reg_class_names[rclass]);
17612 return ret;
17615 /* Return a string to do a move operation of 128 bits of data. */
17617 const char *
17618 rs6000_output_move_128bit (rtx operands[])
17620 rtx dest = operands[0];
17621 rtx src = operands[1];
17622 enum machine_mode mode = GET_MODE (dest);
17623 int dest_regno;
17624 int src_regno;
17625 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
17626 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
17628 if (REG_P (dest))
17630 dest_regno = REGNO (dest);
17631 dest_gpr_p = INT_REGNO_P (dest_regno);
17632 dest_fp_p = FP_REGNO_P (dest_regno);
17633 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
17634 dest_vsx_p = dest_fp_p | dest_vmx_p;
17636 else
17638 dest_regno = -1;
17639 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
17642 if (REG_P (src))
17644 src_regno = REGNO (src);
17645 src_gpr_p = INT_REGNO_P (src_regno);
17646 src_fp_p = FP_REGNO_P (src_regno);
17647 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
17648 src_vsx_p = src_fp_p | src_vmx_p;
17650 else
17652 src_regno = -1;
17653 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
17656 /* Register moves. */
17657 if (dest_regno >= 0 && src_regno >= 0)
17659 if (dest_gpr_p)
17661 if (src_gpr_p)
17662 return "#";
17664 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
17665 return "#";
17668 else if (TARGET_VSX && dest_vsx_p)
17670 if (src_vsx_p)
17671 return "xxlor %x0,%x1,%x1";
17673 else if (TARGET_DIRECT_MOVE && src_gpr_p)
17674 return "#";
17677 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
17678 return "vor %0,%1,%1";
17680 else if (dest_fp_p && src_fp_p)
17681 return "#";
17684 /* Loads. */
17685 else if (dest_regno >= 0 && MEM_P (src))
17687 if (dest_gpr_p)
17689 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17690 return "lq %0,%1";
17691 else
17692 return "#";
17695 else if (TARGET_ALTIVEC && dest_vmx_p
17696 && altivec_indexed_or_indirect_operand (src, mode))
17697 return "lvx %0,%y1";
17699 else if (TARGET_VSX && dest_vsx_p)
17701 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17702 return "lxvw4x %x0,%y1";
17703 else
17704 return "lxvd2x %x0,%y1";
17707 else if (TARGET_ALTIVEC && dest_vmx_p)
17708 return "lvx %0,%y1";
17710 else if (dest_fp_p)
17711 return "#";
17714 /* Stores. */
17715 else if (src_regno >= 0 && MEM_P (dest))
17717 if (src_gpr_p)
17719 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17720 return "stq %1,%0";
17721 else
17722 return "#";
17725 else if (TARGET_ALTIVEC && src_vmx_p
17726 && altivec_indexed_or_indirect_operand (src, mode))
17727 return "stvx %1,%y0";
17729 else if (TARGET_VSX && src_vsx_p)
17731 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17732 return "stxvw4x %x1,%y0";
17733 else
17734 return "stxvd2x %x1,%y0";
17737 else if (TARGET_ALTIVEC && src_vmx_p)
17738 return "stvx %1,%y0";
17740 else if (src_fp_p)
17741 return "#";
17744 /* Constants. */
17745 else if (dest_regno >= 0
17746 && (GET_CODE (src) == CONST_INT
17747 || GET_CODE (src) == CONST_WIDE_INT
17748 || GET_CODE (src) == CONST_DOUBLE
17749 || GET_CODE (src) == CONST_VECTOR))
17751 if (dest_gpr_p)
17752 return "#";
17754 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
17755 return "xxlxor %x0,%x0,%x0";
17757 else if (TARGET_ALTIVEC && dest_vmx_p)
17758 return output_vec_const_move (operands);
17761 if (TARGET_DEBUG_ADDR)
17763 fprintf (stderr, "\n===== Bad 128 bit move:\n");
17764 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
17767 gcc_unreachable ();
17770 /* Validate a 128-bit move. */
17771 bool
17772 rs6000_move_128bit_ok_p (rtx operands[])
17774 enum machine_mode mode = GET_MODE (operands[0]);
17775 return (gpc_reg_operand (operands[0], mode)
17776 || gpc_reg_operand (operands[1], mode));
17779 /* Return true if a 128-bit move needs to be split. */
17780 bool
17781 rs6000_split_128bit_ok_p (rtx operands[])
17783 if (!reload_completed)
17784 return false;
17786 if (!gpr_or_gpr_p (operands[0], operands[1]))
17787 return false;
17789 if (quad_load_store_p (operands[0], operands[1]))
17790 return false;
17792 return true;
17796 /* Given a comparison operation, return the bit number in CCR to test. We
17797 know this is a valid comparison.
17799 SCC_P is 1 if this is for an scc. That means that %D will have been
17800 used instead of %C, so the bits will be in different places.
17802 Return -1 if OP isn't a valid comparison for some reason. */
17804 int
17805 ccr_bit (rtx op, int scc_p)
17807 enum rtx_code code = GET_CODE (op);
17808 enum machine_mode cc_mode;
17809 int cc_regnum;
17810 int base_bit;
17811 rtx reg;
17813 if (!COMPARISON_P (op))
17814 return -1;
17816 reg = XEXP (op, 0);
17818 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
17820 cc_mode = GET_MODE (reg);
17821 cc_regnum = REGNO (reg);
17822 base_bit = 4 * (cc_regnum - CR0_REGNO);
17824 validate_condition_mode (code, cc_mode);
17826 /* When generating a sCOND operation, only positive conditions are
17827 allowed. */
17828 gcc_assert (!scc_p
17829 || code == EQ || code == GT || code == LT || code == UNORDERED
17830 || code == GTU || code == LTU);
17832 switch (code)
17834 case NE:
17835 return scc_p ? base_bit + 3 : base_bit + 2;
17836 case EQ:
17837 return base_bit + 2;
17838 case GT: case GTU: case UNLE:
17839 return base_bit + 1;
17840 case LT: case LTU: case UNGE:
17841 return base_bit;
17842 case ORDERED: case UNORDERED:
17843 return base_bit + 3;
17845 case GE: case GEU:
17846 /* If scc, we will have done a cror to put the bit in the
17847 unordered position. So test that bit. For integer, this is ! LT
17848 unless this is an scc insn. */
17849 return scc_p ? base_bit + 3 : base_bit;
17851 case LE: case LEU:
17852 return scc_p ? base_bit + 3 : base_bit + 1;
17854 default:
17855 gcc_unreachable ();
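/* Worked example: a GT test on CR field 6 has base_bit = 4 * 6 = 24
   and returns bit 25 (the GT bit of CR6); GE for an scc sequence
   instead returns bit 27, the bit a preceding cror left in the
   unordered position. */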
17859 /* Return the GOT register. */
17861 rtx
17862 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
17864 /* The second flow pass currently (June 1999) can't update
17865 regs_ever_live without disturbing other parts of the compiler, so
17866 update it here to make the prolog/epilogue code happy. */
17867 if (!can_create_pseudo_p ()
17868 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17869 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
17871 crtl->uses_pic_offset_table = 1;
17873 return pic_offset_table_rtx;
17876 static rs6000_stack_t stack_info;
17878 /* Function to init struct machine_function.
17879 This will be called, via a pointer variable,
17880 from push_function_context. */
17882 static struct machine_function *
17883 rs6000_init_machine_status (void)
17885 stack_info.reload_completed = 0;
17886 return ggc_cleared_alloc<machine_function> ();
17889 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
17891 int
17892 extract_MB (rtx op)
17894 int i;
17895 unsigned long val = INTVAL (op);
17897 /* If the high bit is zero, the value is the first 1 bit we find
17898 from the left. */
17899 if ((val & 0x80000000) == 0)
17901 gcc_assert (val & 0xffffffff);
17903 i = 1;
17904 while (((val <<= 1) & 0x80000000) == 0)
17905 ++i;
17906 return i;
17909 /* If the high bit is set and the low bit is not, or the mask is all
17910 1's, the value is zero. */
17911 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
17912 return 0;
17914 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17915 from the right. */
17916 i = 31;
17917 while (((val >>= 1) & 1) != 0)
17918 --i;
17920 return i;
17923 int
17924 extract_ME (rtx op)
17926 int i;
17927 unsigned long val = INTVAL (op);
17929 /* If the low bit is zero, the value is the first 1 bit we find from
17930 the right. */
17931 if ((val & 1) == 0)
17933 gcc_assert (val & 0xffffffff);
17935 i = 30;
17936 while (((val >>= 1) & 1) == 0)
17937 --i;
17939 return i;
17942 /* If the low bit is set and the high bit is not, or the mask is all
17943 1's, the value is 31. */
17944 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
17945 return 31;
17947 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17948 from the left. */
17949 i = 0;
17950 while (((val <<= 1) & 0x80000000) != 0)
17951 ++i;
17953 return i;
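/* A worked example for both extractors: the wrap-around rlwinm mask
   0xff0000ff has 1 bits in IBM bits 0..7 and 24..31, so extract_MB
   returns 24 (the run of ones restarts at the first 0 found from the
   right) and extract_ME returns 7 (the first 0 from the left ends it).
   A contiguous mask such as 0x0000ffff simply gives MB = 16, ME = 31. */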
17956 /* Locate some local-dynamic symbol still in use by this function
17957 so that we can print its name in some tls_ld pattern. */
17959 static const char *
17960 rs6000_get_some_local_dynamic_name (void)
17962 rtx_insn *insn;
17964 if (cfun->machine->some_ld_name)
17965 return cfun->machine->some_ld_name;
17967 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
17968 if (INSN_P (insn)
17969 && for_each_rtx (&PATTERN (insn),
17970 rs6000_get_some_local_dynamic_name_1, 0))
17971 return cfun->machine->some_ld_name;
17973 gcc_unreachable ();
17976 /* Helper function for rs6000_get_some_local_dynamic_name. */
17978 static int
17979 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
17981 rtx x = *px;
17983 if (GET_CODE (x) == SYMBOL_REF)
17985 const char *str = XSTR (x, 0);
17986 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
17988 cfun->machine->some_ld_name = str;
17989 return 1;
17993 return 0;
17996 /* Write out a function code label. */
17998 void
17999 rs6000_output_function_entry (FILE *file, const char *fname)
18001 if (fname[0] != '.')
18003 switch (DEFAULT_ABI)
18005 default:
18006 gcc_unreachable ();
18008 case ABI_AIX:
18009 if (DOT_SYMBOLS)
18010 putc ('.', file);
18011 else
18012 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
18013 break;
18015 case ABI_ELFv2:
18016 case ABI_V4:
18017 case ABI_DARWIN:
18018 break;
18022 RS6000_OUTPUT_BASENAME (file, fname);
18025 /* Print an operand. Recognize special options, documented below. */
18027 #if TARGET_ELF
18028 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
18029 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
18030 #else
18031 #define SMALL_DATA_RELOC "sda21"
18032 #define SMALL_DATA_REG 0
18033 #endif
18035 void
18036 print_operand (FILE *file, rtx x, int code)
18038 int i;
18039 unsigned HOST_WIDE_INT uval;
18041 switch (code)
18043 /* %a is output_address. */
18045 case 'b':
18046 /* If constant, low-order 16 bits of constant, unsigned.
18047 Otherwise, write normally. */
18048 if (INT_P (x))
18049 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
18050 else
18051 print_operand (file, x, 0);
18052 return;
18054 case 'B':
18055 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
18056 for 64-bit mask direction. */
18057 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
18058 return;
18060 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
18061 output_operand. */
18063 case 'D':
18064 /* Like 'J' but get to the GT bit only. */
18065 gcc_assert (REG_P (x));
18067 /* Bit 1 is GT bit. */
18068 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
18070 /* Add one for shift count in rlinm for scc. */
18071 fprintf (file, "%d", i + 1);
18072 return;
18074 case 'e':
18075 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
18076 if (! INT_P (x))
18078 output_operand_lossage ("invalid %%e value");
18079 return;
18082 uval = INTVAL (x);
18083 if ((uval & 0xffff) == 0 && uval != 0)
18084 putc ('s', file);
18085 return;
18087 case 'E':
18088 /* X is a CR register. Print the number of the EQ bit of the CR. */
18089 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18090 output_operand_lossage ("invalid %%E value");
18091 else
18092 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
18093 return;
18095 case 'f':
18096 /* X is a CR register. Print the shift count needed to move it
18097 to the high-order four bits. */
18098 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18099 output_operand_lossage ("invalid %%f value");
18100 else
18101 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
18102 return;
18104 case 'F':
18105 /* Similar, but print the count for the rotate in the opposite
18106 direction. */
18107 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18108 output_operand_lossage ("invalid %%F value");
18109 else
18110 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
18111 return;
18113 case 'G':
18114 /* X is a constant integer. If it is negative, print "m",
18115 otherwise print "z". This is to make an aze or ame insn. */
18116 if (GET_CODE (x) != CONST_INT)
18117 output_operand_lossage ("invalid %%G value");
18118 else if (INTVAL (x) >= 0)
18119 putc ('z', file);
18120 else
18121 putc ('m', file);
18122 return;
18124 case 'h':
18125 /* If constant, output low-order five bits. Otherwise, write
18126 normally. */
18127 if (INT_P (x))
18128 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
18129 else
18130 print_operand (file, x, 0);
18131 return;
18133 case 'H':
18134 /* If constant, output low-order six bits. Otherwise, write
18135 normally. */
18136 if (INT_P (x))
18137 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
18138 else
18139 print_operand (file, x, 0);
18140 return;
18142 case 'I':
18143 /* Print `i' if this is a constant, else nothing. */
18144 if (INT_P (x))
18145 putc ('i', file);
18146 return;
18148 case 'j':
18149 /* Write the bit number in CCR for jump. */
18150 i = ccr_bit (x, 0);
18151 if (i == -1)
18152 output_operand_lossage ("invalid %%j code");
18153 else
18154 fprintf (file, "%d", i);
18155 return;
18157 case 'J':
18158 /* Similar, but add one for shift count in rlinm for scc and pass
18159 scc flag to `ccr_bit'. */
18160 i = ccr_bit (x, 1);
18161 if (i == -1)
18162 output_operand_lossage ("invalid %%J code");
18163 else
18164 /* If we want bit 31, write a shift count of zero, not 32. */
18165 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18166 return;
18168 case 'k':
18169 /* X must be a constant. Write the 1's complement of the
18170 constant. */
18171 if (! INT_P (x))
18172 output_operand_lossage ("invalid %%k value");
18173 else
18174 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
18175 return;
18177 case 'K':
18178 /* X must be a symbolic constant on ELF. Write an
18179 expression suitable for an 'addi' that adds in the low 16
18180 bits of the MEM. */
18181 if (GET_CODE (x) == CONST)
18183 if (GET_CODE (XEXP (x, 0)) != PLUS
18184 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
18185 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
18186 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
18187 output_operand_lossage ("invalid %%K value");
18189 print_operand_address (file, x);
18190 fputs ("@l", file);
18191 return;
18193 /* %l is output_asm_label. */
18195 case 'L':
18196 /* Write second word of DImode or DFmode reference. Works on register
18197 or non-indexed memory only. */
18198 if (REG_P (x))
18199 fputs (reg_names[REGNO (x) + 1], file);
18200 else if (MEM_P (x))
18202 /* Handle possible auto-increment. Since it is pre-increment and
18203 we have already done it, we can just use an offset of one word. */
18204 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18205 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18206 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18207 UNITS_PER_WORD));
18208 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18209 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18210 UNITS_PER_WORD));
18211 else
18212 output_address (XEXP (adjust_address_nv (x, SImode,
18213 UNITS_PER_WORD),
18214 0));
18216 if (small_data_operand (x, GET_MODE (x)))
18217 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18218 reg_names[SMALL_DATA_REG]);
18220 return;
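/* For illustration: if operand 1 is a DImode value in r4, "%L1" prints
   the name of r5, the register holding the second word; if operand 1
   is a non-indexed MEM, the same address is printed with an extra
   UNITS_PER_WORD of displacement instead. */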
18222 case 'm':
18223 /* MB value for a mask operand. */
18224 if (! mask_operand (x, SImode))
18225 output_operand_lossage ("invalid %%m value");
18227 fprintf (file, "%d", extract_MB (x));
18228 return;
18230 case 'M':
18231 /* ME value for a mask operand. */
18232 if (! mask_operand (x, SImode))
18233 output_operand_lossage ("invalid %%M value");
18235 fprintf (file, "%d", extract_ME (x));
18236 return;
18238 /* %n outputs the negative of its operand. */
18240 case 'N':
18241 /* Write the number of elements in the vector times 4. */
18242 if (GET_CODE (x) != PARALLEL)
18243 output_operand_lossage ("invalid %%N value");
18244 else
18245 fprintf (file, "%d", XVECLEN (x, 0) * 4);
18246 return;
18248 case 'O':
18249 /* Similar, but subtract 1 first. */
18250 if (GET_CODE (x) != PARALLEL)
18251 output_operand_lossage ("invalid %%O value");
18252 else
18253 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
18254 return;
18256 case 'p':
18257 /* X is a CONST_INT that is a power of two. Output the logarithm. */
18258 if (! INT_P (x)
18259 || INTVAL (x) < 0
18260 || (i = exact_log2 (INTVAL (x))) < 0)
18261 output_operand_lossage ("invalid %%p value");
18262 else
18263 fprintf (file, "%d", i);
18264 return;
18266 case 'P':
18267 /* The operand must be an indirect memory reference. The result
18268 is the register name. */
18269 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
18270 || REGNO (XEXP (x, 0)) >= 32)
18271 output_operand_lossage ("invalid %%P value");
18272 else
18273 fputs (reg_names[REGNO (XEXP (x, 0))], file);
18274 return;
18276 case 'q':
18277 /* This outputs the logical code corresponding to a boolean
18278 expression. The expression may have one or both operands
18279 negated (if one, only the first one). For condition register
18280 logical operations, it will also treat the negated
18281 CR codes as NOTs, but not handle NOTs of them. */
18283 const char *const *t = 0;
18284 const char *s;
18285 enum rtx_code code = GET_CODE (x);
18286 static const char * const tbl[3][3] = {
18287 { "and", "andc", "nor" },
18288 { "or", "orc", "nand" },
18289 { "xor", "eqv", "xor" } };
18291 if (code == AND)
18292 t = tbl[0];
18293 else if (code == IOR)
18294 t = tbl[1];
18295 else if (code == XOR)
18296 t = tbl[2];
18297 else
18298 output_operand_lossage ("invalid %%q value");
18300 if (GET_CODE (XEXP (x, 0)) != NOT)
18301 s = t[0];
18302 else
18304 if (GET_CODE (XEXP (x, 1)) == NOT)
18305 s = t[2];
18306 else
18307 s = t[1];
18310 fputs (s, file);
18312 return;
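/* Examples of the %q mapping: (and a b) prints "and",
   (and (not a) b) prints "andc", and (and (not a) (not b)) prints
   "nor". The IOR row gives or/orc/nand the same way, and the XOR row
   gives xor/eqv/xor, since two NOTs cancel under XOR. */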
18314 case 'Q':
18315 if (! TARGET_MFCRF)
18316 return;
18317 fputc (',', file);
18318 /* FALLTHRU */
18320 case 'R':
18321 /* X is a CR register. Print the mask for `mtcrf'. */
18322 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18323 output_operand_lossage ("invalid %%R value");
18324 else
18325 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
18326 return;
18328 case 's':
18329 /* Low 5 bits of 32 - value */
18330 if (! INT_P (x))
18331 output_operand_lossage ("invalid %%s value");
18332 else
18333 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
18334 return;
18336 case 'S':
18337 /* PowerPC64 mask position. A mask of all 0's is excluded.
18338 CONST_INT 32-bit mask is considered sign-extended so any
18339 transition must occur within the CONST_INT, not on the boundary. */
18340 if (! mask64_operand (x, DImode))
18341 output_operand_lossage ("invalid %%S value");
18343 uval = INTVAL (x);
18345 if (uval & 1) /* Clear Left */
18347 #if HOST_BITS_PER_WIDE_INT > 64
18348 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18349 #endif
18350 i = 64;
18352 else /* Clear Right */
18354 uval = ~uval;
18355 #if HOST_BITS_PER_WIDE_INT > 64
18356 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18357 #endif
18358 i = 63;
18360 while (uval != 0)
18361 --i, uval >>= 1;
18362 gcc_assert (i >= 0);
18363 fprintf (file, "%d", i);
18364 return;
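/* %S examples: the "clear left" mask 0x00000000ffffffff (low bit set)
   prints 32, suitable as the MB operand of an rldicl; the "clear
   right" mask 0xffffffff00000000 prints 31, the ME operand of an
   rldicr. */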
18366 case 't':
18367 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18368 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
18370 /* Bit 3 is OV bit. */
18371 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
18373 /* If we want bit 31, write a shift count of zero, not 32. */
18374 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18375 return;
18377 case 'T':
18378 /* Print the symbolic name of a branch target register. */
18379 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
18380 && REGNO (x) != CTR_REGNO))
18381 output_operand_lossage ("invalid %%T value");
18382 else if (REGNO (x) == LR_REGNO)
18383 fputs ("lr", file);
18384 else
18385 fputs ("ctr", file);
18386 return;
18388 case 'u':
18389 /* High-order or low-order 16 bits of constant, whichever is non-zero,
18390 for use in unsigned operand. */
18391 if (! INT_P (x))
18393 output_operand_lossage ("invalid %%u value");
18394 return;
18397 uval = INTVAL (x);
18398 if ((uval & 0xffff) == 0)
18399 uval >>= 16;
18401 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
18402 return;
18404 case 'v':
18405 /* High-order 16 bits of constant for use in signed operand. */
18406 if (! INT_P (x))
18407 output_operand_lossage ("invalid %%v value");
18408 else
18409 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18410 (INTVAL (x) >> 16) & 0xffff);
18411 return;
18413 case 'U':
18414 /* Print `u' if this has an auto-increment or auto-decrement. */
18415 if (MEM_P (x)
18416 && (GET_CODE (XEXP (x, 0)) == PRE_INC
18417 || GET_CODE (XEXP (x, 0)) == PRE_DEC
18418 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
18419 putc ('u', file);
18420 return;
18422 case 'V':
18423 /* Print the trap code for this operand. */
18424 switch (GET_CODE (x))
18426 case EQ:
18427 fputs ("eq", file); /* 4 */
18428 break;
18429 case NE:
18430 fputs ("ne", file); /* 24 */
18431 break;
18432 case LT:
18433 fputs ("lt", file); /* 16 */
18434 break;
18435 case LE:
18436 fputs ("le", file); /* 20 */
18437 break;
18438 case GT:
18439 fputs ("gt", file); /* 8 */
18440 break;
18441 case GE:
18442 fputs ("ge", file); /* 12 */
18443 break;
18444 case LTU:
18445 fputs ("llt", file); /* 2 */
18446 break;
18447 case LEU:
18448 fputs ("lle", file); /* 6 */
18449 break;
18450 case GTU:
18451 fputs ("lgt", file); /* 1 */
18452 break;
18453 case GEU:
18454 fputs ("lge", file); /* 5 */
18455 break;
18456 default:
18457 gcc_unreachable ();
18459 break;
18461 case 'w':
18462 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18463 normally. */
18464 if (INT_P (x))
18465 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18466 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18467 else
18468 print_operand (file, x, 0);
18469 return;
18471 case 'W':
18472 /* MB value for a PowerPC64 rldic operand. */
18473 i = clz_hwi (INTVAL (x));
18475 fprintf (file, "%d", i);
18476 return;
18478 case 'x':
18479 /* X is a FPR or Altivec register used in a VSX context. */
18480 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18481 output_operand_lossage ("invalid %%x value");
18482 else
18484 int reg = REGNO (x);
18485 int vsx_reg = (FP_REGNO_P (reg)
18486 ? reg - 32
18487 : reg - FIRST_ALTIVEC_REGNO + 32);
18489 #ifdef TARGET_REGNAMES
18490 if (TARGET_REGNAMES)
18491 fprintf (file, "%%vs%d", vsx_reg);
18492 else
18493 #endif
18494 fprintf (file, "%d", vsx_reg);
18496 return;
18498 case 'X':
18499 if (MEM_P (x)
18500 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18501 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18502 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18503 putc ('x', file);
18504 return;
18506 case 'Y':
18507 /* Like 'L', for third word of TImode/PTImode */
18508 if (REG_P (x))
18509 fputs (reg_names[REGNO (x) + 2], file);
18510 else if (MEM_P (x))
18512 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18513 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18514 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18515 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18516 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18517 else
18518 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18519 if (small_data_operand (x, GET_MODE (x)))
18520 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18521 reg_names[SMALL_DATA_REG]);
18523 return;
18525 case 'z':
18526 /* X is a SYMBOL_REF. Write out the name preceded by a
18527 period and without any trailing data in brackets. Used for function
18528 names. If we are configured for System V (or the embedded ABI) on
18529 the PowerPC, do not emit the period, since those systems do not use
18530 TOCs and the like. */
18531 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18533 /* For macho, check to see if we need a stub. */
18534 if (TARGET_MACHO)
18536 const char *name = XSTR (x, 0);
18537 #if TARGET_MACHO
18538 if (darwin_emit_branch_islands
18539 && MACHOPIC_INDIRECT
18540 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18541 name = machopic_indirection_name (x, /*stub_p=*/true);
18542 #endif
18543 assemble_name (file, name);
18545 else if (!DOT_SYMBOLS)
18546 assemble_name (file, XSTR (x, 0));
18547 else
18548 rs6000_output_function_entry (file, XSTR (x, 0));
18549 return;
18551 case 'Z':
18552 /* Like 'L', for last word of TImode/PTImode. */
18553 if (REG_P (x))
18554 fputs (reg_names[REGNO (x) + 3], file);
18555 else if (MEM_P (x))
18557 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18558 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18559 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18560 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18561 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18562 else
18563 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
18564 if (small_data_operand (x, GET_MODE (x)))
18565 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18566 reg_names[SMALL_DATA_REG]);
18568 return;
18570 /* Print AltiVec or SPE memory operand. */
18571 case 'y':
18573 rtx tmp;
18575 gcc_assert (MEM_P (x));
18577 tmp = XEXP (x, 0);
18579 /* Ugly hack because %y is overloaded. */
18580 if ((TARGET_SPE || TARGET_E500_DOUBLE)
18581 && (GET_MODE_SIZE (GET_MODE (x)) == 8
18582 || GET_MODE (x) == TFmode
18583 || GET_MODE (x) == TImode
18584 || GET_MODE (x) == PTImode))
18586 /* Handle [reg]. */
18587 if (REG_P (tmp))
18589 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
18590 break;
18592 /* Handle [reg+UIMM]. */
18593 else if (GET_CODE (tmp) == PLUS &&
18594 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
18596 int x;
18598 gcc_assert (REG_P (XEXP (tmp, 0)));
18600 x = INTVAL (XEXP (tmp, 1));
18601 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
18602 break;
18605 /* Fall through. Must be [reg+reg]. */
18607 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
18608 && GET_CODE (tmp) == AND
18609 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
18610 && INTVAL (XEXP (tmp, 1)) == -16)
18611 tmp = XEXP (tmp, 0);
18612 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
18613 && GET_CODE (tmp) == PRE_MODIFY)
18614 tmp = XEXP (tmp, 1);
18615 if (REG_P (tmp))
18616 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
18617 else
18619 if (GET_CODE (tmp) != PLUS
18620 || !REG_P (XEXP (tmp, 0))
18621 || !REG_P (XEXP (tmp, 1)))
18623 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18624 break;
18627 if (REGNO (XEXP (tmp, 0)) == 0)
18628 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
18629 reg_names[ REGNO (XEXP (tmp, 0)) ]);
18630 else
18631 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
18632 reg_names[ REGNO (XEXP (tmp, 1)) ]);
18634 break;
18637 case 0:
18638 if (REG_P (x))
18639 fprintf (file, "%s", reg_names[REGNO (x)]);
18640 else if (MEM_P (x))
18642 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18643 know the width from the mode. */
18644 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
18645 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
18646 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18647 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
18648 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
18649 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18650 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18651 output_address (XEXP (XEXP (x, 0), 1));
18652 else
18653 output_address (XEXP (x, 0));
18655 else
18657 if (toc_relative_expr_p (x, false))
18658 /* This hack along with a corresponding hack in
18659 rs6000_output_addr_const_extra arranges to output addends
18660 where the assembler expects to find them. eg.
18661 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18662 without this hack would be output as "x@toc+4". We
18663 want "x+4@toc". */
18664 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18665 else
18666 output_addr_const (file, x);
18668 return;
18670 case '&':
18671 assemble_name (file, rs6000_get_some_local_dynamic_name ());
18672 return;
18674 default:
18675 output_operand_lossage ("invalid %%xn code");
18679 /* Print the address of an operand. */
18681 void
18682 print_operand_address (FILE *file, rtx x)
18684 if (REG_P (x))
18685 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
18686 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
18687 || GET_CODE (x) == LABEL_REF)
18689 output_addr_const (file, x);
18690 if (small_data_operand (x, GET_MODE (x)))
18691 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18692 reg_names[SMALL_DATA_REG]);
18693 else
18694 gcc_assert (!TARGET_TOC);
18696 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18697 && REG_P (XEXP (x, 1)))
18699 if (REGNO (XEXP (x, 0)) == 0)
18700 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
18701 reg_names[ REGNO (XEXP (x, 0)) ]);
18702 else
18703 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
18704 reg_names[ REGNO (XEXP (x, 1)) ]);
18706 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18707 && GET_CODE (XEXP (x, 1)) == CONST_INT)
18708 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
18709 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
18710 #if TARGET_MACHO
18711 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18712 && CONSTANT_P (XEXP (x, 1)))
18714 fprintf (file, "lo16(");
18715 output_addr_const (file, XEXP (x, 1));
18716 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18718 #endif
18719 #if TARGET_ELF
18720 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18721 && CONSTANT_P (XEXP (x, 1)))
18723 output_addr_const (file, XEXP (x, 1));
18724 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18726 #endif
18727 else if (toc_relative_expr_p (x, false))
18729 /* This hack along with a corresponding hack in
18730 rs6000_output_addr_const_extra arranges to output addends
18731 where the assembler expects to find them. eg.
18732 (lo_sum (reg 9)
18733 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18734 without this hack would be output as "x@toc+8@l(9)". We
18735 want "x+8@toc@l(9)". */
18736 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18737 if (GET_CODE (x) == LO_SUM)
18738 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
18739 else
18740 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
18742 else
18743 gcc_unreachable ();
18746 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18748 static bool
18749 rs6000_output_addr_const_extra (FILE *file, rtx x)
18751 if (GET_CODE (x) == UNSPEC)
18752 switch (XINT (x, 1))
18754 case UNSPEC_TOCREL:
18755 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
18756 && REG_P (XVECEXP (x, 0, 1))
18757 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
18758 output_addr_const (file, XVECEXP (x, 0, 0));
18759 if (x == tocrel_base && tocrel_offset != const0_rtx)
18761 if (INTVAL (tocrel_offset) >= 0)
18762 fprintf (file, "+");
18763 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
18765 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
18767 putc ('-', file);
18768 assemble_name (file, toc_label_name);
18770 else if (TARGET_ELF)
18771 fputs ("@toc", file);
18772 return true;
18774 #if TARGET_MACHO
18775 case UNSPEC_MACHOPIC_OFFSET:
18776 output_addr_const (file, XVECEXP (x, 0, 0));
18777 putc ('-', file);
18778 machopic_output_function_base_name (file);
18779 return true;
18780 #endif
18782 return false;
18785 /* Target hook for assembling integer objects. The PowerPC version has
18786 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18787 is defined. It also needs to handle DI-mode objects on 64-bit
18788 targets. */
18790 static bool
18791 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
18793 #ifdef RELOCATABLE_NEEDS_FIXUP
18794 /* Special handling for SI values. */
18795 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
18797 static int recurse = 0;
18799 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18800 the .fixup section. Since the TOC section is already relocated, we
18801 don't need to mark it here. We used to skip the text section, but it
18802 should never be valid for relocated addresses to be placed in the text
18803 section. */
18804 if (TARGET_RELOCATABLE
18805 && in_section != toc_section
18806 && !recurse
18807 && !CONST_SCALAR_INT_P (x)
18808 && CONSTANT_P (x))
18810 char buf[256];
18812 recurse = 1;
18813 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
18814 fixuplabelno++;
18815 ASM_OUTPUT_LABEL (asm_out_file, buf);
18816 fprintf (asm_out_file, "\t.long\t(");
18817 output_addr_const (asm_out_file, x);
18818 fprintf (asm_out_file, ")@fixup\n");
18819 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
18820 ASM_OUTPUT_ALIGN (asm_out_file, 2);
18821 fprintf (asm_out_file, "\t.long\t");
18822 assemble_name (asm_out_file, buf);
18823 fprintf (asm_out_file, "\n\t.previous\n");
18824 recurse = 0;
18825 return true;
18827 /* Remove initial .'s to turn a -mcall-aixdesc function
18828 address into the address of the descriptor, not the function
18829 itself. */
18830 else if (GET_CODE (x) == SYMBOL_REF
18831 && XSTR (x, 0)[0] == '.'
18832 && DEFAULT_ABI == ABI_AIX)
18834 const char *name = XSTR (x, 0);
18835 while (*name == '.')
18836 name++;
18838 fprintf (asm_out_file, "\t.long\t%s\n", name);
18839 return true;
18842 #endif /* RELOCATABLE_NEEDS_FIXUP */
18843 return default_assemble_integer (x, size, aligned_p);
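/* Roughly, under -mrelocatable the code above replaces a plain
   ".long sym" with a label (call it LCPn) on ".long (sym)@fixup",
   plus an entry in the ".fixup" section holding the address of LCPn,
   so that startup code can find and relocate the word. */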
18846 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18847 /* Emit an assembler directive to set symbol visibility for DECL to
18848 VISIBILITY_TYPE. */
18850 static void
18851 rs6000_assemble_visibility (tree decl, int vis)
18853 if (TARGET_XCOFF)
18854 return;
18856 /* Functions need to have their entry point symbol visibility set as
18857 well as their descriptor symbol visibility. */
18858 if (DEFAULT_ABI == ABI_AIX
18859 && DOT_SYMBOLS
18860 && TREE_CODE (decl) == FUNCTION_DECL)
18862 static const char * const visibility_types[] = {
18863 NULL, "internal", "hidden", "protected"
18866 const char *name, *type;
18868 name = ((* targetm.strip_name_encoding)
18869 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
18870 type = visibility_types[vis];
18872 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
18873 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
18875 else
18876 default_assemble_visibility (decl, vis);
18878 #endif
18880 enum rtx_code
18881 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
18883 /* Reversal of FP compares needs care -- an ordered compare
18884 becomes an unordered compare and vice versa. */
18885 if (mode == CCFPmode
18886 && (!flag_finite_math_only
18887 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
18888 || code == UNEQ || code == LTGT))
18889 return reverse_condition_maybe_unordered (code);
18890 else
18891 return reverse_condition (code);
18894 /* Generate a compare for CODE. Return a brand-new rtx that
18895 represents the result of the compare. */
18897 static rtx
18898 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
18900 enum machine_mode comp_mode;
18901 rtx compare_result;
18902 enum rtx_code code = GET_CODE (cmp);
18903 rtx op0 = XEXP (cmp, 0);
18904 rtx op1 = XEXP (cmp, 1);
18906 if (FLOAT_MODE_P (mode))
18907 comp_mode = CCFPmode;
18908 else if (code == GTU || code == LTU
18909 || code == GEU || code == LEU)
18910 comp_mode = CCUNSmode;
18911 else if ((code == EQ || code == NE)
18912 && unsigned_reg_p (op0)
18913 && (unsigned_reg_p (op1)
18914 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
18915 /* These are unsigned values; perhaps there will be a later
18916 ordering compare that can be shared with this one. */
18917 comp_mode = CCUNSmode;
18918 else
18919 comp_mode = CCmode;
18921 /* If we have an unsigned compare, make sure we don't have a signed value as
18922 an immediate. */
18923 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
18924 && INTVAL (op1) < 0)
18926 op0 = copy_rtx_if_shared (op0);
18927 op1 = force_reg (GET_MODE (op0), op1);
18928 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
18931 /* First, the compare. */
18932 compare_result = gen_reg_rtx (comp_mode);
18934 /* E500 FP compare instructions on the GPRs. Yuck! */
18935 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
18936 && FLOAT_MODE_P (mode))
18938 rtx cmp, or_result, compare_result2;
18939 enum machine_mode op_mode = GET_MODE (op0);
18940 bool reverse_p;
18942 if (op_mode == VOIDmode)
18943 op_mode = GET_MODE (op1);
18945 /* First reverse the condition codes that aren't directly supported. */
18946 switch (code)
18948 case NE:
18949 case UNLT:
18950 case UNLE:
18951 case UNGT:
18952 case UNGE:
18953 code = reverse_condition_maybe_unordered (code);
18954 reverse_p = true;
18955 break;
18957 case EQ:
18958 case LT:
18959 case LE:
18960 case GT:
18961 case GE:
18962 reverse_p = false;
18963 break;
18965 default:
18966 gcc_unreachable ();
18969 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18970 This explains the following mess. */
18972 switch (code)
18974 case EQ:
18975 switch (op_mode)
18977 case SFmode:
18978 cmp = (flag_finite_math_only && !flag_trapping_math)
18979 ? gen_tstsfeq_gpr (compare_result, op0, op1)
18980 : gen_cmpsfeq_gpr (compare_result, op0, op1);
18981 break;
18983 case DFmode:
18984 cmp = (flag_finite_math_only && !flag_trapping_math)
18985 ? gen_tstdfeq_gpr (compare_result, op0, op1)
18986 : gen_cmpdfeq_gpr (compare_result, op0, op1);
18987 break;
18989 case TFmode:
18990 cmp = (flag_finite_math_only && !flag_trapping_math)
18991 ? gen_tsttfeq_gpr (compare_result, op0, op1)
18992 : gen_cmptfeq_gpr (compare_result, op0, op1);
18993 break;
18995 default:
18996 gcc_unreachable ();
18998 break;
19000 case GT:
19001 case GE:
19002 switch (op_mode)
19004 case SFmode:
19005 cmp = (flag_finite_math_only && !flag_trapping_math)
19006 ? gen_tstsfgt_gpr (compare_result, op0, op1)
19007 : gen_cmpsfgt_gpr (compare_result, op0, op1);
19008 break;
19010 case DFmode:
19011 cmp = (flag_finite_math_only && !flag_trapping_math)
19012 ? gen_tstdfgt_gpr (compare_result, op0, op1)
19013 : gen_cmpdfgt_gpr (compare_result, op0, op1);
19014 break;
19016 case TFmode:
19017 cmp = (flag_finite_math_only && !flag_trapping_math)
19018 ? gen_tsttfgt_gpr (compare_result, op0, op1)
19019 : gen_cmptfgt_gpr (compare_result, op0, op1);
19020 break;
19022 default:
19023 gcc_unreachable ();
19025 break;
19027 case LT:
19028 case LE:
19029 switch (op_mode)
19031 case SFmode:
19032 cmp = (flag_finite_math_only && !flag_trapping_math)
19033 ? gen_tstsflt_gpr (compare_result, op0, op1)
19034 : gen_cmpsflt_gpr (compare_result, op0, op1);
19035 break;
19037 case DFmode:
19038 cmp = (flag_finite_math_only && !flag_trapping_math)
19039 ? gen_tstdflt_gpr (compare_result, op0, op1)
19040 : gen_cmpdflt_gpr (compare_result, op0, op1);
19041 break;
19043 case TFmode:
19044 cmp = (flag_finite_math_only && !flag_trapping_math)
19045 ? gen_tsttflt_gpr (compare_result, op0, op1)
19046 : gen_cmptflt_gpr (compare_result, op0, op1);
19047 break;
19049 default:
19050 gcc_unreachable ();
19052 break;
19054 default:
19055 gcc_unreachable ();
19058 /* Synthesize LE and GE from LT/GT || EQ. */
19059 if (code == LE || code == GE)
19061 emit_insn (cmp);
19063 compare_result2 = gen_reg_rtx (CCFPmode);
19065 /* Do the EQ. */
19066 switch (op_mode)
19068 case SFmode:
19069 cmp = (flag_finite_math_only && !flag_trapping_math)
19070 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
19071 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
19072 break;
19074 case DFmode:
19075 cmp = (flag_finite_math_only && !flag_trapping_math)
19076 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
19077 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
19078 break;
19080 case TFmode:
19081 cmp = (flag_finite_math_only && !flag_trapping_math)
19082 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
19083 : gen_cmptfeq_gpr (compare_result2, op0, op1);
19084 break;
19086 default:
19087 gcc_unreachable ();
19090 emit_insn (cmp);
19092 /* OR them together. */
19093 or_result = gen_reg_rtx (CCFPmode);
19094 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
19095 compare_result2);
19096 compare_result = or_result;
19099 code = reverse_p ? NE : EQ;
19101 emit_insn (cmp);
19103 else
19105 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
19106 CLOBBERs to match cmptf_internal2 pattern. */
19107 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
19108 && GET_MODE (op0) == TFmode
19109 && !TARGET_IEEEQUAD
19110 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
19111 emit_insn (gen_rtx_PARALLEL (VOIDmode,
19112 gen_rtvec (10,
19113 gen_rtx_SET (VOIDmode,
19114 compare_result,
19115 gen_rtx_COMPARE (comp_mode, op0, op1)),
19116 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19117 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19118 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19119 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19120 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19121 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19122 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19123 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19124 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
19125 else if (GET_CODE (op1) == UNSPEC
19126 && XINT (op1, 1) == UNSPEC_SP_TEST)
19128 rtx op1b = XVECEXP (op1, 0, 0);
19129 comp_mode = CCEQmode;
19130 compare_result = gen_reg_rtx (CCEQmode);
19131 if (TARGET_64BIT)
19132 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
19133 else
19134 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
19136 else
19137 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19138 gen_rtx_COMPARE (comp_mode, op0, op1)));
19141 /* Some kinds of FP comparisons need an OR operation;
19142 under flag_finite_math_only we don't bother. */
19143 if (FLOAT_MODE_P (mode)
19144 && !flag_finite_math_only
19145 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
19146 && (code == LE || code == GE
19147 || code == UNEQ || code == LTGT
19148 || code == UNGT || code == UNLT))
19150 enum rtx_code or1, or2;
19151 rtx or1_rtx, or2_rtx, compare2_rtx;
19152 rtx or_result = gen_reg_rtx (CCEQmode);
19154 switch (code)
19156 case LE: or1 = LT; or2 = EQ; break;
19157 case GE: or1 = GT; or2 = EQ; break;
19158 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
19159 case LTGT: or1 = LT; or2 = GT; break;
19160 case UNGT: or1 = UNORDERED; or2 = GT; break;
19161 case UNLT: or1 = UNORDERED; or2 = LT; break;
19162 default: gcc_unreachable ();
19164 validate_condition_mode (or1, comp_mode);
19165 validate_condition_mode (or2, comp_mode);
19166 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
19167 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
19168 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
19169 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
19170 const_true_rtx);
19171 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
19173 compare_result = or_result;
19174 code = EQ;
19177 validate_condition_mode (code, GET_MODE (compare_result));
19179 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
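/* For example, a floating-point LE without flag_finite_math_only is
   synthesized as (LT || EQ): after the CCFP compare, the LT and EQ
   bits are IORed (a cror) into a CCEQ register, and the returned
   condition is a plain EQ test on that register. */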
19183 /* Emit the RTL for an sISEL pattern. */
19185 void
19186 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
19188 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
19191 void
19192 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
19194 rtx condition_rtx;
19195 enum machine_mode op_mode;
19196 enum rtx_code cond_code;
19197 rtx result = operands[0];
19199 if (TARGET_ISEL && (mode == SImode || mode == DImode))
19201 rs6000_emit_sISEL (mode, operands);
19202 return;
19205 condition_rtx = rs6000_generate_compare (operands[1], mode);
19206 cond_code = GET_CODE (condition_rtx);
19208 if (FLOAT_MODE_P (mode)
19209 && !TARGET_FPRS && TARGET_HARD_FLOAT)
19211 rtx t;
19213 PUT_MODE (condition_rtx, SImode);
19214 t = XEXP (condition_rtx, 0);
19216 gcc_assert (cond_code == NE || cond_code == EQ);
19218 if (cond_code == NE)
19219 emit_insn (gen_e500_flip_gt_bit (t, t));
19221 emit_insn (gen_move_from_CR_gt_bit (result, t));
19222 return;
19225 if (cond_code == NE
19226 || cond_code == GE || cond_code == LE
19227 || cond_code == GEU || cond_code == LEU
19228 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
19230 rtx not_result = gen_reg_rtx (CCEQmode);
19231 rtx not_op, rev_cond_rtx;
19232 enum machine_mode cc_mode;
19234 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
19236 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
19237 SImode, XEXP (condition_rtx, 0), const0_rtx);
19238 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
19239 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
19240 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
19243 op_mode = GET_MODE (XEXP (operands[1], 0));
19244 if (op_mode == VOIDmode)
19245 op_mode = GET_MODE (XEXP (operands[1], 1));
19247 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
19249 PUT_MODE (condition_rtx, DImode);
19250 convert_move (result, condition_rtx, 0);
19252 else
19254 PUT_MODE (condition_rtx, SImode);
19255 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
19259 /* Emit a branch of kind CODE to location LOC. */
19261 void
19262 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
19264 rtx condition_rtx, loc_ref;
19266 condition_rtx = rs6000_generate_compare (operands[0], mode);
19267 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
19268 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
19269 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
19270 loc_ref, pc_rtx)));
19273 /* Return the string to output a conditional branch to LABEL, which is
19274 the operand template of the label, or NULL if the branch is really a
19275 conditional return.
19277 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19278 condition code register and its mode specifies what kind of
19279 comparison we made.
19281 REVERSED is nonzero if we should reverse the sense of the comparison.
19283 INSN is the insn. */
19285 char *
19286 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
19288 static char string[64];
19289 enum rtx_code code = GET_CODE (op);
19290 rtx cc_reg = XEXP (op, 0);
19291 enum machine_mode mode = GET_MODE (cc_reg);
19292 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
19293 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
19294 int really_reversed = reversed ^ need_longbranch;
19295 char *s = string;
19296 const char *ccode;
19297 const char *pred;
19298 rtx note;
19300 validate_condition_mode (code, mode);
19302 /* Work out which way this really branches. We could always use
19303 reverse_condition_maybe_unordered here, but distinguishing the
19304 cases makes the resulting assembler clearer. */
19305 if (really_reversed)
19307 /* Reversal of FP compares needs care -- an ordered compare
19308 becomes an unordered compare and vice versa. */
19309 if (mode == CCFPmode)
19310 code = reverse_condition_maybe_unordered (code);
19311 else
19312 code = reverse_condition (code);
19315 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
19317 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19318 to the GT bit. */
19319 switch (code)
19321 case EQ:
19322 /* Opposite of GT. */
19323 code = GT;
19324 break;
19326 case NE:
19327 code = UNLE;
19328 break;
19330 default:
19331 gcc_unreachable ();
19335 switch (code)
19337 /* Not all of these are actually distinct opcodes, but
19338 we distinguish them for clarity of the resulting assembler. */
19339 case NE: case LTGT:
19340 ccode = "ne"; break;
19341 case EQ: case UNEQ:
19342 ccode = "eq"; break;
19343 case GE: case GEU:
19344 ccode = "ge"; break;
19345 case GT: case GTU: case UNGT:
19346 ccode = "gt"; break;
19347 case LE: case LEU:
19348 ccode = "le"; break;
19349 case LT: case LTU: case UNLT:
19350 ccode = "lt"; break;
19351 case UNORDERED: ccode = "un"; break;
19352 case ORDERED: ccode = "nu"; break;
19353 case UNGE: ccode = "nl"; break;
19354 case UNLE: ccode = "ng"; break;
19355 default:
19356 gcc_unreachable ();
19359 /* Maybe we have a guess as to how likely the branch is. */
19360 pred = "";
19361 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
19362 if (note != NULL_RTX)
19364 /* PROB is the difference from 50%. */
19365 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
19367 /* Only hint for highly probable/improbable branches on newer
19368 cpus as static prediction overrides processor dynamic
19369 prediction. For older cpus we may as well always hint, but
19370 assume not taken for branches that are very close to 50% as a
19371 mispredicted taken branch is more expensive than a
19372 mispredicted not-taken branch. */
19373 if (rs6000_always_hint
19374 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
19375 && br_prob_note_reliable_p (note)))
19377 if (abs (prob) > REG_BR_PROB_BASE / 20
19378 && ((prob > 0) ^ need_longbranch))
19379 pred = "+";
19380 else
19381 pred = "-";
19385 if (label == NULL)
19386 s += sprintf (s, "b%slr%s ", ccode, pred);
19387 else
19388 s += sprintf (s, "b%s%s ", ccode, pred);
19390 /* We need to escape any '%' characters in the reg_names string.
19391 Assume they'd only be the first character.... */
19392 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
19393 *s++ = '%';
19394 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
19396 if (label != NULL)
19398 /* If the branch distance was too far, we may have to use an
19399 unconditional branch to go the distance. */
19400 if (need_longbranch)
19401 s += sprintf (s, ",$+8\n\tb %s", label);
19402 else
19403 s += sprintf (s, ",%s", label);
19406 return string;
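/* Sample results, assuming -mregnames: "beq+ cr0,L9" for a
   likely-taken EQ branch, or, when the target is out of
   conditional-branch range, "bne cr0,$+8" followed by "b L9", i.e. a
   reversed short branch around an unconditional one. */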
19409 /* Return the string to flip the GT bit on a CR. */
19410 char *
19411 output_e500_flip_gt_bit (rtx dst, rtx src)
19413 static char string[64];
19414 int a, b;
19416 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
19417 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
19419 /* GT bit. */
19420 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
19421 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
19423 sprintf (string, "crnot %d,%d", a, b);
19424 return string;
19427 /* Return insn for VSX or Altivec comparisons. */
19429 static rtx
19430 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19432 rtx mask;
19433 enum machine_mode mode = GET_MODE (op0);
19435 switch (code)
19437 default:
19438 break;
19440 case GE:
19441 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19442 return NULL_RTX;
19444 case EQ:
19445 case GT:
19446 case GTU:
19447 case ORDERED:
19448 case UNORDERED:
19449 case UNEQ:
19450 case LTGT:
19451 mask = gen_reg_rtx (mode);
19452 emit_insn (gen_rtx_SET (VOIDmode,
19453 mask,
19454 gen_rtx_fmt_ee (code, mode, op0, op1)));
19455 return mask;
19458 return NULL_RTX;
19461 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19462 DMODE is expected destination mode. This is a recursive function. */
19464 static rtx
19465 rs6000_emit_vector_compare (enum rtx_code rcode,
19466 rtx op0, rtx op1,
19467 enum machine_mode dmode)
19469 rtx mask;
19470 bool swap_operands = false;
19471 bool try_again = false;
19473 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19474 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19476 /* See if the comparison works as is. */
19477 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19478 if (mask)
19479 return mask;
19481 switch (rcode)
19483 case LT:
19484 rcode = GT;
19485 swap_operands = true;
19486 try_again = true;
19487 break;
19488 case LTU:
19489 rcode = GTU;
19490 swap_operands = true;
19491 try_again = true;
19492 break;
19493 case NE:
19494 case UNLE:
19495 case UNLT:
19496 case UNGE:
19497 case UNGT:
19498 /* Invert condition and try again.
19499 e.g., A != B becomes ~(A==B). */
19501 enum rtx_code rev_code;
19502 enum insn_code nor_code;
19503 rtx mask2;
19505 rev_code = reverse_condition_maybe_unordered (rcode);
19506 if (rev_code == UNKNOWN)
19507 return NULL_RTX;
19509 nor_code = optab_handler (one_cmpl_optab, dmode);
19510 if (nor_code == CODE_FOR_nothing)
19511 return NULL_RTX;
19513 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19514 if (!mask2)
19515 return NULL_RTX;
19517 mask = gen_reg_rtx (dmode);
19518 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19519 return mask;
19521 break;
19522 case GE:
19523 case GEU:
19524 case LE:
19525 case LEU:
19526 /* Try GT/GTU/LT/LTU OR EQ */
19528 rtx c_rtx, eq_rtx;
19529 enum insn_code ior_code;
19530 enum rtx_code new_code;
19532 switch (rcode)
19534 case GE:
19535 new_code = GT;
19536 break;
19538 case GEU:
19539 new_code = GTU;
19540 break;
19542 case LE:
19543 new_code = LT;
19544 break;
19546 case LEU:
19547 new_code = LTU;
19548 break;
19550 default:
19551 gcc_unreachable ();
19554 ior_code = optab_handler (ior_optab, dmode);
19555 if (ior_code == CODE_FOR_nothing)
19556 return NULL_RTX;
19558 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
19559 if (!c_rtx)
19560 return NULL_RTX;
19562 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
19563 if (!eq_rtx)
19564 return NULL_RTX;
19566 mask = gen_reg_rtx (dmode);
19567 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
19568 return mask;
19570 break;
19571 default:
19572 return NULL_RTX;
19575 if (try_again)
19577 if (swap_operands)
19579 rtx tmp;
19580 tmp = op0;
19581 op0 = op1;
19582 op1 = tmp;
19585 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19586 if (mask)
19587 return mask;
19590 /* You only get two chances. */
19591 return NULL_RTX;
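/* E.g., a vector LT is retried as GT with the operands swapped, GE on
   an integer vector becomes (GT OR EQ) via the two recursive calls,
   and NE is built as the one's complement of EQ. */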
19594 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19595 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19596 operands for the relation operation COND. */
19599 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
19600 rtx cond, rtx cc_op0, rtx cc_op1)
19602 enum machine_mode dest_mode = GET_MODE (dest);
19603 enum machine_mode mask_mode = GET_MODE (cc_op0);
19604 enum rtx_code rcode = GET_CODE (cond);
19605 enum machine_mode cc_mode = CCmode;
19606 rtx mask;
19607 rtx cond2;
19608 rtx tmp;
19609 bool invert_move = false;
19611 if (VECTOR_UNIT_NONE_P (dest_mode))
19612 return 0;
19614 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
19615 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
19617 switch (rcode)
19619 /* Swap operands if we can, and fall back to doing the operation as
19620 specified, and doing a NOR to invert the test. */
19621 case NE:
19622 case UNLE:
19623 case UNLT:
19624 case UNGE:
19625 case UNGT:
19626 /* Invert condition and try again.
19627 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19628 invert_move = true;
19629 rcode = reverse_condition_maybe_unordered (rcode);
19630 if (rcode == UNKNOWN)
19631 return 0;
19632 break;
19634 /* Mark unsigned tests with CCUNSmode. */
19635 case GTU:
19636 case GEU:
19637 case LTU:
19638 case LEU:
19639 cc_mode = CCUNSmode;
19640 break;
19642 default:
19643 break;
19646 /* Get the vector mask for the given relational operations. */
19647 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
19649 if (!mask)
19650 return 0;
19652 if (invert_move)
19654 tmp = op_true;
19655 op_true = op_false;
19656 op_false = tmp;
19659 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
19660 CONST0_RTX (dest_mode));
19661 emit_insn (gen_rtx_SET (VOIDmode,
19662 dest,
19663 gen_rtx_IF_THEN_ELSE (dest_mode,
19664 cond2,
19665 op_true,
19666 op_false)));
19667 return 1;
19670 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
19671 operands of the last comparison is nonzero/true, FALSE_COND if it
19672 is zero/false. Return 0 if the hardware has no such operation. */
19675 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19677 enum rtx_code code = GET_CODE (op);
19678 rtx op0 = XEXP (op, 0);
19679 rtx op1 = XEXP (op, 1);
19680 REAL_VALUE_TYPE c1;
19681 enum machine_mode compare_mode = GET_MODE (op0);
19682 enum machine_mode result_mode = GET_MODE (dest);
19683 rtx temp;
19684 bool is_against_zero;
19686 /* These modes should always match. */
19687 if (GET_MODE (op1) != compare_mode
19688 /* In the isel case however, we can use a compare immediate, so
19689 op1 may be a small constant. */
19690 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
19691 return 0;
19692 if (GET_MODE (true_cond) != result_mode)
19693 return 0;
19694 if (GET_MODE (false_cond) != result_mode)
19695 return 0;
19697 /* Don't allow using floating point comparisons for integer results for
19698 now. */
19699 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
19700 return 0;
19702 /* First, work out if the hardware can do this at all, or
19703 if it's too slow.... */
19704 if (!FLOAT_MODE_P (compare_mode))
19706 if (TARGET_ISEL)
19707 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
19708 return 0;
19710 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
19711 && SCALAR_FLOAT_MODE_P (compare_mode))
19712 return 0;
19714 is_against_zero = op1 == CONST0_RTX (compare_mode);
19716 /* A floating-point subtract might overflow, underflow, or produce
19717 an inexact result, thus changing the floating-point flags, so it
19718 can't be generated if we care about that. It's safe if one side
19719 of the construct is zero, since then no subtract will be
19720 generated. */
19721 if (SCALAR_FLOAT_MODE_P (compare_mode)
19722 && flag_trapping_math && ! is_against_zero)
19723 return 0;
19725 /* Eliminate half of the comparisons by switching operands, this
19726 makes the remaining code simpler. */
19727 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
19728 || code == LTGT || code == LT || code == UNLE)
19730 code = reverse_condition_maybe_unordered (code);
19731 temp = true_cond;
19732 true_cond = false_cond;
19733 false_cond = temp;
19736 /* UNEQ and LTGT take four instructions for a comparison with zero, so
19737 it'll probably be faster to use a branch here too. */
19738 if (code == UNEQ && HONOR_NANS (compare_mode))
19739 return 0;
19741 if (GET_CODE (op1) == CONST_DOUBLE)
19742 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
19744 /* We're going to try to implement comparisons by performing
19745 a subtract, then comparing against zero. Unfortunately,
19746 Inf - Inf is NaN which is not zero, and so if we don't
19747 know that the operand is finite and the comparison
19748 would treat EQ different to UNORDERED, we can't do it. */
19749 if (HONOR_INFINITIES (compare_mode)
19750 && code != GT && code != UNGE
19751 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
19752 /* Constructs of the form (a OP b ? a : b) are safe. */
19753 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
19754 || (! rtx_equal_p (op0, true_cond)
19755 && ! rtx_equal_p (op1, true_cond))))
19756 return 0;
19758 /* At this point we know we can use fsel. */
19760 /* Reduce the comparison to a comparison against zero. */
19761 if (! is_against_zero)
19763 temp = gen_reg_rtx (compare_mode);
19764 emit_insn (gen_rtx_SET (VOIDmode, temp,
19765 gen_rtx_MINUS (compare_mode, op0, op1)));
19766 op0 = temp;
19767 op1 = CONST0_RTX (compare_mode);
19770 /* If we don't care about NaNs we can reduce some of the comparisons
19771 down to faster ones. */
19772 if (! HONOR_NANS (compare_mode))
19773 switch (code)
19775 case GT:
19776 code = LE;
19777 temp = true_cond;
19778 true_cond = false_cond;
19779 false_cond = temp;
19780 break;
19781 case UNGE:
19782 code = GE;
19783 break;
19784 case UNEQ:
19785 code = EQ;
19786 break;
19787 default:
19788 break;
19791 /* Now, reduce everything down to a GE. */
19792 switch (code)
19794 case GE:
19795 break;
19797 case LE:
19798 temp = gen_reg_rtx (compare_mode);
19799 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19800 op0 = temp;
19801 break;
19803 case ORDERED:
19804 temp = gen_reg_rtx (compare_mode);
19805 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
19806 op0 = temp;
19807 break;
19809 case EQ:
19810 temp = gen_reg_rtx (compare_mode);
19811 emit_insn (gen_rtx_SET (VOIDmode, temp,
19812 gen_rtx_NEG (compare_mode,
19813 gen_rtx_ABS (compare_mode, op0))));
19814 op0 = temp;
19815 break;
19817 case UNGE:
19818 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19819 temp = gen_reg_rtx (result_mode);
19820 emit_insn (gen_rtx_SET (VOIDmode, temp,
19821 gen_rtx_IF_THEN_ELSE (result_mode,
19822 gen_rtx_GE (VOIDmode,
19823 op0, op1),
19824 true_cond, false_cond)));
19825 false_cond = true_cond;
19826 true_cond = temp;
19828 temp = gen_reg_rtx (compare_mode);
19829 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19830 op0 = temp;
19831 break;
19833 case GT:
19834 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19835 temp = gen_reg_rtx (result_mode);
19836 emit_insn (gen_rtx_SET (VOIDmode, temp,
19837 gen_rtx_IF_THEN_ELSE (result_mode,
19838 gen_rtx_GE (VOIDmode,
19839 op0, op1),
19840 true_cond, false_cond)));
19841 true_cond = false_cond;
19842 false_cond = temp;
19844 temp = gen_reg_rtx (compare_mode);
19845 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19846 op0 = temp;
19847 break;
19849 default:
19850 gcc_unreachable ();
19853 emit_insn (gen_rtx_SET (VOIDmode, dest,
19854 gen_rtx_IF_THEN_ELSE (result_mode,
19855 gen_rtx_GE (VOIDmode,
19856 op0, op1),
19857 true_cond, false_cond)));
19858 return 1;
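/* For reference, a minimal sketch of what the fsel-based sequence built
   above computes (illustrative, not authoritative): fsel behaves like
   (op0 >= 0.0 ? true_cond : false_cond), so every comparison is first
   rewritten as a GE-against-zero using identities such as

     a <= 0   <->   -a >= 0
     a == 0   <->   -fabs (a) >= 0

   and the final insn is the source-level equivalent of

     dest = (op0 >= op1) ? true_cond : false_cond;  */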
19861 /* Same as above, but for ints (isel). */
19863 static int
19864 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19866 rtx condition_rtx, cr;
19867 enum machine_mode mode = GET_MODE (dest);
19868 enum rtx_code cond_code;
19869 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
19870 bool signedp;
19872 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
19873 return 0;
19875 /* We still have to do the compare, because isel doesn't do a
19876 compare; it just looks at the CRx bits set by a previous compare
19877 instruction. */
19878 condition_rtx = rs6000_generate_compare (op, mode);
19879 cond_code = GET_CODE (condition_rtx);
19880 cr = XEXP (condition_rtx, 0);
19881 signedp = GET_MODE (cr) == CCmode;
19883 isel_func = (mode == SImode
19884 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
19885 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
19887 switch (cond_code)
19889 case LT: case GT: case LTU: case GTU: case EQ:
19890 /* isel handles these directly. */
19891 break;
19893 default:
19894 /* We need to swap the sense of the comparison. */
19896 rtx t = true_cond;
19897 true_cond = false_cond;
19898 false_cond = t;
19899 PUT_CODE (condition_rtx, reverse_condition (cond_code));
19901 break;
19904 false_cond = force_reg (mode, false_cond);
19905 if (true_cond != const0_rtx)
19906 true_cond = force_reg (mode, true_cond);
19908 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
19910 return 1;
19913 const char *
19914 output_isel (rtx *operands)
19916 enum rtx_code code;
19918 code = GET_CODE (operands[1]);
19920 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
19922 gcc_assert (GET_CODE (operands[2]) == REG
19923 && GET_CODE (operands[3]) == REG);
19924 PUT_CODE (operands[1], reverse_condition (code));
19925 return "isel %0,%3,%2,%j1";
19928 return "isel %0,%2,%3,%j1";
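/* Illustrative note on the reversal above: isel selects on a single CR
   bit, so a GE test, for example, is handled by reversing the condition
   to LT and swapping the two data operands, turning
   "isel %0,%2,%3,%j1" into "isel %0,%3,%2,%j1" with the same result.  */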
19931 void
19932 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
19934 enum machine_mode mode = GET_MODE (op0);
19935 enum rtx_code c;
19936 rtx target;
19938 /* VSX/altivec have direct min/max insns. */
19939 if ((code == SMAX || code == SMIN)
19940 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
19941 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
19943 emit_insn (gen_rtx_SET (VOIDmode,
19944 dest,
19945 gen_rtx_fmt_ee (code, mode, op0, op1)));
19946 return;
19949 if (code == SMAX || code == SMIN)
19950 c = GE;
19951 else
19952 c = GEU;
19954 if (code == SMAX || code == UMAX)
19955 target = emit_conditional_move (dest, c, op0, op1, mode,
19956 op0, op1, mode, 0);
19957 else
19958 target = emit_conditional_move (dest, c, op0, op1, mode,
19959 op1, op0, mode, 0);
19960 gcc_assert (target);
19961 if (target != dest)
19962 emit_move_insn (dest, target);
19965 /* A subroutine of the atomic operation splitters. Jump to LABEL if
19966 COND is true. Mark the jump as unlikely to be taken. */
19968 static void
19969 emit_unlikely_jump (rtx cond, rtx label)
19971 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
19972 rtx x;
19974 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
19975 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
19976 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
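/* With REG_BR_PROB_BASE at its usual value of 10000, the probability
   attached above works out to 10000 / 100 - 1 = 99, i.e. the jump is
   predicted taken roughly 1% of the time.  */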
19979 /* A subroutine of the atomic operation splitters. Emit a load-locked
19980 instruction in MODE. For QI/HImode, possibly use a pattern that includes
19981 the zero_extend operation. */
19983 static void
19984 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
19986 rtx (*fn) (rtx, rtx) = NULL;
19988 switch (mode)
19990 case QImode:
19991 fn = gen_load_lockedqi;
19992 break;
19993 case HImode:
19994 fn = gen_load_lockedhi;
19995 break;
19996 case SImode:
19997 if (GET_MODE (mem) == QImode)
19998 fn = gen_load_lockedqi_si;
19999 else if (GET_MODE (mem) == HImode)
20000 fn = gen_load_lockedhi_si;
20001 else
20002 fn = gen_load_lockedsi;
20003 break;
20004 case DImode:
20005 fn = gen_load_lockeddi;
20006 break;
20007 case TImode:
20008 fn = gen_load_lockedti;
20009 break;
20010 default:
20011 gcc_unreachable ();
20013 emit_insn (fn (reg, mem));
20016 /* A subroutine of the atomic operation splitters. Emit a store-conditional
20017 instruction in MODE. */
20019 static void
20020 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
20022 rtx (*fn) (rtx, rtx, rtx) = NULL;
20024 switch (mode)
20026 case QImode:
20027 fn = gen_store_conditionalqi;
20028 break;
20029 case HImode:
20030 fn = gen_store_conditionalhi;
20031 break;
20032 case SImode:
20033 fn = gen_store_conditionalsi;
20034 break;
20035 case DImode:
20036 fn = gen_store_conditionaldi;
20037 break;
20038 case TImode:
20039 fn = gen_store_conditionalti;
20040 break;
20041 default:
20042 gcc_unreachable ();
20045 /* Emit sync before stwcx. to address PPC405 Erratum. */
20046 if (PPC405_ERRATUM77)
20047 emit_insn (gen_hwsync ());
20049 emit_insn (fn (res, mem, val));
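/* The two helpers above always come in pairs.  A sketch of the loop the
   atomic splitters build from them (illustrative pseudo-assembly for
   SImode):

     retry: lwarx  rT, 0, rA      # load-locked
            ...                   # compute the new value in rN
            stwcx. rN, 0, rA      # store-conditional, sets CR0
            bne-   retry          # reservation lost, try again  */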
20052 /* Expand barriers before and after a load_locked/store_cond sequence. */
20054 static rtx
20055 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
20057 rtx addr = XEXP (mem, 0);
20058 int strict_p = (reload_in_progress || reload_completed);
20060 if (!legitimate_indirect_address_p (addr, strict_p)
20061 && !legitimate_indexed_address_p (addr, strict_p))
20063 addr = force_reg (Pmode, addr);
20064 mem = replace_equiv_address_nv (mem, addr);
20067 switch (model)
20069 case MEMMODEL_RELAXED:
20070 case MEMMODEL_CONSUME:
20071 case MEMMODEL_ACQUIRE:
20072 break;
20073 case MEMMODEL_RELEASE:
20074 case MEMMODEL_ACQ_REL:
20075 emit_insn (gen_lwsync ());
20076 break;
20077 case MEMMODEL_SEQ_CST:
20078 emit_insn (gen_hwsync ());
20079 break;
20080 default:
20081 gcc_unreachable ();
20083 return mem;
20086 static void
20087 rs6000_post_atomic_barrier (enum memmodel model)
20089 switch (model)
20091 case MEMMODEL_RELAXED:
20092 case MEMMODEL_CONSUME:
20093 case MEMMODEL_RELEASE:
20094 break;
20095 case MEMMODEL_ACQUIRE:
20096 case MEMMODEL_ACQ_REL:
20097 case MEMMODEL_SEQ_CST:
20098 emit_insn (gen_isync ());
20099 break;
20100 default:
20101 gcc_unreachable ();
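/* Summary of the barrier placement implemented by the two routines above:

     model     before larx/stcx.   after
     relaxed   (none)              (none)
     consume   (none)              (none)
     acquire   (none)              isync
     release   lwsync              (none)
     acq_rel   lwsync              isync
     seq_cst   hwsync              isync  */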
20105 /* A subroutine of the various atomic expanders. For sub-word operations,
20106 we must adjust things to operate on SImode. Given the original MEM,
20107 return a new aligned memory. Also build and return the quantities by
20108 which to shift and mask. */
20110 static rtx
20111 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
20113 rtx addr, align, shift, mask, mem;
20114 HOST_WIDE_INT shift_mask;
20115 enum machine_mode mode = GET_MODE (orig_mem);
20117 /* For smaller modes, we have to implement this via SImode. */
20118 shift_mask = (mode == QImode ? 0x18 : 0x10);
20120 addr = XEXP (orig_mem, 0);
20121 addr = force_reg (GET_MODE (addr), addr);
20123 /* Aligned memory containing subword. Generate a new memory. We
20124 do not want any of the existing MEM_ATTR data, as we're now
20125 accessing memory outside the original object. */
20126 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
20127 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20128 mem = gen_rtx_MEM (SImode, align);
20129 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
20130 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
20131 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
20133 /* Shift amount for subword relative to aligned word. */
20134 shift = gen_reg_rtx (SImode);
20135 addr = gen_lowpart (SImode, addr);
20136 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
20137 if (BYTES_BIG_ENDIAN)
20138 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
20139 shift, 1, OPTAB_LIB_WIDEN);
20140 *pshift = shift;
20142 /* Mask for insertion. */
20143 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
20144 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
20145 *pmask = mask;
20147 return mem;
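/* Worked example (hypothetical numbers): a QImode access at address
   0x1001 on a big-endian target.  align = 0x1001 & -4 = 0x1000; the
   rlwinm computes (addr << 3) & 0x18 = 8, and the XOR with 0x18 turns
   that into 16, the bit offset of the byte within the big-endian word;
   the mask is then 0xff << 16.  */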
20150 /* A subroutine of the various atomic expanders. For sub-word operands,
20151 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
20153 static rtx
20154 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
20156 rtx x;
20158 x = gen_reg_rtx (SImode);
20159 emit_insn (gen_rtx_SET (VOIDmode, x,
20160 gen_rtx_AND (SImode,
20161 gen_rtx_NOT (SImode, mask),
20162 oldval)));
20164 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
20166 return x;
20169 /* A subroutine of the various atomic expanders. For sub-word operands,
20170 extract WIDE to NARROW via SHIFT. */
20172 static void
20173 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
20175 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
20176 wide, 1, OPTAB_LIB_WIDEN);
20177 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
20180 /* Expand an atomic compare and swap operation. */
20182 void
20183 rs6000_expand_atomic_compare_and_swap (rtx operands[])
20185 rtx boolval, retval, mem, oldval, newval, cond;
20186 rtx label1, label2, x, mask, shift;
20187 enum machine_mode mode, orig_mode;
20188 enum memmodel mod_s, mod_f;
20189 bool is_weak;
20191 boolval = operands[0];
20192 retval = operands[1];
20193 mem = operands[2];
20194 oldval = operands[3];
20195 newval = operands[4];
20196 is_weak = (INTVAL (operands[5]) != 0);
20197 mod_s = (enum memmodel) INTVAL (operands[6]);
20198 mod_f = (enum memmodel) INTVAL (operands[7]);
20199 orig_mode = mode = GET_MODE (mem);
20201 mask = shift = NULL_RTX;
20202 if (mode == QImode || mode == HImode)
20204 /* Before power8, we didn't have access to lbarx/lharx, so we generate
20205 lwarx and shift/mask operations. With power8, we need to do the
20206 comparison in SImode, but the store is still done in QI/HImode. */
20207 oldval = convert_modes (SImode, mode, oldval, 1);
20209 if (!TARGET_SYNC_HI_QI)
20211 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20213 /* Shift and mask OLDVAL into position within the word. */
20214 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
20215 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20217 /* Shift and mask NEWVAL into position within the word. */
20218 newval = convert_modes (SImode, mode, newval, 1);
20219 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
20220 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20223 /* Prepare to adjust the return value. */
20224 retval = gen_reg_rtx (SImode);
20225 mode = SImode;
20227 else if (reg_overlap_mentioned_p (retval, oldval))
20228 oldval = copy_to_reg (oldval);
20230 mem = rs6000_pre_atomic_barrier (mem, mod_s);
20232 label1 = NULL_RTX;
20233 if (!is_weak)
20235 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20236 emit_label (XEXP (label1, 0));
20238 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20240 emit_load_locked (mode, retval, mem);
20242 x = retval;
20243 if (mask)
20245 x = expand_simple_binop (SImode, AND, retval, mask,
20246 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20249 cond = gen_reg_rtx (CCmode);
20250 /* If we have TImode, synthesize a comparison. */
20251 if (mode != TImode)
20252 x = gen_rtx_COMPARE (CCmode, x, oldval);
20253 else
20255 rtx xor1_result = gen_reg_rtx (DImode);
20256 rtx xor2_result = gen_reg_rtx (DImode);
20257 rtx or_result = gen_reg_rtx (DImode);
20258 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
20259 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
20260 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
20261 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
20263 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
20264 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
20265 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
20266 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
20269 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
20271 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20272 emit_unlikely_jump (x, label2);
20274 x = newval;
20275 if (mask)
20276 x = rs6000_mask_atomic_subword (retval, newval, mask);
20278 emit_store_conditional (orig_mode, cond, mem, x);
20280 if (!is_weak)
20282 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20283 emit_unlikely_jump (x, label1);
20286 if (mod_f != MEMMODEL_RELAXED)
20287 emit_label (XEXP (label2, 0));
20289 rs6000_post_atomic_barrier (mod_s);
20291 if (mod_f == MEMMODEL_RELAXED)
20292 emit_label (XEXP (label2, 0));
20294 if (shift)
20295 rs6000_finish_atomic_subword (operands[1], retval, shift);
20296 else if (mode != GET_MODE (operands[1]))
20297 convert_move (operands[1], retval, 1);
20299 /* In all cases, CR0 contains EQ on success, and NE on failure. */
20300 x = gen_rtx_EQ (SImode, cond, const0_rtx);
20301 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
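/* A minimal user-level sketch (not part of this file) of code this
   expander services through the standard GCC atomic builtins:

     _Bool
     cas (int *p, int *expected, int desired)
     {
       return __atomic_compare_exchange_n (p, expected, desired, 0,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
     }

   The fourth argument (0 = strong) becomes IS_WEAK above, and the two
   memory-model arguments become MOD_S and MOD_F.  */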
20304 /* Expand an atomic exchange operation. */
20306 void
20307 rs6000_expand_atomic_exchange (rtx operands[])
20309 rtx retval, mem, val, cond;
20310 enum machine_mode mode;
20311 enum memmodel model;
20312 rtx label, x, mask, shift;
20314 retval = operands[0];
20315 mem = operands[1];
20316 val = operands[2];
20317 model = (enum memmodel) INTVAL (operands[3]);
20318 mode = GET_MODE (mem);
20320 mask = shift = NULL_RTX;
20321 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
20323 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20325 /* Shift and mask VAL into position within the word. */
20326 val = convert_modes (SImode, mode, val, 1);
20327 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20328 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20330 /* Prepare to adjust the return value. */
20331 retval = gen_reg_rtx (SImode);
20332 mode = SImode;
20335 mem = rs6000_pre_atomic_barrier (mem, model);
20337 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20338 emit_label (XEXP (label, 0));
20340 emit_load_locked (mode, retval, mem);
20342 x = val;
20343 if (mask)
20344 x = rs6000_mask_atomic_subword (retval, val, mask);
20346 cond = gen_reg_rtx (CCmode);
20347 emit_store_conditional (mode, cond, mem, x);
20349 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20350 emit_unlikely_jump (x, label);
20352 rs6000_post_atomic_barrier (model);
20354 if (shift)
20355 rs6000_finish_atomic_subword (operands[0], retval, shift);
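/* Likewise, a hypothetical user-level trigger for this expander:

     int old = __atomic_exchange_n (&flag, 1, __ATOMIC_ACQUIRE);

   which becomes a load-locked/store-conditional loop whose loaded
   value ends up in RETVAL.  */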
20358 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
20359 to perform. MEM is the memory on which to operate. VAL is the second
20360 operand of the binary operator. BEFORE and AFTER are optional locations to
20361 return the value of MEM either before of after the operation. MODEL_RTX
20362 is a CONST_INT containing the memory model to use. */
20364 void
20365 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
20366 rtx orig_before, rtx orig_after, rtx model_rtx)
20368 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
20369 enum machine_mode mode = GET_MODE (mem);
20370 enum machine_mode store_mode = mode;
20371 rtx label, x, cond, mask, shift;
20372 rtx before = orig_before, after = orig_after;
20374 mask = shift = NULL_RTX;
20375 /* On power8, we want to use SImode for the operation. On previous systems,
20376 do the operation on the containing word and shift/mask to get the proper byte or
20377 halfword. */
20378 if (mode == QImode || mode == HImode)
20380 if (TARGET_SYNC_HI_QI)
20382 val = convert_modes (SImode, mode, val, 1);
20384 /* Prepare to adjust the return value. */
20385 before = gen_reg_rtx (SImode);
20386 if (after)
20387 after = gen_reg_rtx (SImode);
20388 mode = SImode;
20390 else
20392 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20394 /* Shift and mask VAL into position with the word. */
20395 val = convert_modes (SImode, mode, val, 1);
20396 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20397 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20399 switch (code)
20401 case IOR:
20402 case XOR:
20403 /* We've already zero-extended VAL. That is sufficient to
20404 make certain that it does not affect other bits. */
20405 mask = NULL;
20406 break;
20408 case AND:
20409 /* If we make certain that all of the other bits in VAL are
20410 set, that will be sufficient to not affect other bits. */
20411 x = gen_rtx_NOT (SImode, mask);
20412 x = gen_rtx_IOR (SImode, x, val);
20413 emit_insn (gen_rtx_SET (VOIDmode, val, x));
20414 mask = NULL;
20415 break;
20417 case NOT:
20418 case PLUS:
20419 case MINUS:
20420 /* These will all affect bits outside the field and need
20421 adjustment via MASK within the loop. */
20422 break;
20424 default:
20425 gcc_unreachable ();
20428 /* Prepare to adjust the return value. */
20429 before = gen_reg_rtx (SImode);
20430 if (after)
20431 after = gen_reg_rtx (SImode);
20432 store_mode = mode = SImode;
20436 mem = rs6000_pre_atomic_barrier (mem, model);
20438 label = gen_label_rtx ();
20439 emit_label (label);
20440 label = gen_rtx_LABEL_REF (VOIDmode, label);
20442 if (before == NULL_RTX)
20443 before = gen_reg_rtx (mode);
20445 emit_load_locked (mode, before, mem);
20447 if (code == NOT)
20449 x = expand_simple_binop (mode, AND, before, val,
20450 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20451 after = expand_simple_unop (mode, NOT, x, after, 1);
20453 else
20455 after = expand_simple_binop (mode, code, before, val,
20456 after, 1, OPTAB_LIB_WIDEN);
20459 x = after;
20460 if (mask)
20462 x = expand_simple_binop (SImode, AND, after, mask,
20463 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20464 x = rs6000_mask_atomic_subword (before, x, mask);
20466 else if (store_mode != mode)
20467 x = convert_modes (store_mode, mode, x, 1);
20469 cond = gen_reg_rtx (CCmode);
20470 emit_store_conditional (store_mode, cond, mem, x);
20472 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20473 emit_unlikely_jump (x, label);
20475 rs6000_post_atomic_barrier (model);
20477 if (shift)
20479 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
20480 then do the calculations in an SImode register. */
20481 if (orig_before)
20482 rs6000_finish_atomic_subword (orig_before, before, shift);
20483 if (orig_after)
20484 rs6000_finish_atomic_subword (orig_after, after, shift);
20486 else if (store_mode != mode)
20488 /* QImode/HImode on machines with lbarx/lharx where we do the native
20490 operation and then do the calculations in an SImode register. */
20490 if (orig_before)
20491 convert_move (orig_before, before, 1);
20492 if (orig_after)
20493 convert_move (orig_after, after, 1);
20495 else if (orig_after && after != orig_after)
20496 emit_move_insn (orig_after, after);
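/* A sketch of the loop shape emitted above for a full-word fetch-and-add
   (illustrative pseudo-assembly):

     retry: lwarx  rB, 0, rA      # BEFORE value
            add    rC, rB, rV     # AFTER = BEFORE + VAL
            stwcx. rC, 0, rA
            bne-   retry

   A user-level equivalent would be
   __atomic_fetch_add (&x, v, __ATOMIC_SEQ_CST).  */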
20499 /* Emit instructions to move SRC to DST. Called by splitters for
20500 multi-register moves. It will emit at most one instruction for
20501 each register that is accessed; that is, it won't emit li/lis pairs
20502 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20503 register. */
20505 void
20506 rs6000_split_multireg_move (rtx dst, rtx src)
20508 /* The register number of the first register being moved. */
20509 int reg;
20510 /* The mode that is to be moved. */
20511 enum machine_mode mode;
20512 /* The mode that the move is being done in, and its size. */
20513 enum machine_mode reg_mode;
20514 int reg_mode_size;
20515 /* The number of registers that will be moved. */
20516 int nregs;
20518 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20519 mode = GET_MODE (dst);
20520 nregs = hard_regno_nregs[reg][mode];
20521 if (FP_REGNO_P (reg))
20522 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20523 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20524 else if (ALTIVEC_REGNO_P (reg))
20525 reg_mode = V16QImode;
20526 else if (TARGET_E500_DOUBLE && mode == TFmode)
20527 reg_mode = DFmode;
20528 else
20529 reg_mode = word_mode;
20530 reg_mode_size = GET_MODE_SIZE (reg_mode);
20532 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20534 /* TDmode residing in FP registers is special, since the ISA requires that
20535 the lower-numbered word of a register pair is always the most significant
20536 word, even in little-endian mode. This does not match the usual subreg
20537 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20538 the appropriate constituent registers "by hand" in little-endian mode.
20540 Note we do not need to check for destructive overlap here since TDmode
20541 can only reside in even/odd register pairs. */
20542 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20544 rtx p_src, p_dst;
20545 int i;
20547 for (i = 0; i < nregs; i++)
20549 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
20550 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
20551 else
20552 p_src = simplify_gen_subreg (reg_mode, src, mode,
20553 i * reg_mode_size);
20555 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
20556 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
20557 else
20558 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
20559 i * reg_mode_size);
20561 emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
20564 return;
20567 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
20569 /* Move register range backwards, if we might have destructive
20570 overlap. */
20571 int i;
20572 for (i = nregs - 1; i >= 0; i--)
20573 emit_insn (gen_rtx_SET (VOIDmode,
20574 simplify_gen_subreg (reg_mode, dst, mode,
20575 i * reg_mode_size),
20576 simplify_gen_subreg (reg_mode, src, mode,
20577 i * reg_mode_size)));
20579 else
20581 int i;
20582 int j = -1;
20583 bool used_update = false;
20584 rtx restore_basereg = NULL_RTX;
20586 if (MEM_P (src) && INT_REGNO_P (reg))
20588 rtx breg;
20590 if (GET_CODE (XEXP (src, 0)) == PRE_INC
20591 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
20593 rtx delta_rtx;
20594 breg = XEXP (XEXP (src, 0), 0);
20595 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
20596 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
20597 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
20598 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20599 src = replace_equiv_address (src, breg);
20601 else if (! rs6000_offsettable_memref_p (src, reg_mode))
20603 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
20605 rtx basereg = XEXP (XEXP (src, 0), 0);
20606 if (TARGET_UPDATE)
20608 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
20609 emit_insn (gen_rtx_SET (VOIDmode, ndst,
20610 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
20611 used_update = true;
20613 else
20614 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20615 XEXP (XEXP (src, 0), 1)));
20616 src = replace_equiv_address (src, basereg);
20618 else
20620 rtx basereg = gen_rtx_REG (Pmode, reg);
20621 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
20622 src = replace_equiv_address (src, basereg);
20626 breg = XEXP (src, 0);
20627 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
20628 breg = XEXP (breg, 0);
20630 /* If the base register we are using to address memory is
20631 also a destination reg, then change that register last. */
20632 if (REG_P (breg)
20633 && REGNO (breg) >= REGNO (dst)
20634 && REGNO (breg) < REGNO (dst) + nregs)
20635 j = REGNO (breg) - REGNO (dst);
20637 else if (MEM_P (dst) && INT_REGNO_P (reg))
20639 rtx breg;
20641 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
20642 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
20644 rtx delta_rtx;
20645 breg = XEXP (XEXP (dst, 0), 0);
20646 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
20647 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
20648 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
20650 /* We have to update the breg before doing the store.
20651 Use store with update, if available. */
20653 if (TARGET_UPDATE)
20655 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20656 emit_insn (TARGET_32BIT
20657 ? (TARGET_POWERPC64
20658 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
20659 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
20660 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
20661 used_update = true;
20663 else
20664 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20665 dst = replace_equiv_address (dst, breg);
20667 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
20668 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
20670 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
20672 rtx basereg = XEXP (XEXP (dst, 0), 0);
20673 if (TARGET_UPDATE)
20675 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20676 emit_insn (gen_rtx_SET (VOIDmode,
20677 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
20678 used_update = true;
20680 else
20681 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20682 XEXP (XEXP (dst, 0), 1)));
20683 dst = replace_equiv_address (dst, basereg);
20685 else
20687 rtx basereg = XEXP (XEXP (dst, 0), 0);
20688 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
20689 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
20690 && REG_P (basereg)
20691 && REG_P (offsetreg)
20692 && REGNO (basereg) != REGNO (offsetreg));
20693 if (REGNO (basereg) == 0)
20695 rtx tmp = offsetreg;
20696 offsetreg = basereg;
20697 basereg = tmp;
20699 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
20700 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
20701 dst = replace_equiv_address (dst, basereg);
20704 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
20705 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
20708 for (i = 0; i < nregs; i++)
20710 /* Calculate index to next subword. */
20711 ++j;
20712 if (j == nregs)
20713 j = 0;
20715 /* If compiler already emitted move of first word by
20716 store with update, no need to do anything. */
20717 if (j == 0 && used_update)
20718 continue;
20720 emit_insn (gen_rtx_SET (VOIDmode,
20721 simplify_gen_subreg (reg_mode, dst, mode,
20722 j * reg_mode_size),
20723 simplify_gen_subreg (reg_mode, src, mode,
20724 j * reg_mode_size)));
20726 if (restore_basereg != NULL_RTX)
20727 emit_insn (restore_basereg);
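/* Example (hypothetical): a TImode move between overlapping GPR ranges
   on a 64-bit target is split into two DImode register moves; when
   REGNO (src) < REGNO (dst) the loop above moves the high-numbered
   register first so no source register is clobbered before it has
   been read.  */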
20732 /* This page contains routines that are used to determine what the
20733 function prologue and epilogue code will do and write them out. */
20735 static inline bool
20736 save_reg_p (int r)
20738 return !call_used_regs[r] && df_regs_ever_live_p (r);
20741 /* Return the first fixed-point register that is required to be
20742 saved. 32 if none. */
20744 static int
20745 first_reg_to_save (void)
20747 int first_reg;
20749 /* Find lowest numbered live register. */
20750 for (first_reg = 13; first_reg <= 31; first_reg++)
20751 if (save_reg_p (first_reg))
20752 break;
20754 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
20755 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20756 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
20757 || (TARGET_TOC && TARGET_MINIMAL_TOC))
20758 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20759 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
20761 #if TARGET_MACHO
20762 if (flag_pic
20763 && crtl->uses_pic_offset_table
20764 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
20765 return RS6000_PIC_OFFSET_TABLE_REGNUM;
20766 #endif
20768 return first_reg;
20771 /* Similar, for FP regs. */
20773 static int
20774 first_fp_reg_to_save (void)
20776 int first_reg;
20778 /* Find lowest numbered live register. */
20779 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
20780 if (save_reg_p (first_reg))
20781 break;
20783 return first_reg;
20786 /* Similar, for AltiVec regs. */
20788 static int
20789 first_altivec_reg_to_save (void)
20791 int i;
20793 /* Stack frame remains as is unless we are in AltiVec ABI. */
20794 if (! TARGET_ALTIVEC_ABI)
20795 return LAST_ALTIVEC_REGNO + 1;
20797 /* On Darwin, the unwind routines are compiled without
20798 TARGET_ALTIVEC, and use save_world to save/restore the
20799 altivec registers when necessary. */
20800 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20801 && ! TARGET_ALTIVEC)
20802 return FIRST_ALTIVEC_REGNO + 20;
20804 /* Find lowest numbered live register. */
20805 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
20806 if (save_reg_p (i))
20807 break;
20809 return i;
20812 /* Return a 32-bit mask of the AltiVec registers we need to set in
20813 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
20814 the 32-bit word is 0. */
20816 static unsigned int
20817 compute_vrsave_mask (void)
20819 unsigned int i, mask = 0;
20821 /* On Darwin, the unwind routines are compiled without
20822 TARGET_ALTIVEC, and use save_world to save/restore the
20823 call-saved altivec registers when necessary. */
20824 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20825 && ! TARGET_ALTIVEC)
20826 mask |= 0xFFF;
20828 /* First, find out if we use _any_ altivec registers. */
20829 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20830 if (df_regs_ever_live_p (i))
20831 mask |= ALTIVEC_REG_BIT (i);
20833 if (mask == 0)
20834 return mask;
20836 /* Next, remove the argument registers from the set. These must
20837 be in the VRSAVE mask set by the caller, so we don't need to add
20838 them in again. More importantly, the mask we compute here is
20839 used to generate CLOBBERs in the set_vrsave insn, and we do not
20840 wish the argument registers to die. */
20841 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
20842 mask &= ~ALTIVEC_REG_BIT (i);
20844 /* Similarly, remove the return value from the set. */
20846 bool yes = false;
20847 diddle_return_value (is_altivec_return_reg, &yes);
20848 if (yes)
20849 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
20852 return mask;
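/* Example of the mask layout documented above: if only V20 and V21 are
   live, bits 20 and 21 counted from the MSB are set, i.e.
   mask = (1 << (31 - 20)) | (1 << (31 - 21)) = 0x00000c00.  */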
20855 /* For a very restricted set of circumstances, we can cut down the
20856 size of prologues/epilogues by calling our own save/restore-the-world
20857 routines. */
20859 static void
20860 compute_save_world_info (rs6000_stack_t *info_ptr)
20862 info_ptr->world_save_p = 1;
20863 info_ptr->world_save_p
20864 = (WORLD_SAVE_P (info_ptr)
20865 && DEFAULT_ABI == ABI_DARWIN
20866 && !cfun->has_nonlocal_label
20867 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
20868 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
20869 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
20870 && info_ptr->cr_save_p);
20872 /* This will not work in conjunction with sibcalls. Make sure there
20873 are none. (This check is expensive, but seldom executed.) */
20874 if (WORLD_SAVE_P (info_ptr))
20876 rtx_insn *insn;
20877 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
20878 if (CALL_P (insn) && SIBLING_CALL_P (insn))
20880 info_ptr->world_save_p = 0;
20881 break;
20885 if (WORLD_SAVE_P (info_ptr))
20887 /* Even if we're not touching VRsave, make sure there's room on the
20888 stack for it, if it looks like we're calling SAVE_WORLD, which
20889 will attempt to save it. */
20890 info_ptr->vrsave_size = 4;
20892 /* If we are going to save the world, we need to save the link register too. */
20893 info_ptr->lr_save_p = 1;
20895 /* "Save" the VRsave register too if we're saving the world. */
20896 if (info_ptr->vrsave_mask == 0)
20897 info_ptr->vrsave_mask = compute_vrsave_mask ();
20899 /* Because the Darwin register save/restore routines only handle
20900 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
20901 check. */
20902 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
20903 && (info_ptr->first_altivec_reg_save
20904 >= FIRST_SAVED_ALTIVEC_REGNO));
20906 return;
20910 static void
20911 is_altivec_return_reg (rtx reg, void *xyes)
20913 bool *yes = (bool *) xyes;
20914 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
20915 *yes = true;
20919 /* Look for user-defined global regs in the range FIRST to LAST-1.
20920 We should not restore these, and so cannot use lmw or out-of-line
20921 restore functions if there are any. We also can't save them
20922 (well, emit frame notes for them), because frame unwinding during
20923 exception handling will restore saved registers. */
20925 static bool
20926 global_regs_p (unsigned first, unsigned last)
20928 while (first < last)
20929 if (global_regs[first++])
20930 return true;
20931 return false;
20934 /* Determine the strategy for saving/restoring registers. */
20936 enum {
20937 SAVRES_MULTIPLE = 0x1,
20938 SAVE_INLINE_FPRS = 0x2,
20939 SAVE_INLINE_GPRS = 0x4,
20940 REST_INLINE_FPRS = 0x8,
20941 REST_INLINE_GPRS = 0x10,
20942 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
20943 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
20944 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
20945 SAVE_INLINE_VRS = 0x100,
20946 REST_INLINE_VRS = 0x200
20949 static int
20950 rs6000_savres_strategy (rs6000_stack_t *info,
20951 bool using_static_chain_p)
20953 int strategy = 0;
20954 bool lr_save_p;
20956 if (TARGET_MULTIPLE
20957 && !TARGET_POWERPC64
20958 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
20959 && info->first_gp_reg_save < 31
20960 && !global_regs_p (info->first_gp_reg_save, 32))
20961 strategy |= SAVRES_MULTIPLE;
20963 if (crtl->calls_eh_return
20964 || cfun->machine->ra_need_lr)
20965 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
20966 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
20967 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20969 if (info->first_fp_reg_save == 64
20970 /* The out-of-line FP routines use double-precision stores;
20971 we can't use those routines if we don't have such stores. */
20972 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
20973 || global_regs_p (info->first_fp_reg_save, 64))
20974 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20976 if (info->first_gp_reg_save == 32
20977 || (!(strategy & SAVRES_MULTIPLE)
20978 && global_regs_p (info->first_gp_reg_save, 32)))
20979 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20981 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
20982 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
20983 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20985 /* Define cutoff for using out-of-line functions to save registers. */
20986 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
20988 if (!optimize_size)
20990 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20991 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20992 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20994 else
20996 /* Prefer out-of-line restore if it will exit. */
20997 if (info->first_fp_reg_save > 61)
20998 strategy |= SAVE_INLINE_FPRS;
20999 if (info->first_gp_reg_save > 29)
21001 if (info->first_fp_reg_save == 64)
21002 strategy |= SAVE_INLINE_GPRS;
21003 else
21004 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21006 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
21007 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21010 else if (DEFAULT_ABI == ABI_DARWIN)
21012 if (info->first_fp_reg_save > 60)
21013 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21014 if (info->first_gp_reg_save > 29)
21015 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21016 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21018 else
21020 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21021 if (info->first_fp_reg_save > 61)
21022 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21023 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21024 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21027 /* Don't bother to try to save things out-of-line if r11 is occupied
21028 by the static chain. It would require too much fiddling and the
21029 static chain is rarely used anyway. FPRs are saved w.r.t the stack
21030 pointer on Darwin, and AIX uses r1 or r12. */
21031 if (using_static_chain_p
21032 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
21033 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
21034 | SAVE_INLINE_GPRS
21035 | SAVE_INLINE_VRS | REST_INLINE_VRS);
21037 /* We can only use the out-of-line routines to restore if we've
21038 saved all the registers from first_fp_reg_save in the prologue.
21039 Otherwise, we risk loading garbage. */
21040 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
21042 int i;
21044 for (i = info->first_fp_reg_save; i < 64; i++)
21045 if (!save_reg_p (i))
21047 strategy |= REST_INLINE_FPRS;
21048 break;
21052 /* If we are going to use store multiple, then don't even bother
21053 with the out-of-line routines, since the store-multiple
21054 instruction will always be smaller. */
21055 if ((strategy & SAVRES_MULTIPLE))
21056 strategy |= SAVE_INLINE_GPRS;
21058 /* info->lr_save_p isn't yet set if the only reason lr needs to be
21059 saved is an out-of-line save or restore. Set up the value for
21060 the next test (excluding out-of-line gpr restore). */
21061 lr_save_p = (info->lr_save_p
21062 || !(strategy & SAVE_INLINE_GPRS)
21063 || !(strategy & SAVE_INLINE_FPRS)
21064 || !(strategy & SAVE_INLINE_VRS)
21065 || !(strategy & REST_INLINE_FPRS)
21066 || !(strategy & REST_INLINE_VRS));
21068 /* The situation is more complicated with load multiple. We'd
21069 prefer to use the out-of-line routines for restores, since the
21070 "exit" out-of-line routines can handle the restore of LR and the
21071 frame teardown. However it doesn't make sense to use the
21072 out-of-line routine if that is the only reason we'd need to save
21073 LR, and we can't use the "exit" out-of-line gpr restore if we
21074 have saved some fprs; in those cases it is advantageous to use
21075 load multiple when available. */
21076 if ((strategy & SAVRES_MULTIPLE)
21077 && (!lr_save_p
21078 || info->first_fp_reg_save != 64))
21079 strategy |= REST_INLINE_GPRS;
21081 /* Saving CR interferes with the exit routines used on the SPE, so
21082 just punt here. */
21083 if (TARGET_SPE_ABI
21084 && info->spe_64bit_regs_used
21085 && info->cr_save_p)
21086 strategy |= REST_INLINE_GPRS;
21088 /* We can only use load multiple or the out-of-line routines to
21089 restore if we've used store multiple or out-of-line routines
21090 in the prologue, i.e. if we've saved all the registers from
21091 first_gp_reg_save. Otherwise, we risk loading garbage. */
21092 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
21093 == SAVE_INLINE_GPRS)
21095 int i;
21097 for (i = info->first_gp_reg_save; i < 32; i++)
21098 if (!save_reg_p (i))
21100 strategy |= REST_INLINE_GPRS;
21101 break;
21105 if (TARGET_ELF && TARGET_64BIT)
21107 if (!(strategy & SAVE_INLINE_FPRS))
21108 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21109 else if (!(strategy & SAVE_INLINE_GPRS)
21110 && info->first_fp_reg_save == 64)
21111 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
21113 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
21114 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
21116 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
21117 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21119 return strategy;
21122 /* Calculate the stack information for the current function. This is
21123 complicated by having two separate calling sequences, the AIX calling
21124 sequence and the V.4 calling sequence.
21126 AIX (and Darwin/Mac OS X) stack frames look like:
21127 32-bit 64-bit
21128 SP----> +---------------------------------------+
21129 | back chain to caller | 0 0
21130 +---------------------------------------+
21131 | saved CR | 4 8 (8-11)
21132 +---------------------------------------+
21133 | saved LR | 8 16
21134 +---------------------------------------+
21135 | reserved for compilers | 12 24
21136 +---------------------------------------+
21137 | reserved for binders | 16 32
21138 +---------------------------------------+
21139 | saved TOC pointer | 20 40
21140 +---------------------------------------+
21141 | Parameter save area (P) | 24 48
21142 +---------------------------------------+
21143 | Alloca space (A) | 24+P etc.
21144 +---------------------------------------+
21145 | Local variable space (L) | 24+P+A
21146 +---------------------------------------+
21147 | Float/int conversion temporary (X) | 24+P+A+L
21148 +---------------------------------------+
21149 | Save area for AltiVec registers (W) | 24+P+A+L+X
21150 +---------------------------------------+
21151 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
21152 +---------------------------------------+
21153 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
21154 +---------------------------------------+
21155 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
21156 +---------------------------------------+
21157 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
21158 +---------------------------------------+
21159 old SP->| back chain to caller's caller |
21160 +---------------------------------------+
21162 The required alignment for AIX configurations is two words (i.e., 8
21163 or 16 bytes).
21165 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
21167 SP----> +---------------------------------------+
21168 | Back chain to caller | 0
21169 +---------------------------------------+
21170 | Save area for CR | 8
21171 +---------------------------------------+
21172 | Saved LR | 16
21173 +---------------------------------------+
21174 | Saved TOC pointer | 24
21175 +---------------------------------------+
21176 | Parameter save area (P) | 32
21177 +---------------------------------------+
21178 | Alloca space (A) | 32+P
21179 +---------------------------------------+
21180 | Local variable space (L) | 32+P+A
21181 +---------------------------------------+
21182 | Save area for AltiVec registers (W) | 32+P+A+L
21183 +---------------------------------------+
21184 | AltiVec alignment padding (Y) | 32+P+A+L+W
21185 +---------------------------------------+
21186 | Save area for GP registers (G) | 32+P+A+L+W+Y
21187 +---------------------------------------+
21188 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
21189 +---------------------------------------+
21190 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
21191 +---------------------------------------+
21194 V.4 stack frames look like:
21196 SP----> +---------------------------------------+
21197 | back chain to caller | 0
21198 +---------------------------------------+
21199 | caller's saved LR | 4
21200 +---------------------------------------+
21201 | Parameter save area (P) | 8
21202 +---------------------------------------+
21203 | Alloca space (A) | 8+P
21204 +---------------------------------------+
21205 | Varargs save area (V) | 8+P+A
21206 +---------------------------------------+
21207 | Local variable space (L) | 8+P+A+V
21208 +---------------------------------------+
21209 | Float/int conversion temporary (X) | 8+P+A+V+L
21210 +---------------------------------------+
21211 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
21212 +---------------------------------------+
21213 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
21214 +---------------------------------------+
21215 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
21216 +---------------------------------------+
21217 | SPE: area for 64-bit GP registers |
21218 +---------------------------------------+
21219 | SPE alignment padding |
21220 +---------------------------------------+
21221 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
21222 +---------------------------------------+
21223 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
21224 +---------------------------------------+
21225 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
21226 +---------------------------------------+
21227 old SP->| back chain to caller's caller |
21228 +---------------------------------------+
21230 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
21231 given. (But note below and in sysv4.h that we require only 8 and
21232 may round up the size of our stack frame anyway. The historical
21233 reason is early versions of powerpc-linux which didn't properly
21234 align the stack at program startup. A happy side-effect is that
21235 -mno-eabi libraries can be used with -meabi programs.)
21237 The EABI configuration defaults to the V.4 layout. However,
21238 the stack alignment requirements may differ. If -mno-eabi is not
21239 given, the required stack alignment is 8 bytes; if -mno-eabi is
21240 given, the required alignment is 16 bytes. (But see V.4 comment
21241 above.) */
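/* Worked example (hypothetical, ELFv2 64-bit): a function that saves
   r30-r31 and f31 gets fp_size = 8 and gp_size = 16, so the code below
   computes fp_save_offset = -8 and gp_save_offset = -24 relative to the
   frame top (the incoming SP), with the fixed 32-byte header, locals and
   parameter area further down the frame.  */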
21243 #ifndef ABI_STACK_BOUNDARY
21244 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
21245 #endif
21247 static rs6000_stack_t *
21248 rs6000_stack_info (void)
21250 rs6000_stack_t *info_ptr = &stack_info;
21251 int reg_size = TARGET_32BIT ? 4 : 8;
21252 int ehrd_size;
21253 int ehcr_size;
21254 int save_align;
21255 int first_gp;
21256 HOST_WIDE_INT non_fixed_size;
21257 bool using_static_chain_p;
21259 if (reload_completed && info_ptr->reload_completed)
21260 return info_ptr;
21262 memset (info_ptr, 0, sizeof (*info_ptr));
21263 info_ptr->reload_completed = reload_completed;
21265 if (TARGET_SPE)
21267 /* Cache value so we don't rescan instruction chain over and over. */
21268 if (cfun->machine->insn_chain_scanned_p == 0)
21269 cfun->machine->insn_chain_scanned_p
21270 = spe_func_has_64bit_regs_p () + 1;
21271 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
21274 /* Select which calling sequence. */
21275 info_ptr->abi = DEFAULT_ABI;
21277 /* Calculate which registers need to be saved & save area size. */
21278 info_ptr->first_gp_reg_save = first_reg_to_save ();
21279 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
21280 even if it currently looks like we won't. Reload may need it to
21281 get at a constant; if so, it will have already created a constant
21282 pool entry for it. */
21283 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
21284 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
21285 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
21286 && crtl->uses_const_pool
21287 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
21288 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
21289 else
21290 first_gp = info_ptr->first_gp_reg_save;
21292 info_ptr->gp_size = reg_size * (32 - first_gp);
21294 /* For the SPE, we have an additional upper 32-bits on each GPR.
21295 Ideally we should save the entire 64-bits only when the upper
21296 half is used in SIMD instructions. Since we only record
21297 registers live (not the size they are used in), this proves
21298 difficult because we'd have to traverse the instruction chain at
21299 the right time, taking reload into account. This is a real pain,
21300 so we opt to save the GPRs in 64-bits always if even one register
21301 gets used in 64-bits. Otherwise, all the registers in the frame
21302 get saved in 32-bits.
21304 So... since when we save all GPRs (except the SP) in 64-bits, the
21305 traditional GP save area will be empty. */
21306 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21307 info_ptr->gp_size = 0;
21309 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
21310 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
21312 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
21313 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
21314 - info_ptr->first_altivec_reg_save);
21316 /* Does this function call anything? */
21317 info_ptr->calls_p = (! crtl->is_leaf
21318 || cfun->machine->ra_needs_full_frame);
21320 /* Determine if we need to save the condition code registers. */
21321 if (df_regs_ever_live_p (CR2_REGNO)
21322 || df_regs_ever_live_p (CR3_REGNO)
21323 || df_regs_ever_live_p (CR4_REGNO))
21325 info_ptr->cr_save_p = 1;
21326 if (DEFAULT_ABI == ABI_V4)
21327 info_ptr->cr_size = reg_size;
21330 /* If the current function calls __builtin_eh_return, then we need
21331 to allocate stack space for registers that will hold data for
21332 the exception handler. */
21333 if (crtl->calls_eh_return)
21335 unsigned int i;
21336 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
21337 continue;
21339 /* SPE saves EH registers in 64-bits. */
21340 ehrd_size = i * (TARGET_SPE_ABI
21341 && info_ptr->spe_64bit_regs_used != 0
21342 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
21344 else
21345 ehrd_size = 0;
21347 /* In the ELFv2 ABI, we also need to allocate space for separate
21348 CR field save areas if the function calls __builtin_eh_return. */
21349 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
21351 /* This hard-codes that we have three call-saved CR fields. */
21352 ehcr_size = 3 * reg_size;
21353 /* We do *not* use the regular CR save mechanism. */
21354 info_ptr->cr_save_p = 0;
21356 else
21357 ehcr_size = 0;
21359 /* Determine various sizes. */
21360 info_ptr->reg_size = reg_size;
21361 info_ptr->fixed_size = RS6000_SAVE_AREA;
21362 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
21363 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
21364 TARGET_ALTIVEC ? 16 : 8);
21365 if (FRAME_GROWS_DOWNWARD)
21366 info_ptr->vars_size
21367 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
21368 + info_ptr->parm_size,
21369 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
21370 - (info_ptr->fixed_size + info_ptr->vars_size
21371 + info_ptr->parm_size);
21373 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21374 info_ptr->spe_gp_size = 8 * (32 - first_gp);
21375 else
21376 info_ptr->spe_gp_size = 0;
21378 if (TARGET_ALTIVEC_ABI)
21379 info_ptr->vrsave_mask = compute_vrsave_mask ();
21380 else
21381 info_ptr->vrsave_mask = 0;
21383 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
21384 info_ptr->vrsave_size = 4;
21385 else
21386 info_ptr->vrsave_size = 0;
21388 compute_save_world_info (info_ptr);
21390 /* Calculate the offsets. */
21391 switch (DEFAULT_ABI)
21393 case ABI_NONE:
21394 default:
21395 gcc_unreachable ();
21397 case ABI_AIX:
21398 case ABI_ELFv2:
21399 case ABI_DARWIN:
21400 info_ptr->fp_save_offset = - info_ptr->fp_size;
21401 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21403 if (TARGET_ALTIVEC_ABI)
21405 info_ptr->vrsave_save_offset
21406 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
21408 /* Align stack so vector save area is on a quadword boundary.
21409 The padding goes above the vectors. */
21410 if (info_ptr->altivec_size != 0)
21411 info_ptr->altivec_padding_size
21412 = info_ptr->vrsave_save_offset & 0xF;
21413 else
21414 info_ptr->altivec_padding_size = 0;
21416 info_ptr->altivec_save_offset
21417 = info_ptr->vrsave_save_offset
21418 - info_ptr->altivec_padding_size
21419 - info_ptr->altivec_size;
21420 gcc_assert (info_ptr->altivec_size == 0
21421 || info_ptr->altivec_save_offset % 16 == 0);
21423 /* Adjust for AltiVec case. */
21424 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21426 else
21427 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21429 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21430 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21431 info_ptr->lr_save_offset = 2*reg_size;
21432 break;
21434 case ABI_V4:
21435 info_ptr->fp_save_offset = - info_ptr->fp_size;
21436 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21437 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21439 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21441 /* Align stack so SPE GPR save area is aligned on a
21442 double-word boundary. */
21443 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21444 info_ptr->spe_padding_size
21445 = 8 - (-info_ptr->cr_save_offset % 8);
21446 else
21447 info_ptr->spe_padding_size = 0;
21449 info_ptr->spe_gp_save_offset
21450 = info_ptr->cr_save_offset
21451 - info_ptr->spe_padding_size
21452 - info_ptr->spe_gp_size;
21454 /* Adjust for SPE case. */
21455 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21457 else if (TARGET_ALTIVEC_ABI)
21459 info_ptr->vrsave_save_offset
21460 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21462 /* Align stack so vector save area is on a quadword boundary. */
21463 if (info_ptr->altivec_size != 0)
21464 info_ptr->altivec_padding_size
21465 = 16 - (-info_ptr->vrsave_save_offset % 16);
21466 else
21467 info_ptr->altivec_padding_size = 0;
21469 info_ptr->altivec_save_offset
21470 = info_ptr->vrsave_save_offset
21471 - info_ptr->altivec_padding_size
21472 - info_ptr->altivec_size;
21474 /* Adjust for AltiVec case. */
21475 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21477 else
21478 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21479 info_ptr->ehrd_offset -= ehrd_size;
21480 info_ptr->lr_save_offset = reg_size;
21481 break;
21484 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21485 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21486 + info_ptr->gp_size
21487 + info_ptr->altivec_size
21488 + info_ptr->altivec_padding_size
21489 + info_ptr->spe_gp_size
21490 + info_ptr->spe_padding_size
21491 + ehrd_size
21492 + ehcr_size
21493 + info_ptr->cr_size
21494 + info_ptr->vrsave_size,
21495 save_align);
21497 non_fixed_size = (info_ptr->vars_size
21498 + info_ptr->parm_size
21499 + info_ptr->save_size);
21501 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21502 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
21504 /* Determine if we need to save the link register. */
21505 if (info_ptr->calls_p
21506 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21507 && crtl->profile
21508 && !TARGET_PROFILE_KERNEL)
21509 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21510 #ifdef TARGET_RELOCATABLE
21511 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21512 #endif
21513 || rs6000_ra_ever_killed ())
21514 info_ptr->lr_save_p = 1;
21516 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21517 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21518 && call_used_regs[STATIC_CHAIN_REGNUM]);
21519 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21520 using_static_chain_p);
21522 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21523 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21524 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21525 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21526 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21527 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21528 info_ptr->lr_save_p = 1;
21530 if (info_ptr->lr_save_p)
21531 df_set_regs_ever_live (LR_REGNO, true);
21533 /* Determine if we need to allocate any stack frame:
21535 For AIX we need to push the stack if a frame pointer is needed
21536 (because the stack might be dynamically adjusted), if we are
21537 debugging, if we make calls, or if the sum of fp_save, gp_save,
21538 and local variables are more than the space needed to save all
21539 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21540 + 18*8 = 288 (GPR13 reserved).
21542 For V.4 we don't have the stack cushion that AIX uses, but assume
21543 that the debugger can handle stackless frames. */
21545 if (info_ptr->calls_p)
21546 info_ptr->push_p = 1;
21548 else if (DEFAULT_ABI == ABI_V4)
21549 info_ptr->push_p = non_fixed_size != 0;
21551 else if (frame_pointer_needed)
21552 info_ptr->push_p = 1;
21554 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
21555 info_ptr->push_p = 1;
21557 else
21558 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
21560 /* Zero offsets if we're not saving those registers. */
21561 if (info_ptr->fp_size == 0)
21562 info_ptr->fp_save_offset = 0;
21564 if (info_ptr->gp_size == 0)
21565 info_ptr->gp_save_offset = 0;
21567 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
21568 info_ptr->altivec_save_offset = 0;
21570 /* Zero VRSAVE offset if not saved and restored. */
21571 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
21572 info_ptr->vrsave_save_offset = 0;
21574 if (! TARGET_SPE_ABI
21575 || info_ptr->spe_64bit_regs_used == 0
21576 || info_ptr->spe_gp_size == 0)
21577 info_ptr->spe_gp_save_offset = 0;
21579 if (! info_ptr->lr_save_p)
21580 info_ptr->lr_save_offset = 0;
21582 if (! info_ptr->cr_save_p)
21583 info_ptr->cr_save_offset = 0;
21585 return info_ptr;
21588 /* Return true if the current function uses any GPRs in 64-bit SIMD
21589 mode. */
21591 static bool
21592 spe_func_has_64bit_regs_p (void)
21594 rtx_insn *insns, *insn;
21596 /* Functions that save and restore all the call-saved registers will
21597 need to save/restore the registers in 64-bits. */
21598 if (crtl->calls_eh_return
21599 || cfun->calls_setjmp
21600 || crtl->has_nonlocal_goto)
21601 return true;
21603 insns = get_insns ();
21605 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
21607 if (INSN_P (insn))
21609 rtx i;
21611 /* FIXME: This should be implemented with attributes...
21613 (set_attr "spe64" "true")....then,
21614 if (get_spe64(insn)) return true;
21616 It's the only reliable way to do the stuff below. */
21618 i = PATTERN (insn);
21619 if (GET_CODE (i) == SET)
21621 enum machine_mode mode = GET_MODE (SET_SRC (i));
21623 if (SPE_VECTOR_MODE (mode))
21624 return true;
21625 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
21626 return true;
21631 return false;
21634 static void
21635 debug_stack_info (rs6000_stack_t *info)
21637 const char *abi_string;
21639 if (! info)
21640 info = rs6000_stack_info ();
21642 fprintf (stderr, "\nStack information for function %s:\n",
21643 ((current_function_decl && DECL_NAME (current_function_decl))
21644 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
21645 : "<unknown>"));
21647 switch (info->abi)
21649 default: abi_string = "Unknown"; break;
21650 case ABI_NONE: abi_string = "NONE"; break;
21651 case ABI_AIX: abi_string = "AIX"; break;
21652 case ABI_ELFv2: abi_string = "ELFv2"; break;
21653 case ABI_DARWIN: abi_string = "Darwin"; break;
21654 case ABI_V4: abi_string = "V.4"; break;
21657 fprintf (stderr, "\tABI = %5s\n", abi_string);
21659 if (TARGET_ALTIVEC_ABI)
21660 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
21662 if (TARGET_SPE_ABI)
21663 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
21665 if (info->first_gp_reg_save != 32)
21666 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
21668 if (info->first_fp_reg_save != 64)
21669 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
21671 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
21672 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
21673 info->first_altivec_reg_save);
21675 if (info->lr_save_p)
21676 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
21678 if (info->cr_save_p)
21679 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
21681 if (info->vrsave_mask)
21682 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
21684 if (info->push_p)
21685 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
21687 if (info->calls_p)
21688 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
21690 if (info->gp_save_offset)
21691 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
21693 if (info->fp_save_offset)
21694 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
21696 if (info->altivec_save_offset)
21697 fprintf (stderr, "\taltivec_save_offset = %5d\n",
21698 info->altivec_save_offset);
21700 if (info->spe_gp_save_offset)
21701 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
21702 info->spe_gp_save_offset);
21704 if (info->vrsave_save_offset)
21705 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
21706 info->vrsave_save_offset);
21708 if (info->lr_save_offset)
21709 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
21711 if (info->cr_save_offset)
21712 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
21714 if (info->varargs_save_offset)
21715 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
21717 if (info->total_size)
21718 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21719 info->total_size);
21721 if (info->vars_size)
21722 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21723 info->vars_size);
21725 if (info->parm_size)
21726 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
21728 if (info->fixed_size)
21729 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
21731 if (info->gp_size)
21732 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
21734 if (info->spe_gp_size)
21735 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
21737 if (info->fp_size)
21738 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
21740 if (info->altivec_size)
21741 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
21743 if (info->vrsave_size)
21744 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
21746 if (info->altivec_padding_size)
21747 fprintf (stderr, "\taltivec_padding_size= %5d\n",
21748 info->altivec_padding_size);
21750 if (info->spe_padding_size)
21751 fprintf (stderr, "\tspe_padding_size = %5d\n",
21752 info->spe_padding_size);
21754 if (info->cr_size)
21755 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
21757 if (info->save_size)
21758 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
21760 if (info->reg_size != 4)
21761 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
21763 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
21765 fprintf (stderr, "\n");
21768 rtx
21769 rs6000_return_addr (int count, rtx frame)
21771 /* Currently we don't optimize very well between prolog and body
21772 code and for PIC code the code can be actually quite bad, so
21773 don't try to be too clever here. */
21774 if (count != 0
21775 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
21777 cfun->machine->ra_needs_full_frame = 1;
21779 return
21780 gen_rtx_MEM
21781 (Pmode,
21782 memory_address
21783 (Pmode,
21784 plus_constant (Pmode,
21785 copy_to_reg
21786 (gen_rtx_MEM (Pmode,
21787 memory_address (Pmode, frame))),
21788 RETURN_ADDRESS_OFFSET)));
21791 cfun->machine->ra_need_lr = 1;
21792 return get_hard_reg_initial_val (Pmode, LR_REGNO);
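/* In other words (an illustrative summary, not emitted code):
   __builtin_return_address (0) in a non-PIC function reduces to the
   pseudo holding the incoming LR value, while any COUNT > 0, or PIC
   V.4/Darwin code, forces a full frame and reads the slot at
   RETURN_ADDRESS_OFFSET off the back chain.  */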
21795 /* Say whether a function is a candidate for sibcall handling or not. */
21797 static bool
21798 rs6000_function_ok_for_sibcall (tree decl, tree exp)
21800 tree fntype;
21802 if (decl)
21803 fntype = TREE_TYPE (decl);
21804 else
21805 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
21807 /* We can't do it if the called function has more vector parameters
21808 than the current function; there's nowhere to put the VRsave code. */
21809 if (TARGET_ALTIVEC_ABI
21810 && TARGET_ALTIVEC_VRSAVE
21811 && !(decl && decl == current_function_decl))
21813 function_args_iterator args_iter;
21814 tree type;
21815 int nvreg = 0;
21817 /* Functions with vector parameters are required to have a
21818 prototype, so the argument type info must be available
21819 here. */
21820 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
21821 if (TREE_CODE (type) == VECTOR_TYPE
21822 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21823 nvreg++;
21825 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
21826 if (TREE_CODE (type) == VECTOR_TYPE
21827 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21828 nvreg--;
21830 if (nvreg > 0)
21831 return false;
21834 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
21835 functions, because the callee may have a different TOC pointer from
21836 the caller and there's no way to ensure we restore the TOC when
21837 we return. With the secure-plt SYSV ABI we can't make non-local
21838 calls when -fpic/PIC because the plt call stubs use r30. */
21839 if (DEFAULT_ABI == ABI_DARWIN
21840 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21841 && decl
21842 && !DECL_EXTERNAL (decl)
21843 && (*targetm.binds_local_p) (decl))
21844 || (DEFAULT_ABI == ABI_V4
21845 && (!TARGET_SECURE_PLT
21846 || !flag_pic
21847 || (decl
21848 && (*targetm.binds_local_p) (decl)))))
21850 tree attr_list = TYPE_ATTRIBUTES (fntype);
21852 if (!lookup_attribute ("longcall", attr_list)
21853 || lookup_attribute ("shortcall", attr_list))
21854 return true;
21857 return false;
21860 static int
21861 rs6000_ra_ever_killed (void)
21863 rtx_insn *top;
21864 rtx reg;
21865 rtx_insn *insn;
21867 if (cfun->is_thunk)
21868 return 0;
21870 if (cfun->machine->lr_save_state)
21871 return cfun->machine->lr_save_state - 1;
21873 /* regs_ever_live has LR marked as used if any sibcalls are present,
21874 but this should not force saving and restoring in the
21875 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
21876 clobbers LR, so that is inappropriate. */
21878 /* Also, the prologue can generate a store into LR that
21879 doesn't really count, like this:
21881 move LR->R0
21882 bcl to set PIC register
21883 move LR->R31
21884 move R0->LR
21886 When we're called from the epilogue, we need to avoid counting
21887 this as a store. */
21889 push_topmost_sequence ();
21890 top = get_insns ();
21891 pop_topmost_sequence ();
21892 reg = gen_rtx_REG (Pmode, LR_REGNO);
21894 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
21896 if (INSN_P (insn))
21898 if (CALL_P (insn))
21900 if (!SIBLING_CALL_P (insn))
21901 return 1;
21903 else if (find_regno_note (insn, REG_INC, LR_REGNO))
21904 return 1;
21905 else if (set_of (reg, insn) != NULL_RTX
21906 && !prologue_epilogue_contains (insn))
21907 return 1;
21910 return 0;
21913 /* Emit instructions needed to load the TOC register.
21914 This is needed only when TARGET_TOC and TARGET_MINIMAL_TOC are set and
21915 there is a constant pool, or for SVR4 -fpic. */
21917 void
21918 rs6000_emit_load_toc_table (int fromprolog)
21920 rtx dest;
21921 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
21923 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
21925 char buf[30];
21926 rtx lab, tmp1, tmp2, got;
21928 lab = gen_label_rtx ();
21929 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
21930 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21931 if (flag_pic == 2)
21932 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21933 else
21934 got = rs6000_got_sym ();
21935 tmp1 = tmp2 = dest;
21936 if (!fromprolog)
21938 tmp1 = gen_reg_rtx (Pmode);
21939 tmp2 = gen_reg_rtx (Pmode);
21941 emit_insn (gen_load_toc_v4_PIC_1 (lab));
21942 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
21943 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
21944 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
21946 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
21948 emit_insn (gen_load_toc_v4_pic_si ());
21949 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21951 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
21953 char buf[30];
21954 rtx temp0 = (fromprolog
21955 ? gen_rtx_REG (Pmode, 0)
21956 : gen_reg_rtx (Pmode));
21958 if (fromprolog)
21960 rtx symF, symL;
21962 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21963 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21965 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
21966 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21968 emit_insn (gen_load_toc_v4_PIC_1 (symF));
21969 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21970 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
21972 else
21974 rtx tocsym, lab;
21976 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21977 lab = gen_label_rtx ();
21978 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
21979 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21980 if (TARGET_LINK_STACK)
21981 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
21982 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
21984 emit_insn (gen_addsi3 (dest, temp0, dest));
21986 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
21988 /* This is for AIX code running in non-PIC ELF32. */
21989 char buf[30];
21990 rtx realsym;
21991 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21992 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21994 emit_insn (gen_elf_high (dest, realsym));
21995 emit_insn (gen_elf_low (dest, dest, realsym));
21997 else
21999 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
22001 if (TARGET_32BIT)
22002 emit_insn (gen_load_toc_aix_si (dest));
22003 else
22004 emit_insn (gen_load_toc_aix_di (dest));
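/* A hedged sketch of what the cases above emit (exact labels, symbols
   and register numbers vary by target and are illustrative only): the
   V.4 -fpic path is a branch-and-link/mflr pair that leaves the GOT
   address in the PIC register; the -fPIC path materializes .LCTOC1
   relative to a local label in the same way; the minimal-TOC ELF32
   path is a lis/la pair loading .LCTOC1 directly; and the AIX/ELFv2
   path is a single load of the TOC anchor into DEST.  */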
22008 /* Emit instructions to restore the link register after determining where
22009 its value has been stored. */
22011 void
22012 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
22014 rs6000_stack_t *info = rs6000_stack_info ();
22015 rtx operands[2];
22017 operands[0] = source;
22018 operands[1] = scratch;
22020 if (info->lr_save_p)
22022 rtx frame_rtx = stack_pointer_rtx;
22023 HOST_WIDE_INT sp_offset = 0;
22024 rtx tmp;
22026 if (frame_pointer_needed
22027 || cfun->calls_alloca
22028 || info->total_size > 32767)
22030 tmp = gen_frame_mem (Pmode, frame_rtx);
22031 emit_move_insn (operands[1], tmp);
22032 frame_rtx = operands[1];
22034 else if (info->push_p)
22035 sp_offset = info->total_size;
22037 tmp = plus_constant (Pmode, frame_rtx,
22038 info->lr_save_offset + sp_offset);
22039 tmp = gen_frame_mem (Pmode, tmp);
22040 emit_move_insn (tmp, operands[0]);
22042 else
22043 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
22045 /* Freeze lr_save_p. We've just emitted rtl that depends on the
22046 state of lr_save_p so any change from here on would be a bug. In
22047 particular, stop rs6000_ra_ever_killed from considering the SET
22048 of lr we may have added just above. */
22049 cfun->machine->lr_save_state = info->lr_save_p + 1;
22052 static GTY(()) alias_set_type set = -1;
22054 alias_set_type
22055 get_TOC_alias_set (void)
22057 if (set == -1)
22058 set = new_alias_set ();
22059 return set;
22062 /* This returns nonzero if the current function uses the TOC. This is
22063 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
22064 is generated by the ABI_V4 load_toc_* patterns. */
22065 #if TARGET_ELF
22066 static int
22067 uses_TOC (void)
22069 rtx_insn *insn;
22071 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
22072 if (INSN_P (insn))
22074 rtx pat = PATTERN (insn);
22075 int i;
22077 if (GET_CODE (pat) == PARALLEL)
22078 for (i = 0; i < XVECLEN (pat, 0); i++)
22080 rtx sub = XVECEXP (pat, 0, i);
22081 if (GET_CODE (sub) == USE)
22083 sub = XEXP (sub, 0);
22084 if (GET_CODE (sub) == UNSPEC
22085 && XINT (sub, 1) == UNSPEC_TOC)
22086 return 1;
22090 return 0;
22092 #endif
22094 rtx
22095 create_TOC_reference (rtx symbol, rtx largetoc_reg)
22097 rtx tocrel, tocreg, hi;
22099 if (TARGET_DEBUG_ADDR)
22101 if (GET_CODE (symbol) == SYMBOL_REF)
22102 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
22103 XSTR (symbol, 0));
22104 else
22106 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
22107 GET_RTX_NAME (GET_CODE (symbol)));
22108 debug_rtx (symbol);
22112 if (!can_create_pseudo_p ())
22113 df_set_regs_ever_live (TOC_REGISTER, true);
22115 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
22116 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
22117 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
22118 return tocrel;
22120 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
22121 if (largetoc_reg != NULL)
22123 emit_move_insn (largetoc_reg, hi);
22124 hi = largetoc_reg;
22126 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
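/* A minimal sketch of how the two shapes come out in assembly (the
   syntax shown is illustrative): with a small code model the
   UNSPEC_TOCREL above becomes a D-form access such as "sym@toc(2)",
   while the HIGH/LO_SUM pair becomes
	addis rT,2,sym@toc@ha
	ld    rD,sym@toc@l(rT)
   with rT being LARGETOC_REG when one is supplied.  */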
22129 /* Issue assembly directives that create a reference to the given DWARF
22130 FRAME_TABLE_LABEL from the current function section. */
22131 void
22132 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
22134 fprintf (asm_out_file, "\t.ref %s\n",
22135 (* targetm.strip_name_encoding) (frame_table_label));
22138 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
22139 and the change to the stack pointer. */
22141 static void
22142 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
22144 rtvec p;
22145 int i;
22146 rtx regs[3];
22148 i = 0;
22149 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22150 if (hard_frame_needed)
22151 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
22152 if (!(REGNO (fp) == STACK_POINTER_REGNUM
22153 || (hard_frame_needed
22154 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
22155 regs[i++] = fp;
22157 p = rtvec_alloc (i);
22158 while (--i >= 0)
22160 rtx mem = gen_frame_mem (BLKmode, regs[i]);
22161 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
22164 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
22167 /* Emit the code for allocating stack space, as insns. If COPY_REG,
22168 leave a copy of the old stack pointer (offset by COPY_OFF) in it.
22169 The generated code may use hard register 0 as a temporary. */
22171 static void
22172 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
22174 rtx_insn *insn;
22175 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22176 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
22177 rtx todec = gen_int_mode (-size, Pmode);
22178 rtx par, set, mem;
22180 if (INTVAL (todec) != -size)
22182 warning (0, "stack frame too large");
22183 emit_insn (gen_trap ());
22184 return;
22187 if (crtl->limit_stack)
22189 if (REG_P (stack_limit_rtx)
22190 && REGNO (stack_limit_rtx) > 1
22191 && REGNO (stack_limit_rtx) <= 31)
22193 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
22194 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22195 const0_rtx));
22197 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
22198 && TARGET_32BIT
22199 && DEFAULT_ABI == ABI_V4)
22201 rtx toload = gen_rtx_CONST (VOIDmode,
22202 gen_rtx_PLUS (Pmode,
22203 stack_limit_rtx,
22204 GEN_INT (size)));
22206 emit_insn (gen_elf_high (tmp_reg, toload));
22207 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
22208 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22209 const0_rtx));
22211 else
22212 warning (0, "stack limit expression is not supported");
22215 if (copy_reg)
22217 if (copy_off != 0)
22218 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
22219 else
22220 emit_move_insn (copy_reg, stack_reg);
22223 if (size > 32767)
22225 /* Need a note here so that try_split doesn't get confused. */
22226 if (get_last_insn () == NULL_RTX)
22227 emit_note (NOTE_INSN_DELETED);
22228 insn = emit_move_insn (tmp_reg, todec);
22229 try_split (PATTERN (insn), insn, 0);
22230 todec = tmp_reg;
22233 insn = emit_insn (TARGET_32BIT
22234 ? gen_movsi_update_stack (stack_reg, stack_reg,
22235 todec, stack_reg)
22236 : gen_movdi_di_update_stack (stack_reg, stack_reg,
22237 todec, stack_reg));
22238 /* Since we didn't use gen_frame_mem to generate the MEM, grab
22239 it now and set the alias set/attributes. The above gen_*_update
22240 calls will generate a PARALLEL with the MEM set being the first
22241 operation. */
22242 par = PATTERN (insn);
22243 gcc_assert (GET_CODE (par) == PARALLEL);
22244 set = XVECEXP (par, 0, 0);
22245 gcc_assert (GET_CODE (set) == SET);
22246 mem = SET_DEST (set);
22247 gcc_assert (MEM_P (mem));
22248 MEM_NOTRAP_P (mem) = 1;
22249 set_mem_alias_set (mem, get_frame_alias_set ());
22251 RTX_FRAME_RELATED_P (insn) = 1;
22252 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
22253 gen_rtx_SET (VOIDmode, stack_reg,
22254 gen_rtx_PLUS (Pmode, stack_reg,
22255 GEN_INT (-size))));
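/* A hedged sketch of the net effect for 64-bit (32-bit uses the
   stwu/stwux forms): for SIZE <= 32767 the update insn above boils
   down to a single
	stdu 1,-SIZE(1)
   while larger frames first load -SIZE into r0 and emit
	stdux 1,1,0
   so the back chain is stored and r1 updated in one insn either way.  */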
22258 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
22260 #if PROBE_INTERVAL > 32768
22261 #error Cannot use indexed addressing mode for stack probing
22262 #endif
22264 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
22265 inclusive. These are offsets from the current stack pointer. */
22267 static void
22268 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
22270 /* See if we have a constant small number of probes to generate. If so,
22271 that's the easy case. */
22272 if (first + size <= 32768)
22274 HOST_WIDE_INT i;
22276 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
22277 it exceeds SIZE. If only one probe is needed, this will not
22278 generate any code. Then probe at FIRST + SIZE. */
22279 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
22280 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22281 -(first + i)));
22283 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22284 -(first + size)));
22287 /* Otherwise, do the same as above, but in a loop. Note that we must be
22288 extra careful with variables wrapping around because we might be at
22289 the very top (or the very bottom) of the address space and we have
22290 to be able to handle this case properly; in particular, we use an
22291 equality test for the loop condition. */
22292 else
22294 HOST_WIDE_INT rounded_size;
22295 rtx r12 = gen_rtx_REG (Pmode, 12);
22296 rtx r0 = gen_rtx_REG (Pmode, 0);
22298 /* Sanity check for the addressing mode we're going to use. */
22299 gcc_assert (first <= 32768);
22301 /* Step 1: round SIZE to the previous multiple of the interval. */
22303 rounded_size = size & -PROBE_INTERVAL;
22306 /* Step 2: compute initial and final value of the loop counter. */
22308 /* TEST_ADDR = SP + FIRST. */
22309 emit_insn (gen_rtx_SET (VOIDmode, r12,
22310 plus_constant (Pmode, stack_pointer_rtx,
22311 -first)));
22313 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
22314 if (rounded_size > 32768)
22316 emit_move_insn (r0, GEN_INT (-rounded_size));
22317 emit_insn (gen_rtx_SET (VOIDmode, r0,
22318 gen_rtx_PLUS (Pmode, r12, r0)));
22320 else
22321 emit_insn (gen_rtx_SET (VOIDmode, r0,
22322 plus_constant (Pmode, r12, -rounded_size)));
22325 /* Step 3: the loop
22327 while (TEST_ADDR != LAST_ADDR)
22329 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
22330 probe at TEST_ADDR
22333 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
22334 until it is equal to ROUNDED_SIZE. */
22336 if (TARGET_64BIT)
22337 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
22338 else
22339 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
22342 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
22343 that SIZE is equal to ROUNDED_SIZE. */
22345 if (size != rounded_size)
22346 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
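/* A worked example, assuming PROBE_INTERVAL is 4096: with FIRST == 16
   and SIZE == 10000 the constant branch probes sp-4112, sp-8208 and
   finally sp-10016; with SIZE == 100000, ROUNDED_SIZE is 98304, so the
   loop probes every 4096 bytes from sp-4112 down to sp-98320, and the
   trailing probe covers the 1696-byte residual at sp-100016.  */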
22350 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
22351 absolute addresses. */
22353 const char *
22354 output_probe_stack_range (rtx reg1, rtx reg2)
22356 static int labelno = 0;
22357 char loop_lab[32], end_lab[32];
22358 rtx xops[2];
22360 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
22361 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
22363 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
22365 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
22366 xops[0] = reg1;
22367 xops[1] = reg2;
22368 if (TARGET_64BIT)
22369 output_asm_insn ("cmpd 0,%0,%1", xops);
22370 else
22371 output_asm_insn ("cmpw 0,%0,%1", xops);
22373 fputs ("\tbeq 0,", asm_out_file);
22374 assemble_name_raw (asm_out_file, end_lab);
22375 fputc ('\n', asm_out_file);
22377 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
22378 xops[1] = GEN_INT (-PROBE_INTERVAL);
22379 output_asm_insn ("addi %0,%0,%1", xops);
22381 /* Probe at TEST_ADDR and branch. */
22382 xops[1] = gen_rtx_REG (Pmode, 0);
22383 output_asm_insn ("stw %1,0(%0)", xops);
22384 fprintf (asm_out_file, "\tb ");
22385 assemble_name_raw (asm_out_file, loop_lab);
22386 fputc ('\n', asm_out_file);
22388 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
22390 return "";
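/* For a 32-bit target with PROBE_INTERVAL == 4096, and with r12/r0 as
   the test/limit registers, the loop printed above looks roughly like:

	.LPSRL0:
		cmpw 0,12,0
		beq 0,.LPSRE0
		addi 12,12,-4096
		stw 0,0(12)
		b .LPSRL0
	.LPSRE0:
*/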
22393 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
22394 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
22395 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
22396 deduce these equivalences by itself so it wasn't necessary to hold
22397 its hand so much. Don't be tempted to always supply d2_f_d_e with
22398 the actual CFA register, i.e. r31 when we are using a hard frame
22399 pointer. That fails when saving regs off r1, and sched moves the
22400 r31 setup past the reg saves. */
22402 static rtx
22403 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
22404 rtx reg2, rtx rreg, rtx split_reg)
22406 rtx real, temp;
22408 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
22410 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
22411 int i;
22413 gcc_checking_assert (val == 0);
22414 real = PATTERN (insn);
22415 if (GET_CODE (real) == PARALLEL)
22416 for (i = 0; i < XVECLEN (real, 0); i++)
22417 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22419 rtx set = XVECEXP (real, 0, i);
22421 RTX_FRAME_RELATED_P (set) = 1;
22423 RTX_FRAME_RELATED_P (insn) = 1;
22424 return insn;
22427 /* copy_rtx will not make unique copies of registers, so we need to
22428 ensure we don't have unwanted sharing here. */
22429 if (reg == reg2)
22430 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22432 if (reg == rreg)
22433 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22435 real = copy_rtx (PATTERN (insn));
22437 if (reg2 != NULL_RTX)
22438 real = replace_rtx (real, reg2, rreg);
22440 if (REGNO (reg) == STACK_POINTER_REGNUM)
22441 gcc_checking_assert (val == 0);
22442 else
22443 real = replace_rtx (real, reg,
22444 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22445 STACK_POINTER_REGNUM),
22446 GEN_INT (val)));
22448 /* We expect that 'real' is either a SET or a PARALLEL containing
22449 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22450 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22452 if (GET_CODE (real) == SET)
22454 rtx set = real;
22456 temp = simplify_rtx (SET_SRC (set));
22457 if (temp)
22458 SET_SRC (set) = temp;
22459 temp = simplify_rtx (SET_DEST (set));
22460 if (temp)
22461 SET_DEST (set) = temp;
22462 if (GET_CODE (SET_DEST (set)) == MEM)
22464 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22465 if (temp)
22466 XEXP (SET_DEST (set), 0) = temp;
22469 else
22471 int i;
22473 gcc_assert (GET_CODE (real) == PARALLEL);
22474 for (i = 0; i < XVECLEN (real, 0); i++)
22475 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22477 rtx set = XVECEXP (real, 0, i);
22479 temp = simplify_rtx (SET_SRC (set));
22480 if (temp)
22481 SET_SRC (set) = temp;
22482 temp = simplify_rtx (SET_DEST (set));
22483 if (temp)
22484 SET_DEST (set) = temp;
22485 if (GET_CODE (SET_DEST (set)) == MEM)
22487 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22488 if (temp)
22489 XEXP (SET_DEST (set), 0) = temp;
22491 RTX_FRAME_RELATED_P (set) = 1;
22495 /* If a store insn has been split into multiple insns, the
22496 true source register is given by split_reg. */
22497 if (split_reg != NULL_RTX)
22498 real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);
22500 RTX_FRAME_RELATED_P (insn) = 1;
22501 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22503 return insn;
22506 /* Returns an insn that has a vrsave set operation with the
22507 appropriate CLOBBERs. */
22509 static rtx
22510 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22512 int nclobs, i;
22513 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22514 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22516 clobs[0]
22517 = gen_rtx_SET (VOIDmode,
22518 vrsave,
22519 gen_rtx_UNSPEC_VOLATILE (SImode,
22520 gen_rtvec (2, reg, vrsave),
22521 UNSPECV_SET_VRSAVE));
22523 nclobs = 1;
22525 /* We need to clobber the registers in the mask so the scheduler
22526 does not move sets to VRSAVE before sets of AltiVec registers.
22528 However, if the function receives nonlocal gotos, reload will set
22529 all call saved registers live. We will end up with:
22531 (set (reg 999) (mem))
22532 (parallel [ (set (reg vrsave) (unspec blah))
22533 (clobber (reg 999))])
22535 The clobber will cause the store into reg 999 to be dead, and
22536 flow will attempt to delete an epilogue insn. In this case, we
22537 need an unspec use/set of the register. */
22539 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22540 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22542 if (!epiloguep || call_used_regs [i])
22543 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22544 gen_rtx_REG (V4SImode, i));
22545 else
22547 rtx reg = gen_rtx_REG (V4SImode, i);
22549 clobs[nclobs++]
22550 = gen_rtx_SET (VOIDmode,
22551 reg,
22552 gen_rtx_UNSPEC (V4SImode,
22553 gen_rtvec (1, reg), 27));
22557 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
22559 for (i = 0; i < nclobs; ++i)
22560 XVECEXP (insn, 0, i) = clobs[i];
22562 return insn;
22565 static rtx
22566 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
22568 rtx addr, mem;
22570 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
22571 mem = gen_frame_mem (GET_MODE (reg), addr);
22572 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
22575 static rtx
22576 gen_frame_load (rtx reg, rtx frame_reg, int offset)
22578 return gen_frame_set (reg, frame_reg, offset, false);
22581 static rtx
22582 gen_frame_store (rtx reg, rtx frame_reg, int offset)
22584 return gen_frame_set (reg, frame_reg, offset, true);
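/* Usage sketch (illustrative): gen_frame_store (gen_rtx_REG (Pmode, 31),
   stack_pointer_rtx, 8) builds RTL along the lines of
	(set (mem:P (plus:P (reg 1) (const_int 8))) (reg 31))
   i.e. a save of r31 at sp+8, ready to be wrapped by emit_insn or
   collected into a PARALLEL as the prologue code below does.  */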
22587 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
22588 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
22590 static rtx
22591 emit_frame_save (rtx frame_reg, enum machine_mode mode,
22592 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
22594 rtx reg, insn;
22596 /* Assert against cases that would need register indexed addressing; the caller must handle those itself. */
22597 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
22598 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
22599 || (TARGET_E500_DOUBLE && mode == DFmode)
22600 || (TARGET_SPE_ABI
22601 && SPE_VECTOR_MODE (mode)
22602 && !SPE_CONST_OFFSET_OK (offset))));
22604 reg = gen_rtx_REG (mode, regno);
22605 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
22606 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
22607 NULL_RTX, NULL_RTX, NULL_RTX);
22610 /* Emit an offset memory reference suitable for a frame store, while
22611 converting to a valid addressing mode. */
22613 static rtx
22614 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
22616 rtx int_rtx, offset_rtx;
22618 int_rtx = GEN_INT (offset);
22620 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
22621 || (TARGET_E500_DOUBLE && mode == DFmode))
22623 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
22624 emit_move_insn (offset_rtx, int_rtx);
22626 else
22627 offset_rtx = int_rtx;
22629 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
22632 #ifndef TARGET_FIX_AND_CONTINUE
22633 #define TARGET_FIX_AND_CONTINUE 0
22634 #endif
22636 /* The first out-of-line saved register is GPR 13 or 14, FPR 14, or VR 20; FIRST_SAVRES_REGISTER must be the smallest of these. */
22637 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
22638 #define LAST_SAVRES_REGISTER 31
22639 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
22641 enum {
22642 SAVRES_LR = 0x1,
22643 SAVRES_SAVE = 0x2,
22644 SAVRES_REG = 0x0c,
22645 SAVRES_GPR = 0,
22646 SAVRES_FPR = 4,
22647 SAVRES_VR = 8
22650 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
22652 /* Temporary holding space for an out-of-line register save/restore
22653 routine name. */
22654 static char savres_routine_name[30];
22656 /* Return the name for an out-of-line register save/restore routine.
22657 SEL encodes whether we save or restore, and which register class. */
22659 static char *
22660 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
22662 const char *prefix = "";
22663 const char *suffix = "";
22665 /* Different targets are supposed to define
22666 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
22667 routine name could be defined with:
22669 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
22671 This is a nice idea in theory, but in reality, things are
22672 complicated in several ways:
22674 - ELF targets have save/restore routines for GPRs.
22676 - SPE targets use different prefixes for 32/64-bit registers, and
22677 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
22679 - PPC64 ELF targets have routines for save/restore of GPRs that
22680 differ in what they do with the link register, so having a set
22681 prefix doesn't work. (We only use one of the save routines at
22682 the moment, though.)
22684 - PPC32 ELF targets have "exit" versions of the restore routines
22685 that restore the link register and can save some extra space.
22686 These require an extra suffix. (There are also "tail" versions
22687 of the restore routines and "GOT" versions of the save routines,
22688 but we don't generate those at present. Same problems apply,
22689 though.)
22691 We deal with all this by synthesizing our own prefix/suffix and
22692 using that for the simple sprintf call shown above. */
22693 if (TARGET_SPE)
22695 /* No floating point saves on the SPE. */
22696 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
22698 if ((sel & SAVRES_SAVE))
22699 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
22700 else
22701 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
22703 if ((sel & SAVRES_LR))
22704 suffix = "_x";
22706 else if (DEFAULT_ABI == ABI_V4)
22708 if (TARGET_64BIT)
22709 goto aix_names;
22711 if ((sel & SAVRES_REG) == SAVRES_GPR)
22712 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
22713 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22714 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
22715 else if ((sel & SAVRES_REG) == SAVRES_VR)
22716 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22717 else
22718 abort ();
22720 if ((sel & SAVRES_LR))
22721 suffix = "_x";
22723 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22725 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
22726 /* No out-of-line save/restore routines for GPRs on AIX. */
22727 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
22728 #endif
22730 aix_names:
22731 if ((sel & SAVRES_REG) == SAVRES_GPR)
22732 prefix = ((sel & SAVRES_SAVE)
22733 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
22734 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
22735 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22737 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
22738 if ((sel & SAVRES_LR))
22739 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
22740 else
22741 #endif
22743 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
22744 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
22747 else if ((sel & SAVRES_REG) == SAVRES_VR)
22748 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22749 else
22750 abort ();
22753 if (DEFAULT_ABI == ABI_DARWIN)
22755 /* The Darwin approach is (slightly) different, in order to be
22756 compatible with code generated by the system toolchain. There is a
22757 single symbol for the start of the save sequence, and the code here
22758 computes an offset into that sequence based on the first register
22759 to be saved. */
22760 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
22761 if ((sel & SAVRES_REG) == SAVRES_GPR)
22762 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
22763 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
22764 (regno - 13) * 4, prefix, regno);
22765 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22766 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
22767 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
22768 else if ((sel & SAVRES_REG) == SAVRES_VR)
22769 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
22770 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
22771 else
22772 abort ();
22774 else
22775 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
22777 return savres_routine_name;
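/* Some illustrative outputs (the register numbers are hypothetical
   inputs): a 32-bit SVR4 GPR save starting at r25 with the LR variant
   yields "_savegpr_25_x"; an ELFv2 GPR restore from r20 that also
   restores LR yields "_restgpr0_20"; Darwin instead emits an offset
   into one shared stub, e.g. "*saveGPR+48 ; save r25-r31".  */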
22780 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
22781 SEL encodes whether we save or restore, and which register class. */
22783 static rtx
22784 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
22786 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
22787 ? info->first_gp_reg_save
22788 : (sel & SAVRES_REG) == SAVRES_FPR
22789 ? info->first_fp_reg_save - 32
22790 : (sel & SAVRES_REG) == SAVRES_VR
22791 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
22792 : -1);
22793 rtx sym;
22794 int select = sel;
22796 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
22797 versions of the gpr routines. */
22798 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
22799 && info->spe_64bit_regs_used)
22800 select ^= SAVRES_FPR ^ SAVRES_GPR;
22802 /* Don't generate bogus routine names. */
22803 gcc_assert (FIRST_SAVRES_REGISTER <= regno
22804 && regno <= LAST_SAVRES_REGISTER
22805 && select >= 0 && select <= 12);
22807 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
22809 if (sym == NULL)
22811 char *name;
22813 name = rs6000_savres_routine_name (info, regno, sel);
22815 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
22816 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
22817 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
22820 return sym;
22823 /* Emit a sequence of insns, including a stack tie if needed, for
22824 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
22825 reset the stack pointer, but move the base of the frame into
22826 reg UPDT_REGNO for use by out-of-line register restore routines. */
22828 static rtx
22829 rs6000_emit_stack_reset (rs6000_stack_t *info,
22830 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
22831 unsigned updt_regno)
22833 rtx updt_reg_rtx;
22835 /* This blockage is needed so that sched doesn't decide to move
22836 the sp change before the register restores. */
22837 if (DEFAULT_ABI == ABI_V4
22838 || (TARGET_SPE_ABI
22839 && info->spe_64bit_regs_used != 0
22840 && info->first_gp_reg_save != 32))
22841 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
22843 /* If we are restoring registers out-of-line, we will be using the
22844 "exit" variants of the restore routines, which will reset the
22845 stack for us. But we do need to point updt_reg into the
22846 right place for those routines. */
22847 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
22849 if (frame_off != 0)
22850 return emit_insn (gen_add3_insn (updt_reg_rtx,
22851 frame_reg_rtx, GEN_INT (frame_off)));
22852 else if (REGNO (frame_reg_rtx) != updt_regno)
22853 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
22855 return NULL_RTX;
22858 /* Return the register number used as a pointer by out-of-line
22859 save/restore functions. */
22861 static inline unsigned
22862 ptr_regno_for_savres (int sel)
22864 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22865 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
22866 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
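/* Spelled out: AIX/ELFv2 routines use r1 as the pointer for FPRs and
   for any LR variant, and r12 otherwise; Darwin uses r1 only for FPRs;
   the remaining ABIs (e.g. V.4) use r11.  */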
22869 /* Construct a parallel rtx describing the effect of a call to an
22870 out-of-line register save/restore routine, and emit the insn
22871 or jump_insn as appropriate. */
22873 static rtx
22874 rs6000_emit_savres_rtx (rs6000_stack_t *info,
22875 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
22876 enum machine_mode reg_mode, int sel)
22878 int i;
22879 int offset, start_reg, end_reg, n_regs, use_reg;
22880 int reg_size = GET_MODE_SIZE (reg_mode);
22881 rtx sym;
22882 rtvec p;
22883 rtx par, insn;
22885 offset = 0;
22886 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22887 ? info->first_gp_reg_save
22888 : (sel & SAVRES_REG) == SAVRES_FPR
22889 ? info->first_fp_reg_save
22890 : (sel & SAVRES_REG) == SAVRES_VR
22891 ? info->first_altivec_reg_save
22892 : -1);
22893 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22894 ? 32
22895 : (sel & SAVRES_REG) == SAVRES_FPR
22896 ? 64
22897 : (sel & SAVRES_REG) == SAVRES_VR
22898 ? LAST_ALTIVEC_REGNO + 1
22899 : -1);
22900 n_regs = end_reg - start_reg;
22901 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
22902 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
22903 + n_regs);
22905 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22906 RTVEC_ELT (p, offset++) = ret_rtx;
22908 RTVEC_ELT (p, offset++)
22909 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
22911 sym = rs6000_savres_routine_sym (info, sel);
22912 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
22914 use_reg = ptr_regno_for_savres (sel);
22915 if ((sel & SAVRES_REG) == SAVRES_VR)
22917 /* Vector regs are saved/restored using [reg+reg] addressing. */
22918 RTVEC_ELT (p, offset++)
22919 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22920 RTVEC_ELT (p, offset++)
22921 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
22923 else
22924 RTVEC_ELT (p, offset++)
22925 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22927 for (i = 0; i < end_reg - start_reg; i++)
22928 RTVEC_ELT (p, i + offset)
22929 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
22930 frame_reg_rtx, save_area_offset + reg_size * i,
22931 (sel & SAVRES_SAVE) != 0);
22933 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22934 RTVEC_ELT (p, i + offset)
22935 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
22937 par = gen_rtx_PARALLEL (VOIDmode, p);
22939 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22941 insn = emit_jump_insn (par);
22942 JUMP_LABEL (insn) = ret_rtx;
22944 else
22945 insn = emit_insn (par);
22946 return insn;
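/* Illustrative shape of the PARALLEL built above for a V.4 GPR "exit"
   restore (SAVRES_LR set, SAVRES_SAVE clear), assuming a hypothetical
   first_gp_reg_save of 29:

	(parallel [(return)
		   (clobber (reg LR_REGNO))
		   (use (symbol_ref "_restgpr_29_x"))
		   (use (reg 11))
		   (set (reg 29) (mem (plus (reg 11) ...)))
		   (set (reg 30) (mem ...))
		   (set (reg 31) (mem ...))])  */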
22949 /* Emit code to store into REG the CR fields that need to be saved. */
22951 static void
22952 rs6000_emit_move_from_cr (rtx reg)
22954 /* Only the ELFv2 ABI lets us store just the fields that need saving. */
22955 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
22957 int i, cr_reg[8], count = 0;
22959 /* Collect CR fields that must be saved. */
22960 for (i = 0; i < 8; i++)
22961 if (save_reg_p (CR0_REGNO + i))
22962 cr_reg[count++] = i;
22964 /* If it's just a single one, use mfcrf. */
22965 if (count == 1)
22967 rtvec p = rtvec_alloc (1);
22968 rtvec r = rtvec_alloc (2);
22969 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
22970 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
22971 RTVEC_ELT (p, 0)
22972 = gen_rtx_SET (VOIDmode, reg,
22973 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
22975 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22976 return;
22979 /* ??? It might be better to handle count == 2 / 3 cases here
22980 as well, using logical operations to combine the values. */
22983 emit_insn (gen_movesi_from_cr (reg));
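/* An example of the two paths (the destination register is
   illustrative): if only CR2 needs saving under ELFv2, the
   single-field case above emits something like
	mfcrf 12,0x20
   where 0x20 == 1 << (7 - 2) selects field 2; every other case falls
   back to a full
	mfcr 12
   and lets the store save all eight fields.  */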
22986 /* Determine whether gp register REG is really used. */
22988 static bool
22989 rs6000_reg_live_or_pic_offset_p (int reg)
22991 /* If the function calls eh_return, claim that all the registers that
22992 would otherwise be checked for liveness are used. This is required for
22993 the PIC offset register with -mminimal-toc on AIX, as it is advertised
22994 as "fixed" for register allocation purposes in this case. */
22996 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
22997 && (!call_used_regs[reg]
22998 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22999 && !TARGET_SINGLE_PIC_BASE
23000 && TARGET_TOC && TARGET_MINIMAL_TOC)))
23001 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23002 && !TARGET_SINGLE_PIC_BASE
23003 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
23004 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
23007 /* Emit function prologue as insns. */
23009 void
23010 rs6000_emit_prologue (void)
23012 rs6000_stack_t *info = rs6000_stack_info ();
23013 enum machine_mode reg_mode = Pmode;
23014 int reg_size = TARGET_32BIT ? 4 : 8;
23015 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
23016 rtx frame_reg_rtx = sp_reg_rtx;
23017 unsigned int cr_save_regno;
23018 rtx cr_save_rtx = NULL_RTX;
23019 rtx insn;
23020 int strategy;
23021 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
23022 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
23023 && call_used_regs[STATIC_CHAIN_REGNUM]);
23024 /* Offset to top of frame for frame_reg and sp respectively. */
23025 HOST_WIDE_INT frame_off = 0;
23026 HOST_WIDE_INT sp_off = 0;
23028 #ifdef ENABLE_CHECKING
23029 /* Track and check usage of r0, r11, r12. */
23030 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
23031 #define START_USE(R) do \
23033 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23034 reg_inuse |= 1 << (R); \
23035 } while (0)
23036 #define END_USE(R) do \
23038 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
23039 reg_inuse &= ~(1 << (R)); \
23040 } while (0)
23041 #define NOT_INUSE(R) do \
23043 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23044 } while (0)
23045 #else
23046 #define START_USE(R) do {} while (0)
23047 #define END_USE(R) do {} while (0)
23048 #define NOT_INUSE(R) do {} while (0)
23049 #endif
23051 if (DEFAULT_ABI == ABI_ELFv2)
23053 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
23055 /* With -mminimal-toc we may generate an extra use of r2 below. */
23056 if (!TARGET_SINGLE_PIC_BASE
23057 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23058 cfun->machine->r2_setup_needed = true;
23062 if (flag_stack_usage_info)
23063 current_function_static_stack_size = info->total_size;
23065 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
23067 HOST_WIDE_INT size = info->total_size;
23069 if (crtl->is_leaf && !cfun->calls_alloca)
23071 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
23072 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
23073 size - STACK_CHECK_PROTECT);
23075 else if (size > 0)
23076 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
23079 if (TARGET_FIX_AND_CONTINUE)
23081 /* gdb on darwin arranges to forward a function from the old
23082 address by modifying the first 5 instructions of the function
23083 to branch to the overriding function. This is necessary to
23084 permit function pointers that point to the old function to
23085 actually forward to the new function. */
23086 emit_insn (gen_nop ());
23087 emit_insn (gen_nop ());
23088 emit_insn (gen_nop ());
23089 emit_insn (gen_nop ());
23090 emit_insn (gen_nop ());
23093 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
23095 reg_mode = V2SImode;
23096 reg_size = 8;
23099 /* Handle world saves specially here. */
23100 if (WORLD_SAVE_P (info))
23102 int i, j, sz;
23103 rtx treg;
23104 rtvec p;
23105 rtx reg0;
23107 /* save_world expects lr in r0. */
23108 reg0 = gen_rtx_REG (Pmode, 0);
23109 if (info->lr_save_p)
23111 insn = emit_move_insn (reg0,
23112 gen_rtx_REG (Pmode, LR_REGNO));
23113 RTX_FRAME_RELATED_P (insn) = 1;
23116 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
23117 assumptions about the offsets of various bits of the stack
23118 frame. */
23119 gcc_assert (info->gp_save_offset == -220
23120 && info->fp_save_offset == -144
23121 && info->lr_save_offset == 8
23122 && info->cr_save_offset == 4
23123 && info->push_p
23124 && info->lr_save_p
23125 && (!crtl->calls_eh_return
23126 || info->ehrd_offset == -432)
23127 && info->vrsave_save_offset == -224
23128 && info->altivec_save_offset == -416);
23130 treg = gen_rtx_REG (SImode, 11);
23131 emit_move_insn (treg, GEN_INT (-info->total_size));
23133 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
23134 in R11. It also clobbers R12, so beware! */
23136 /* Preserve CR2 for save_world prologues. */
23137 sz = 5;
23138 sz += 32 - info->first_gp_reg_save;
23139 sz += 64 - info->first_fp_reg_save;
23140 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
23141 p = rtvec_alloc (sz);
23142 j = 0;
23143 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
23144 gen_rtx_REG (SImode,
23145 LR_REGNO));
23146 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
23147 gen_rtx_SYMBOL_REF (Pmode,
23148 "*save_world"));
23149 /* We do floats first so that the instruction pattern matches
23150 properly. */
23151 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23152 RTVEC_ELT (p, j++)
23153 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23154 ? DFmode : SFmode,
23155 info->first_fp_reg_save + i),
23156 frame_reg_rtx,
23157 info->fp_save_offset + frame_off + 8 * i);
23158 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
23159 RTVEC_ELT (p, j++)
23160 = gen_frame_store (gen_rtx_REG (V4SImode,
23161 info->first_altivec_reg_save + i),
23162 frame_reg_rtx,
23163 info->altivec_save_offset + frame_off + 16 * i);
23164 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23165 RTVEC_ELT (p, j++)
23166 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23167 frame_reg_rtx,
23168 info->gp_save_offset + frame_off + reg_size * i);
23170 /* CR register traditionally saved as CR2. */
23171 RTVEC_ELT (p, j++)
23172 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
23173 frame_reg_rtx, info->cr_save_offset + frame_off);
23174 /* Explain the use of R0. */
23175 if (info->lr_save_p)
23176 RTVEC_ELT (p, j++)
23177 = gen_frame_store (reg0,
23178 frame_reg_rtx, info->lr_save_offset + frame_off);
23179 /* Explain what happens to the stack pointer. */
23181 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
23182 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
23185 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23186 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23187 treg, GEN_INT (-info->total_size), NULL_RTX);
23188 sp_off = frame_off = info->total_size;
23191 strategy = info->savres_strategy;
23193 /* For V.4, update stack before we do any saving and set back pointer. */
23194 if (! WORLD_SAVE_P (info)
23195 && info->push_p
23196 && (DEFAULT_ABI == ABI_V4
23197 || crtl->calls_eh_return))
23199 bool need_r11 = (TARGET_SPE
23200 ? (!(strategy & SAVE_INLINE_GPRS)
23201 && info->spe_64bit_regs_used == 0)
23202 : (!(strategy & SAVE_INLINE_FPRS)
23203 || !(strategy & SAVE_INLINE_GPRS)
23204 || !(strategy & SAVE_INLINE_VRS)));
23205 int ptr_regno = -1;
23206 rtx ptr_reg = NULL_RTX;
23207 int ptr_off = 0;
23209 if (info->total_size < 32767)
23210 frame_off = info->total_size;
23211 else if (need_r11)
23212 ptr_regno = 11;
23213 else if (info->cr_save_p
23214 || info->lr_save_p
23215 || info->first_fp_reg_save < 64
23216 || info->first_gp_reg_save < 32
23217 || info->altivec_size != 0
23218 || info->vrsave_mask != 0
23219 || crtl->calls_eh_return)
23220 ptr_regno = 12;
23221 else
23223 /* The prologue won't be saving any regs so there is no need
23224 to set up a frame register to access any frame save area.
23225 We also won't be using frame_off anywhere below, but set
23226 the correct value anyway to protect against future
23227 changes to this function. */
23228 frame_off = info->total_size;
23230 if (ptr_regno != -1)
23232 /* Set up the frame offset to that needed by the first
23233 out-of-line save function. */
23234 START_USE (ptr_regno);
23235 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23236 frame_reg_rtx = ptr_reg;
23237 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
23238 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
23239 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
23240 ptr_off = info->gp_save_offset + info->gp_size;
23241 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
23242 ptr_off = info->altivec_save_offset + info->altivec_size;
23243 frame_off = -ptr_off;
23245 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23246 sp_off = info->total_size;
23247 if (frame_reg_rtx != sp_reg_rtx)
23248 rs6000_emit_stack_tie (frame_reg_rtx, false);
23251 /* If we use the link register, get it into r0. */
23252 if (!WORLD_SAVE_P (info) && info->lr_save_p)
23254 rtx addr, reg, mem;
23256 reg = gen_rtx_REG (Pmode, 0);
23257 START_USE (0);
23258 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
23259 RTX_FRAME_RELATED_P (insn) = 1;
23261 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
23262 | SAVE_NOINLINE_FPRS_SAVES_LR)))
23264 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23265 GEN_INT (info->lr_save_offset + frame_off));
23266 mem = gen_rtx_MEM (Pmode, addr);
23267 /* This should not be of rs6000_sr_alias_set, because of
23268 __builtin_return_address. */
23270 insn = emit_move_insn (mem, reg);
23271 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23272 NULL_RTX, NULL_RTX, NULL_RTX);
23273 END_USE (0);
23277 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
23278 r12 will be needed by the out-of-line gpr save. */
23279 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23280 && !(strategy & (SAVE_INLINE_GPRS
23281 | SAVE_NOINLINE_GPRS_SAVES_LR))
23282 ? 11 : 12);
23283 if (!WORLD_SAVE_P (info)
23284 && info->cr_save_p
23285 && REGNO (frame_reg_rtx) != cr_save_regno
23286 && !(using_static_chain_p && cr_save_regno == 11))
23288 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
23289 START_USE (cr_save_regno);
23290 rs6000_emit_move_from_cr (cr_save_rtx);
23293 /* Do any required saving of fpr's. If only one or two to save, do
23294 it ourselves. Otherwise, call an out-of-line routine. */
23295 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
23297 int i;
23298 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23299 if (save_reg_p (info->first_fp_reg_save + i))
23300 emit_frame_save (frame_reg_rtx,
23301 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23302 ? DFmode : SFmode),
23303 info->first_fp_reg_save + i,
23304 info->fp_save_offset + frame_off + 8 * i,
23305 sp_off - frame_off);
23307 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
23309 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23310 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23311 unsigned ptr_regno = ptr_regno_for_savres (sel);
23312 rtx ptr_reg = frame_reg_rtx;
23314 if (REGNO (frame_reg_rtx) == ptr_regno)
23315 gcc_checking_assert (frame_off == 0);
23316 else
23318 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23319 NOT_INUSE (ptr_regno);
23320 emit_insn (gen_add3_insn (ptr_reg,
23321 frame_reg_rtx, GEN_INT (frame_off)));
23323 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23324 info->fp_save_offset,
23325 info->lr_save_offset,
23326 DFmode, sel);
23327 rs6000_frame_related (insn, ptr_reg, sp_off,
23328 NULL_RTX, NULL_RTX, NULL_RTX);
23329 if (lr)
23330 END_USE (0);
23333 /* Save GPRs. This is done as a PARALLEL if we are using
23334 the store-multiple instructions. */
23335 if (!WORLD_SAVE_P (info)
23336 && TARGET_SPE_ABI
23337 && info->spe_64bit_regs_used != 0
23338 && info->first_gp_reg_save != 32)
23340 int i;
23341 rtx spe_save_area_ptr;
23342 HOST_WIDE_INT save_off;
23343 int ool_adjust = 0;
23345 /* Determine whether we can address all of the registers that need
23346 to be saved with an offset from frame_reg_rtx that fits in
23347 the small const field for SPE memory instructions. */
23348 int spe_regs_addressable
23349 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
23350 + reg_size * (32 - info->first_gp_reg_save - 1))
23351 && (strategy & SAVE_INLINE_GPRS));
23353 if (spe_regs_addressable)
23355 spe_save_area_ptr = frame_reg_rtx;
23356 save_off = frame_off;
23358 else
23360 /* Make r11 point to the start of the SPE save area. We need
23361 to be careful here if r11 is holding the static chain. If
23362 it is, then temporarily save it in r0. */
23363 HOST_WIDE_INT offset;
23365 if (!(strategy & SAVE_INLINE_GPRS))
23366 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
23367 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
23368 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
23369 save_off = frame_off - offset;
23371 if (using_static_chain_p)
23373 rtx r0 = gen_rtx_REG (Pmode, 0);
23375 START_USE (0);
23376 gcc_assert (info->first_gp_reg_save > 11);
23378 emit_move_insn (r0, spe_save_area_ptr);
23380 else if (REGNO (frame_reg_rtx) != 11)
23381 START_USE (11);
23383 emit_insn (gen_addsi3 (spe_save_area_ptr,
23384 frame_reg_rtx, GEN_INT (offset)));
23385 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
23386 frame_off = -info->spe_gp_save_offset + ool_adjust;
23389 if ((strategy & SAVE_INLINE_GPRS))
23391 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23392 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23393 emit_frame_save (spe_save_area_ptr, reg_mode,
23394 info->first_gp_reg_save + i,
23395 (info->spe_gp_save_offset + save_off
23396 + reg_size * i),
23397 sp_off - save_off);
23399 else
23401 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
23402 info->spe_gp_save_offset + save_off,
23403 0, reg_mode,
23404 SAVRES_SAVE | SAVRES_GPR);
23406 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
23407 NULL_RTX, NULL_RTX, NULL_RTX);
23410 /* Move the static chain pointer back. */
23411 if (!spe_regs_addressable)
23413 if (using_static_chain_p)
23415 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
23416 END_USE (0);
23418 else if (REGNO (frame_reg_rtx) != 11)
23419 END_USE (11);
23422 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
23424 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23425 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23426 unsigned ptr_regno = ptr_regno_for_savres (sel);
23427 rtx ptr_reg = frame_reg_rtx;
23428 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23429 int end_save = info->gp_save_offset + info->gp_size;
23430 int ptr_off;
23432 if (!ptr_set_up)
23433 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23435 /* Need to adjust r11 (r12) if we saved any FPRs. */
23436 if (end_save + frame_off != 0)
23438 rtx offset = GEN_INT (end_save + frame_off);
23440 if (ptr_set_up)
23441 frame_off = -end_save;
23442 else
23443 NOT_INUSE (ptr_regno);
23444 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23446 else if (!ptr_set_up)
23448 NOT_INUSE (ptr_regno);
23449 emit_move_insn (ptr_reg, frame_reg_rtx);
23451 ptr_off = -end_save;
23452 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23453 info->gp_save_offset + ptr_off,
23454 info->lr_save_offset + ptr_off,
23455 reg_mode, sel);
23456 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23457 NULL_RTX, NULL_RTX, NULL_RTX);
23458 if (lr)
23459 END_USE (0);
23461 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23463 rtvec p;
23464 int i;
23465 p = rtvec_alloc (32 - info->first_gp_reg_save);
23466 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23467 RTVEC_ELT (p, i)
23468 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23469 frame_reg_rtx,
23470 info->gp_save_offset + frame_off + reg_size * i);
23471 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23472 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23473 NULL_RTX, NULL_RTX, NULL_RTX);
23475 else if (!WORLD_SAVE_P (info))
23477 int i;
23478 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23479 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23480 emit_frame_save (frame_reg_rtx, reg_mode,
23481 info->first_gp_reg_save + i,
23482 info->gp_save_offset + frame_off + reg_size * i,
23483 sp_off - frame_off);
23486 if (crtl->calls_eh_return)
23488 unsigned int i;
23489 rtvec p;
23491 for (i = 0; ; ++i)
23493 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23494 if (regno == INVALID_REGNUM)
23495 break;
23498 p = rtvec_alloc (i);
23500 for (i = 0; ; ++i)
23502 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23503 if (regno == INVALID_REGNUM)
23504 break;
23506 insn
23507 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23508 sp_reg_rtx,
23509 info->ehrd_offset + sp_off + reg_size * (int) i);
23510 RTVEC_ELT (p, i) = insn;
23511 RTX_FRAME_RELATED_P (insn) = 1;
23514 insn = emit_insn (gen_blockage ());
23515 RTX_FRAME_RELATED_P (insn) = 1;
23516 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23519 /* In the AIX ABI we need to make sure r2 is really saved. */
23520 if (TARGET_AIX && crtl->calls_eh_return)
23522 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23523 rtx save_insn, join_insn, note;
23524 long toc_restore_insn;
23526 tmp_reg = gen_rtx_REG (Pmode, 11);
23527 tmp_reg_si = gen_rtx_REG (SImode, 11);
23528 if (using_static_chain_p)
23530 START_USE (0);
23531 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23533 else
23534 START_USE (11);
23535 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23536 /* Peek at the instruction to which this function returns. If it's
23537 restoring r2, then we know we've already saved r2. We can't
23538 unconditionally save r2 because the value we have will already
23539 be updated if we arrived at this function via a plt call or
23540 toc adjusting stub. */
23541 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23542 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23543 + RS6000_TOC_SAVE_SLOT);
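/* 0x80410000 and 0xE8410000 are the encodings of "lwz r2,0(r1)"
   and "ld r2,0(r1)" respectively; adding RS6000_TOC_SAVE_SLOT
   forms the exact toc-restoring load a call site would contain.  */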
23544 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23545 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23546 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23547 validate_condition_mode (EQ, CCUNSmode);
23548 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23549 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
23550 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
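/* Since hi and lo partition toc_restore_insn, the xor above cancels
   the expected high half, so the comparison against lo succeeds
   exactly when the loaded insn matches toc_restore_insn.  */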
23551 toc_save_done = gen_label_rtx ();
23552 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
23553 gen_rtx_EQ (VOIDmode, compare_result,
23554 const0_rtx),
23555 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
23556 pc_rtx);
23557 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
23558 JUMP_LABEL (jump) = toc_save_done;
23559 LABEL_NUSES (toc_save_done) += 1;
23561 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
23562 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
23563 sp_off - frame_off);
23565 emit_label (toc_save_done);
23567 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
23568 have a CFG that has different saves along different paths.
23569 Move the note to a dummy blockage insn, which describes that
23570 R2 is unconditionally saved after the label. */
23571 /* ??? An alternate representation might be a special insn pattern
23572 containing both the branch and the store. That might give the
23573 code that minimizes the number of DW_CFA_advance opcodes more
23574 freedom in placing the annotations. */
23575 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
23576 if (note)
23577 remove_note (save_insn, note);
23578 else
23579 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
23580 copy_rtx (PATTERN (save_insn)), NULL_RTX);
23581 RTX_FRAME_RELATED_P (save_insn) = 0;
23583 join_insn = emit_insn (gen_blockage ());
23584 REG_NOTES (join_insn) = note;
23585 RTX_FRAME_RELATED_P (join_insn) = 1;
23587 if (using_static_chain_p)
23589 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
23590 END_USE (0);
23592 else
23593 END_USE (11);
23596 /* Save CR if we use any that must be preserved. */
23597 if (!WORLD_SAVE_P (info) && info->cr_save_p)
23599 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23600 GEN_INT (info->cr_save_offset + frame_off));
23601 rtx mem = gen_frame_mem (SImode, addr);
23603 /* If we didn't copy cr before, do so now using r0. */
23604 if (cr_save_rtx == NULL_RTX)
23606 START_USE (0);
23607 cr_save_rtx = gen_rtx_REG (SImode, 0);
23608 rs6000_emit_move_from_cr (cr_save_rtx);
23611 /* Saving CR requires a two-instruction sequence: one instruction
23612 to move the CR to a general-purpose register, and a second
23613 instruction that stores the GPR to memory.
23615 We do not emit any DWARF CFI records for the first of these,
23616 because we cannot properly represent the fact that CR is saved in
23617 a register. One reason is that we cannot express that multiple
23618 CR fields are saved; another reason is that on 64-bit, the size
23619 of the CR register in DWARF (4 bytes) differs from the size of
23620 a general-purpose register.
23622 This means if any intervening instruction were to clobber one of
23623 the call-saved CR fields, we'd have incorrect CFI. To prevent
23624 this from happening, we mark the store to memory as a use of
23625 those CR fields, which prevents any such instruction from being
23626 scheduled in between the two instructions. */
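/* The emitted sequence is roughly:
       mfcr rN
       stw rN,cr_save_offset(frame_reg)
   with the USEs below tying the two insns together.  */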
23627 rtx crsave_v[9];
23628 int n_crsave = 0;
23629 int i;
23631 crsave_v[n_crsave++] = gen_rtx_SET (VOIDmode, mem, cr_save_rtx);
23632 for (i = 0; i < 8; i++)
23633 if (save_reg_p (CR0_REGNO + i))
23634 crsave_v[n_crsave++]
23635 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23637 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
23638 gen_rtvec_v (n_crsave, crsave_v)));
23639 END_USE (REGNO (cr_save_rtx));
23641 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23642 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23643 so we need to construct a frame expression manually. */
23644 RTX_FRAME_RELATED_P (insn) = 1;
23646 /* Update address to be stack-pointer relative, like
23647 rs6000_frame_related would do. */
23648 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
23649 GEN_INT (info->cr_save_offset + sp_off));
23650 mem = gen_frame_mem (SImode, addr);
23652 if (DEFAULT_ABI == ABI_ELFv2)
23654 /* In the ELFv2 ABI we generate separate CFI records for each
23655 CR field that was actually saved. They all point to the
23656 same 32-bit stack slot. */
23657 rtx crframe[8];
23658 int n_crframe = 0;
23660 for (i = 0; i < 8; i++)
23661 if (save_reg_p (CR0_REGNO + i))
23663 crframe[n_crframe]
23664 = gen_rtx_SET (VOIDmode, mem,
23665 gen_rtx_REG (SImode, CR0_REGNO + i));
23667 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
23668 n_crframe++;
23671 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23672 gen_rtx_PARALLEL (VOIDmode,
23673 gen_rtvec_v (n_crframe, crframe)));
23675 else
23677 /* In other ABIs, by convention, we use a single CR regnum to
23678 represent the fact that all call-saved CR fields are saved.
23679 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23680 rtx set = gen_rtx_SET (VOIDmode, mem,
23681 gen_rtx_REG (SImode, CR2_REGNO));
23682 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
23686 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23687 *separate* slots if the routine calls __builtin_eh_return, so
23688 that they can be independently restored by the unwinder. */
23689 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23691 int i, cr_off = info->ehcr_offset;
23692 rtx crsave;
23694 /* ??? We might get better performance by using multiple mfocrf
23695 instructions. */
23696 crsave = gen_rtx_REG (SImode, 0);
23697 emit_insn (gen_movesi_from_cr (crsave));
23699 for (i = 0; i < 8; i++)
23700 if (!call_used_regs[CR0_REGNO + i])
23702 rtvec p = rtvec_alloc (2);
23703 RTVEC_ELT (p, 0)
23704 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
23705 RTVEC_ELT (p, 1)
23706 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23708 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23710 RTX_FRAME_RELATED_P (insn) = 1;
23711 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23712 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
23713 sp_reg_rtx, cr_off + sp_off));
23715 cr_off += reg_size;
23719 /* Update stack and set back pointer unless this is V.4,
23720 for which it was done previously. */
23721 if (!WORLD_SAVE_P (info) && info->push_p
23722 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
23724 rtx ptr_reg = NULL;
23725 int ptr_off = 0;
23727 /* If saving altivec regs we need to be able to address all save
23728 locations using a 16-bit offset. */
23729 if ((strategy & SAVE_INLINE_VRS) == 0
23730 || (info->altivec_size != 0
23731 && (info->altivec_save_offset + info->altivec_size - 16
23732 + info->total_size - frame_off) > 32767)
23733 || (info->vrsave_size != 0
23734 && (info->vrsave_save_offset
23735 + info->total_size - frame_off) > 32767))
23737 int sel = SAVRES_SAVE | SAVRES_VR;
23738 unsigned ptr_regno = ptr_regno_for_savres (sel);
23740 if (using_static_chain_p
23741 && ptr_regno == STATIC_CHAIN_REGNUM)
23742 ptr_regno = 12;
23743 if (REGNO (frame_reg_rtx) != ptr_regno)
23744 START_USE (ptr_regno);
23745 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23746 frame_reg_rtx = ptr_reg;
23747 ptr_off = info->altivec_save_offset + info->altivec_size;
23748 frame_off = -ptr_off;
23750 else if (REGNO (frame_reg_rtx) == 1)
23751 frame_off = info->total_size;
23752 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23753 sp_off = info->total_size;
23754 if (frame_reg_rtx != sp_reg_rtx)
23755 rs6000_emit_stack_tie (frame_reg_rtx, false);
23758 /* Set frame pointer, if needed. */
23759 if (frame_pointer_needed)
23761 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
23762 sp_reg_rtx);
23763 RTX_FRAME_RELATED_P (insn) = 1;
23766 /* Save AltiVec registers if needed. Save here because the red zone does
23767 not always include AltiVec registers. */
23768 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23769 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
23771 int end_save = info->altivec_save_offset + info->altivec_size;
23772 int ptr_off;
23773 /* Oddly, the vector save/restore functions point r0 at the end
23774 of the save area, then use r11 or r12 to load offsets for
23775 [reg+reg] addressing. */
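/* That is, the out-of-line routine presumably materializes small
   negative offsets in r11/r12 and stores through "stvx vN,rOFF,r0"
   style [reg+reg] addresses back from the end of the area.  */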
23776 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
23777 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
23778 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
23780 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23781 NOT_INUSE (0);
23782 if (end_save + frame_off != 0)
23784 rtx offset = GEN_INT (end_save + frame_off);
23786 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23788 else
23789 emit_move_insn (ptr_reg, frame_reg_rtx);
23791 ptr_off = -end_save;
23792 insn = rs6000_emit_savres_rtx (info, scratch_reg,
23793 info->altivec_save_offset + ptr_off,
23794 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
23795 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
23796 NULL_RTX, NULL_RTX, NULL_RTX);
23797 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
23799 /* The oddity mentioned above clobbered our frame reg. */
23800 emit_move_insn (frame_reg_rtx, ptr_reg);
23801 frame_off = ptr_off;
23804 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23805 && info->altivec_size != 0)
23807 int i;
23809 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
23810 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
23812 rtx areg, savereg, mem, split_reg;
23813 int offset;
23815 offset = (info->altivec_save_offset + frame_off
23816 + 16 * (i - info->first_altivec_reg_save));
23818 savereg = gen_rtx_REG (V4SImode, i);
23820 NOT_INUSE (0);
23821 areg = gen_rtx_REG (Pmode, 0);
23822 emit_move_insn (areg, GEN_INT (offset));
23824 /* AltiVec addressing mode is [reg+reg]. */
23825 mem = gen_frame_mem (V4SImode,
23826 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
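/* The store below is normally a single "stvx vN,frame_reg,r0";
   on little-endian VSX it may instead split into a permute plus
   stxvd2x, which the split_reg handling below accounts for.  */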
23828 insn = emit_move_insn (mem, savereg);
23830 /* When we split a VSX store into two insns, we need to make
23831 sure the DWARF info knows which register we are storing.
23832 Pass it in to be used on the appropriate note. */
23833 if (!BYTES_BIG_ENDIAN
23834 && GET_CODE (PATTERN (insn)) == SET
23835 && GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
23836 split_reg = savereg;
23837 else
23838 split_reg = NULL_RTX;
23840 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23841 areg, GEN_INT (offset), split_reg);
23845 /* VRSAVE is a bit vector representing which AltiVec registers
23846 are used. The OS uses this to determine which vector
23847 registers to save on a context switch. We need to save
23848 VRSAVE on the stack frame, add whatever AltiVec registers we
23849 used in this function, and do the corresponding magic in the
23850 epilogue. */
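/* The sequence below is roughly:
       mfspr rN,VRSAVE
       stw rN,vrsave_save_offset(frame_reg)
       oris/ori rN,rN,vrsave_mask
       mtspr VRSAVE,rN  */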
23852 if (!WORLD_SAVE_P (info)
23853 && TARGET_ALTIVEC
23854 && TARGET_ALTIVEC_VRSAVE
23855 && info->vrsave_mask != 0)
23857 rtx reg, vrsave;
23858 int offset;
23859 int save_regno;
23861 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
23862 be using r12 as frame_reg_rtx and r11 as the static chain
23863 pointer for nested functions. */
23864 save_regno = 12;
23865 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23866 && !using_static_chain_p)
23867 save_regno = 11;
23868 else if (REGNO (frame_reg_rtx) == 12)
23870 save_regno = 11;
23871 if (using_static_chain_p)
23872 save_regno = 0;
23875 NOT_INUSE (save_regno);
23876 reg = gen_rtx_REG (SImode, save_regno);
23877 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
23878 if (TARGET_MACHO)
23879 emit_insn (gen_get_vrsave_internal (reg));
23880 else
23881 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
23883 /* Save VRSAVE. */
23884 offset = info->vrsave_save_offset + frame_off;
23885 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
23887 /* Include the registers in the mask. */
23888 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
23890 insn = emit_insn (generate_set_vrsave (reg, info, 0));
23893 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
23894 if (!TARGET_SINGLE_PIC_BASE
23895 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23896 || (DEFAULT_ABI == ABI_V4
23897 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23898 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
23900 /* If emit_load_toc_table will use the link register, we need to save
23901 it. We use R12 for this purpose because emit_load_toc_table
23902 can use register 0. This allows us to use a plain 'blr' to return
23903 from the procedure more often. */
23904 int save_LR_around_toc_setup = (TARGET_ELF
23905 && DEFAULT_ABI == ABI_V4
23906 && flag_pic
23907 && ! info->lr_save_p
23908 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
23909 if (save_LR_around_toc_setup)
23911 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23912 rtx tmp = gen_rtx_REG (Pmode, 12);
23914 insn = emit_move_insn (tmp, lr);
23915 RTX_FRAME_RELATED_P (insn) = 1;
23917 rs6000_emit_load_toc_table (TRUE);
23919 insn = emit_move_insn (lr, tmp);
23920 add_reg_note (insn, REG_CFA_RESTORE, lr);
23921 RTX_FRAME_RELATED_P (insn) = 1;
23923 else
23924 rs6000_emit_load_toc_table (TRUE);
23927 #if TARGET_MACHO
23928 if (!TARGET_SINGLE_PIC_BASE
23929 && DEFAULT_ABI == ABI_DARWIN
23930 && flag_pic && crtl->uses_pic_offset_table)
23932 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23933 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
23935 /* Save and restore LR locally around this call (in R0). */
23936 if (!info->lr_save_p)
23937 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
23939 emit_insn (gen_load_macho_picbase (src));
23941 emit_move_insn (gen_rtx_REG (Pmode,
23942 RS6000_PIC_OFFSET_TABLE_REGNUM),
23943 lr);
23945 if (!info->lr_save_p)
23946 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
23948 #endif
23950 /* If we need to, save the TOC register after doing the stack setup.
23951 Do not emit eh frame info for this save. The unwinder wants info,
23952 conceptually attached to instructions in this function, about
23953 register values in the caller of this function. This R2 may have
23954 already been changed from the value in the caller.
23955 We don't attempt to write accurate DWARF EH frame info for R2
23956 because code emitted by gcc for a (non-pointer) function call
23957 doesn't save and restore R2. Instead, R2 is managed out-of-line
23958 by a linker generated plt call stub when the function resides in
23959 a shared library. This behaviour is costly to describe in DWARF,
23960 both in terms of the size of DWARF info and the time taken in the
23961 unwinder to interpret it. R2 changes, apart from the
23962 calls_eh_return case earlier in this function, are handled by
23963 linux-unwind.h frob_update_context. */
23964 if (rs6000_save_toc_in_prologue_p ())
23966 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
23967 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
23971 /* Write function prologue. */
23973 static void
23974 rs6000_output_function_prologue (FILE *file,
23975 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23977 rs6000_stack_t *info = rs6000_stack_info ();
23979 if (TARGET_DEBUG_STACK)
23980 debug_stack_info (info);
23982 /* Write .extern for any function we will call to save and restore
23983 fp values. */
23984 if (info->first_fp_reg_save < 64
23985 && !TARGET_MACHO
23986 && !TARGET_ELF)
23988 char *name;
23989 int regno = info->first_fp_reg_save - 32;
23991 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
23993 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23994 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23995 name = rs6000_savres_routine_name (info, regno, sel);
23996 fprintf (file, "\t.extern %s\n", name);
23998 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
24000 bool lr = (info->savres_strategy
24001 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24002 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24003 name = rs6000_savres_routine_name (info, regno, sel);
24004 fprintf (file, "\t.extern %s\n", name);
24008 /* ELFv2 ABI r2 setup code and local entry point. This must follow
24009 immediately after the global entry point label. */
24010 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
24012 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
24014 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
24015 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
24017 fputs ("\t.localentry\t", file);
24018 assemble_name (file, name);
24019 fputs (",.-", file);
24020 assemble_name (file, name);
24021 fputs ("\n", file);
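/* On entry, r12 holds the global entry point address per the ELFv2
   ABI, so the addis/addi pair just emitted computes the TOC base
   into r2, and .localentry marks where TOC-ready callers enter.  */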
24024 /* Output -mprofile-kernel code. This needs to be done here instead of
24025 in output_function_profile since it must go after the ELFv2 ABI
24026 local entry point. */
24027 if (TARGET_PROFILE_KERNEL && crtl->profile)
24029 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24030 gcc_assert (!TARGET_32BIT);
24032 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
24033 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
24035 /* In the ELFv2 ABI we have no compiler stack word. It must be
24036 the responsibility of _mcount to preserve the static chain
24037 register if required. */
24038 if (DEFAULT_ABI != ABI_ELFv2
24039 && cfun->static_chain_decl != NULL)
24041 asm_fprintf (file, "\tstd %s,24(%s)\n",
24042 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24043 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24044 asm_fprintf (file, "\tld %s,24(%s)\n",
24045 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24047 else
24048 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24051 rs6000_pic_labelno++;
24054 /* Non-zero if vmx regs are restored before the frame pop, zero if
24055 we restore after the pop when possible. */
24056 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
24058 /* Restoring cr is a two step process: loading a reg from the frame
24059 save, then moving the reg to cr. For ABI_V4 we must let the
24060 unwinder know that the stack location is no longer valid at or
24061 before the stack deallocation, but we can't emit a cfa_restore for
24062 cr at the stack deallocation like we do for other registers.
24063 The trouble is that it is possible for the move to cr to be
24064 scheduled after the stack deallocation. So say exactly where cr
24065 is located on each of the two insns. */
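/* That is, a load such as "lwz rN,cr_save_offset(r1)" followed by
   one or more mtcrf/mtocrf moves, each annotated with where cr
   lives at that point.  */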
24067 static rtx
24068 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
24070 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
24071 rtx reg = gen_rtx_REG (SImode, regno);
24072 rtx_insn *insn = emit_move_insn (reg, mem);
24074 if (!exit_func && DEFAULT_ABI == ABI_V4)
24076 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24077 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
24079 add_reg_note (insn, REG_CFA_REGISTER, set);
24080 RTX_FRAME_RELATED_P (insn) = 1;
24082 return reg;
24085 /* Reload CR from REG. */
24087 static void
24088 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
24090 int count = 0;
24091 int i;
24093 if (using_mfcr_multiple)
24095 for (i = 0; i < 8; i++)
24096 if (save_reg_p (CR0_REGNO + i))
24097 count++;
24098 gcc_assert (count);
24101 if (using_mfcr_multiple && count > 1)
24103 rtx_insn *insn;
24104 rtvec p;
24105 int ndx;
24107 p = rtvec_alloc (count);
24109 ndx = 0;
24110 for (i = 0; i < 8; i++)
24111 if (save_reg_p (CR0_REGNO + i))
24113 rtvec r = rtvec_alloc (2);
24114 RTVEC_ELT (r, 0) = reg;
24115 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
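/* The mtcrf field mask numbers CR0 as its most significant bit,
   hence 1 << (7 - i) selects CR field i.  */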
24116 RTVEC_ELT (p, ndx) =
24117 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
24118 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
24119 ndx++;
24121 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24122 gcc_assert (ndx == count);
24124 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24125 CR field separately. */
24126 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24128 for (i = 0; i < 8; i++)
24129 if (save_reg_p (CR0_REGNO + i))
24130 add_reg_note (insn, REG_CFA_RESTORE,
24131 gen_rtx_REG (SImode, CR0_REGNO + i));
24133 RTX_FRAME_RELATED_P (insn) = 1;
24136 else
24137 for (i = 0; i < 8; i++)
24138 if (save_reg_p (CR0_REGNO + i))
24140 rtx insn = emit_insn (gen_movsi_to_cr_one
24141 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24143 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24144 CR field separately, attached to the insn that in fact
24145 restores this particular CR field. */
24146 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24148 add_reg_note (insn, REG_CFA_RESTORE,
24149 gen_rtx_REG (SImode, CR0_REGNO + i));
24151 RTX_FRAME_RELATED_P (insn) = 1;
24155 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
24156 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
24157 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24159 rtx_insn *insn = get_last_insn ();
24160 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24162 add_reg_note (insn, REG_CFA_RESTORE, cr);
24163 RTX_FRAME_RELATED_P (insn) = 1;
24167 /* Like cr, the move to lr instruction can be scheduled after the
24168 stack deallocation, but unlike cr, its stack frame save is still
24169 valid. So we only need to emit the cfa_restore on the correct
24170 instruction. */
24172 static void
24173 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
24175 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
24176 rtx reg = gen_rtx_REG (Pmode, regno);
24178 emit_move_insn (reg, mem);
24181 static void
24182 restore_saved_lr (int regno, bool exit_func)
24184 rtx reg = gen_rtx_REG (Pmode, regno);
24185 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24186 rtx_insn *insn = emit_move_insn (lr, reg);
24188 if (!exit_func && flag_shrink_wrap)
24190 add_reg_note (insn, REG_CFA_RESTORE, lr);
24191 RTX_FRAME_RELATED_P (insn) = 1;
24195 static rtx
24196 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
24198 if (DEFAULT_ABI == ABI_ELFv2)
24200 int i;
24201 for (i = 0; i < 8; i++)
24202 if (save_reg_p (CR0_REGNO + i))
24204 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
24205 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
24206 cfa_restores);
24209 else if (info->cr_save_p)
24210 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24211 gen_rtx_REG (SImode, CR2_REGNO),
24212 cfa_restores);
24214 if (info->lr_save_p)
24215 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24216 gen_rtx_REG (Pmode, LR_REGNO),
24217 cfa_restores);
24218 return cfa_restores;
24221 /* Return true if OFFSET from stack pointer can be clobbered by signals.
24222 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
24223 below the stack pointer that are not clobbered by signals. */
24225 static inline bool
24226 offset_below_red_zone_p (HOST_WIDE_INT offset)
24228 return offset < (DEFAULT_ABI == ABI_V4
24229 ? 0
24230 : TARGET_32BIT ? -220 : -288);
24233 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
24235 static void
24236 emit_cfa_restores (rtx cfa_restores)
24238 rtx_insn *insn = get_last_insn ();
24239 rtx *loc = &REG_NOTES (insn);
24241 while (*loc)
24242 loc = &XEXP (*loc, 1);
24243 *loc = cfa_restores;
24244 RTX_FRAME_RELATED_P (insn) = 1;
24247 /* Emit function epilogue as insns. */
24249 void
24250 rs6000_emit_epilogue (int sibcall)
24252 rs6000_stack_t *info;
24253 int restoring_GPRs_inline;
24254 int restoring_FPRs_inline;
24255 int using_load_multiple;
24256 int using_mtcr_multiple;
24257 int use_backchain_to_restore_sp;
24258 int restore_lr;
24259 int strategy;
24260 HOST_WIDE_INT frame_off = 0;
24261 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
24262 rtx frame_reg_rtx = sp_reg_rtx;
24263 rtx cfa_restores = NULL_RTX;
24264 rtx insn;
24265 rtx cr_save_reg = NULL_RTX;
24266 enum machine_mode reg_mode = Pmode;
24267 int reg_size = TARGET_32BIT ? 4 : 8;
24268 int i;
24269 bool exit_func;
24270 unsigned ptr_regno;
24272 info = rs6000_stack_info ();
24274 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24276 reg_mode = V2SImode;
24277 reg_size = 8;
24280 strategy = info->savres_strategy;
24281 using_load_multiple = strategy & SAVRES_MULTIPLE;
24282 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
24283 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
24284 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
24285 || rs6000_cpu == PROCESSOR_PPC603
24286 || rs6000_cpu == PROCESSOR_PPC750
24287 || optimize_size);
24288 /* Restore via the backchain when we have a large frame, since this
24289 is more efficient than an addis, addi pair. The second condition
24290 here will not trigger at the moment; we don't actually need a
24291 frame pointer for alloca, but the generic parts of the compiler
24292 give us one anyway. */
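/* The backchain word lives at offset 0 of the frame, so restoring
   through it is a single "lwz/ld rN,0(r1)" style load rather than
   an add of the reconstructed frame size.  */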
24293 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
24294 || (cfun->calls_alloca
24295 && !frame_pointer_needed));
24296 restore_lr = (info->lr_save_p
24297 && (restoring_FPRs_inline
24298 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
24299 && (restoring_GPRs_inline
24300 || info->first_fp_reg_save < 64));
24302 if (WORLD_SAVE_P (info))
24304 int i, j;
24305 char rname[30];
24306 const char *alloc_rname;
24307 rtvec p;
24309 /* eh_rest_world_r10 will return to the location saved in the LR
24310 stack slot (which is not likely to be our caller).
24311 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24312 rest_world is similar, except any R10 parameter is ignored.
24313 The exception-handling stuff that was here in 2.95 is no
24314 longer necessary. */
24316 p = rtvec_alloc (9
24318 + 32 - info->first_gp_reg_save
24319 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
24320 + 63 + 1 - info->first_fp_reg_save);
24322 strcpy (rname, ((crtl->calls_eh_return) ?
24323 "*eh_rest_world_r10" : "*rest_world"));
24324 alloc_rname = ggc_strdup (rname);
24326 j = 0;
24327 RTVEC_ELT (p, j++) = ret_rtx;
24328 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
24329 gen_rtx_REG (Pmode,
24330 LR_REGNO));
24331 RTVEC_ELT (p, j++)
24332 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
24333 /* The instruction pattern requires a clobber here;
24334 it is shared with the restVEC helper. */
24335 RTVEC_ELT (p, j++)
24336 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
24339 /* CR register traditionally saved as CR2. */
24340 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
24341 RTVEC_ELT (p, j++)
24342 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
24343 if (flag_shrink_wrap)
24345 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24346 gen_rtx_REG (Pmode, LR_REGNO),
24347 cfa_restores);
24348 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24352 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24354 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24355 RTVEC_ELT (p, j++)
24356 = gen_frame_load (reg,
24357 frame_reg_rtx, info->gp_save_offset + reg_size * i);
24358 if (flag_shrink_wrap)
24359 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24361 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
24363 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
24364 RTVEC_ELT (p, j++)
24365 = gen_frame_load (reg,
24366 frame_reg_rtx, info->altivec_save_offset + 16 * i);
24367 if (flag_shrink_wrap)
24368 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24370 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
24372 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24373 ? DFmode : SFmode),
24374 info->first_fp_reg_save + i);
24375 RTVEC_ELT (p, j++)
24376 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
24377 if (flag_shrink_wrap)
24378 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24380 RTVEC_ELT (p, j++)
24381 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
24382 RTVEC_ELT (p, j++)
24383 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
24384 RTVEC_ELT (p, j++)
24385 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
24386 RTVEC_ELT (p, j++)
24387 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
24388 RTVEC_ELT (p, j++)
24389 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
24390 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24392 if (flag_shrink_wrap)
24394 REG_NOTES (insn) = cfa_restores;
24395 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24396 RTX_FRAME_RELATED_P (insn) = 1;
24398 return;
24401 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24402 if (info->push_p)
24403 frame_off = info->total_size;
24405 /* Restore AltiVec registers if we must do so before adjusting the
24406 stack. */
24407 if (TARGET_ALTIVEC_ABI
24408 && info->altivec_size != 0
24409 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24410 || (DEFAULT_ABI != ABI_V4
24411 && offset_below_red_zone_p (info->altivec_save_offset))))
24413 int i;
24414 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24416 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24417 if (use_backchain_to_restore_sp)
24419 int frame_regno = 11;
24421 if ((strategy & REST_INLINE_VRS) == 0)
24423 /* Of r11 and r12, select the one not clobbered by an
24424 out-of-line restore function for the frame register. */
24425 frame_regno = 11 + 12 - scratch_regno;
24427 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24428 emit_move_insn (frame_reg_rtx,
24429 gen_rtx_MEM (Pmode, sp_reg_rtx));
24430 frame_off = 0;
24432 else if (frame_pointer_needed)
24433 frame_reg_rtx = hard_frame_pointer_rtx;
24435 if ((strategy & REST_INLINE_VRS) == 0)
24437 int end_save = info->altivec_save_offset + info->altivec_size;
24438 int ptr_off;
24439 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24440 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24442 if (end_save + frame_off != 0)
24444 rtx offset = GEN_INT (end_save + frame_off);
24446 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24448 else
24449 emit_move_insn (ptr_reg, frame_reg_rtx);
24451 ptr_off = -end_save;
24452 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24453 info->altivec_save_offset + ptr_off,
24454 0, V4SImode, SAVRES_VR);
24456 else
24458 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24459 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24461 rtx addr, areg, mem, reg;
24463 areg = gen_rtx_REG (Pmode, 0);
24464 emit_move_insn
24465 (areg, GEN_INT (info->altivec_save_offset
24466 + frame_off
24467 + 16 * (i - info->first_altivec_reg_save)));
24469 /* AltiVec addressing mode is [reg+reg]. */
24470 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24471 mem = gen_frame_mem (V4SImode, addr);
24473 reg = gen_rtx_REG (V4SImode, i);
24474 emit_move_insn (reg, mem);
24478 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24479 if (((strategy & REST_INLINE_VRS) == 0
24480 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24481 && (flag_shrink_wrap
24482 || (offset_below_red_zone_p
24483 (info->altivec_save_offset
24484 + 16 * (i - info->first_altivec_reg_save)))))
24486 rtx reg = gen_rtx_REG (V4SImode, i);
24487 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24491 /* Restore VRSAVE if we must do so before adjusting the stack. */
24492 if (TARGET_ALTIVEC
24493 && TARGET_ALTIVEC_VRSAVE
24494 && info->vrsave_mask != 0
24495 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24496 || (DEFAULT_ABI != ABI_V4
24497 && offset_below_red_zone_p (info->vrsave_save_offset))))
24499 rtx reg;
24501 if (frame_reg_rtx == sp_reg_rtx)
24503 if (use_backchain_to_restore_sp)
24505 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24506 emit_move_insn (frame_reg_rtx,
24507 gen_rtx_MEM (Pmode, sp_reg_rtx));
24508 frame_off = 0;
24510 else if (frame_pointer_needed)
24511 frame_reg_rtx = hard_frame_pointer_rtx;
24514 reg = gen_rtx_REG (SImode, 12);
24515 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24516 info->vrsave_save_offset + frame_off));
24518 emit_insn (generate_set_vrsave (reg, info, 1));
24521 insn = NULL_RTX;
24522 /* If we have a large stack frame, restore the old stack pointer
24523 using the backchain. */
24524 if (use_backchain_to_restore_sp)
24526 if (frame_reg_rtx == sp_reg_rtx)
24528 /* Under V.4, don't reset the stack pointer until after we're done
24529 loading the saved registers. */
24530 if (DEFAULT_ABI == ABI_V4)
24531 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24533 insn = emit_move_insn (frame_reg_rtx,
24534 gen_rtx_MEM (Pmode, sp_reg_rtx));
24535 frame_off = 0;
24537 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24538 && DEFAULT_ABI == ABI_V4)
24539 /* frame_reg_rtx has been set up by the altivec restore. */
24541 else
24543 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24544 frame_reg_rtx = sp_reg_rtx;
24547 /* If we have a frame pointer, we can restore the old stack pointer
24548 from it. */
24549 else if (frame_pointer_needed)
24551 frame_reg_rtx = sp_reg_rtx;
24552 if (DEFAULT_ABI == ABI_V4)
24553 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24554 /* Prevent reordering memory accesses against stack pointer restore. */
24555 else if (cfun->calls_alloca
24556 || offset_below_red_zone_p (-info->total_size))
24557 rs6000_emit_stack_tie (frame_reg_rtx, true);
24559 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
24560 GEN_INT (info->total_size)));
24561 frame_off = 0;
24563 else if (info->push_p
24564 && DEFAULT_ABI != ABI_V4
24565 && !crtl->calls_eh_return)
24567 /* Prevent reordering memory accesses against stack pointer restore. */
24568 if (cfun->calls_alloca
24569 || offset_below_red_zone_p (-info->total_size))
24570 rs6000_emit_stack_tie (frame_reg_rtx, false);
24571 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
24572 GEN_INT (info->total_size)));
24573 frame_off = 0;
24575 if (insn && frame_reg_rtx == sp_reg_rtx)
24577 if (cfa_restores)
24579 REG_NOTES (insn) = cfa_restores;
24580 cfa_restores = NULL_RTX;
24582 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24583 RTX_FRAME_RELATED_P (insn) = 1;
24586 /* Restore AltiVec registers if we have not done so already. */
24587 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24588 && TARGET_ALTIVEC_ABI
24589 && info->altivec_size != 0
24590 && (DEFAULT_ABI == ABI_V4
24591 || !offset_below_red_zone_p (info->altivec_save_offset)))
24593 int i;
24595 if ((strategy & REST_INLINE_VRS) == 0)
24597 int end_save = info->altivec_save_offset + info->altivec_size;
24598 int ptr_off;
24599 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24600 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24601 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24603 if (end_save + frame_off != 0)
24605 rtx offset = GEN_INT (end_save + frame_off);
24607 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24609 else
24610 emit_move_insn (ptr_reg, frame_reg_rtx);
24612 ptr_off = -end_save;
24613 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24614 info->altivec_save_offset + ptr_off,
24615 0, V4SImode, SAVRES_VR);
24616 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24618 /* Frame reg was clobbered by out-of-line save. Restore it
24619 from ptr_reg, and if we are calling an out-of-line gpr or
24620 fpr restore, set up the correct pointer and offset. */
24621 unsigned newptr_regno = 1;
24622 if (!restoring_GPRs_inline)
24624 bool lr = info->gp_save_offset + info->gp_size == 0;
24625 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24626 newptr_regno = ptr_regno_for_savres (sel);
24627 end_save = info->gp_save_offset + info->gp_size;
24629 else if (!restoring_FPRs_inline)
24631 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
24632 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24633 newptr_regno = ptr_regno_for_savres (sel);
24634 end_save = info->gp_save_offset + info->gp_size;
24637 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
24638 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
24640 if (end_save + ptr_off != 0)
24642 rtx offset = GEN_INT (end_save + ptr_off);
24644 frame_off = -end_save;
24645 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
24647 else
24649 frame_off = ptr_off;
24650 emit_move_insn (frame_reg_rtx, ptr_reg);
24654 else
24656 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24657 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24659 rtx addr, areg, mem, reg;
24661 areg = gen_rtx_REG (Pmode, 0);
24662 emit_move_insn
24663 (areg, GEN_INT (info->altivec_save_offset
24664 + frame_off
24665 + 16 * (i - info->first_altivec_reg_save)));
24667 /* AltiVec addressing mode is [reg+reg]. */
24668 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24669 mem = gen_frame_mem (V4SImode, addr);
24671 reg = gen_rtx_REG (V4SImode, i);
24672 emit_move_insn (reg, mem);
24676 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24677 if (((strategy & REST_INLINE_VRS) == 0
24678 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24679 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24681 rtx reg = gen_rtx_REG (V4SImode, i);
24682 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24686 /* Restore VRSAVE if we have not done so already. */
24687 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24688 && TARGET_ALTIVEC
24689 && TARGET_ALTIVEC_VRSAVE
24690 && info->vrsave_mask != 0
24691 && (DEFAULT_ABI == ABI_V4
24692 || !offset_below_red_zone_p (info->vrsave_save_offset)))
24694 rtx reg;
24696 reg = gen_rtx_REG (SImode, 12);
24697 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24698 info->vrsave_save_offset + frame_off));
24700 emit_insn (generate_set_vrsave (reg, info, 1));
24703 /* If we exit by an out-of-line restore function on ABI_V4 then that
24704 function will deallocate the stack, so we don't need to worry
24705 about the unwinder restoring cr from an invalid stack frame
24706 location. */
24707 exit_func = (!restoring_FPRs_inline
24708 || (!restoring_GPRs_inline
24709 && info->first_fp_reg_save == 64));
24711 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24712 *separate* slots if the routine calls __builtin_eh_return, so
24713 that they can be independently restored by the unwinder. */
24714 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24716 int i, cr_off = info->ehcr_offset;
24718 for (i = 0; i < 8; i++)
24719 if (!call_used_regs[CR0_REGNO + i])
24721 rtx reg = gen_rtx_REG (SImode, 0);
24722 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24723 cr_off + frame_off));
24725 insn = emit_insn (gen_movsi_to_cr_one
24726 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24728 if (!exit_func && flag_shrink_wrap)
24730 add_reg_note (insn, REG_CFA_RESTORE,
24731 gen_rtx_REG (SImode, CR0_REGNO + i));
24733 RTX_FRAME_RELATED_P (insn) = 1;
24736 cr_off += reg_size;
24740 /* Get the old lr if we saved it. If we are restoring registers
24741 out-of-line, then the out-of-line routines can do this for us. */
24742 if (restore_lr && restoring_GPRs_inline)
24743 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24745 /* Get the old cr if we saved it. */
24746 if (info->cr_save_p)
24748 unsigned cr_save_regno = 12;
24750 if (!restoring_GPRs_inline)
24752 /* Ensure we don't use the register used by the out-of-line
24753 gpr register restore below. */
24754 bool lr = info->gp_save_offset + info->gp_size == 0;
24755 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24756 int gpr_ptr_regno = ptr_regno_for_savres (sel);
24758 if (gpr_ptr_regno == 12)
24759 cr_save_regno = 11;
24760 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
24762 else if (REGNO (frame_reg_rtx) == 12)
24763 cr_save_regno = 11;
24765 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
24766 info->cr_save_offset + frame_off,
24767 exit_func);
24770 /* Set LR here to try to overlap restores below. */
24771 if (restore_lr && restoring_GPRs_inline)
24772 restore_saved_lr (0, exit_func);
24774 /* Load exception handler data registers, if needed. */
24775 if (crtl->calls_eh_return)
24777 unsigned int i, regno;
24779 if (TARGET_AIX)
24781 rtx reg = gen_rtx_REG (reg_mode, 2);
24782 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24783 frame_off + RS6000_TOC_SAVE_SLOT));
24786 for (i = 0; ; ++i)
24788 rtx mem;
24790 regno = EH_RETURN_DATA_REGNO (i);
24791 if (regno == INVALID_REGNUM)
24792 break;
24794 /* Note: possible use of r0 here to address SPE regs. */
24795 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
24796 info->ehrd_offset + frame_off
24797 + reg_size * (int) i);
24799 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
24803 /* Restore GPRs. This is done as a PARALLEL if we are using
24804 the load-multiple instructions. */
24805 if (TARGET_SPE_ABI
24806 && info->spe_64bit_regs_used
24807 && info->first_gp_reg_save != 32)
24809 /* Determine whether we can address all of the registers that need
24810 to be saved with an offset from frame_reg_rtx that fits in
24811 the small const field for SPE memory instructions. */
24812 int spe_regs_addressable
24813 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
24814 + reg_size * (32 - info->first_gp_reg_save - 1))
24815 && restoring_GPRs_inline);
24817 if (!spe_regs_addressable)
24819 int ool_adjust = 0;
24820 rtx old_frame_reg_rtx = frame_reg_rtx;
24821 /* Make r11 point to the start of the SPE save area. We worried about
24822 not clobbering it when we were saving registers in the prologue.
24823 There's no need to worry here because the static chain is passed
24824 anew to every function. */
24826 if (!restoring_GPRs_inline)
24827 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
24828 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24829 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
24830 GEN_INT (info->spe_gp_save_offset
24831 + frame_off
24832 - ool_adjust)));
24833 /* Keep the invariant that frame_reg_rtx + frame_off points
24834 at the top of the stack frame. */
24835 frame_off = -info->spe_gp_save_offset + ool_adjust;
24838 if (restoring_GPRs_inline)
24840 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
24842 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24843 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24845 rtx offset, addr, mem, reg;
24847 /* We're doing all this to ensure that the immediate offset
24848 fits into the immediate field of 'evldd'. */
24849 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
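/* The evldd immediate field is small (a scaled unsigned offset,
   roughly 0..248), which is what SPE_CONST_OFFSET_OK enforces.  */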
24851 offset = GEN_INT (spe_offset + reg_size * i);
24852 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
24853 mem = gen_rtx_MEM (V2SImode, addr);
24854 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24856 emit_move_insn (reg, mem);
24859 else
24860 rs6000_emit_savres_rtx (info, frame_reg_rtx,
24861 info->spe_gp_save_offset + frame_off,
24862 info->lr_save_offset + frame_off,
24863 reg_mode,
24864 SAVRES_GPR | SAVRES_LR);
24866 else if (!restoring_GPRs_inline)
24868 /* We are jumping to an out-of-line function. */
24869 rtx ptr_reg;
24870 int end_save = info->gp_save_offset + info->gp_size;
24871 bool can_use_exit = end_save == 0;
24872 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
24873 int ptr_off;
24875 /* Emit stack reset code if we need it. */
24876 ptr_regno = ptr_regno_for_savres (sel);
24877 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24878 if (can_use_exit)
24879 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24880 else if (end_save + frame_off != 0)
24881 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
24882 GEN_INT (end_save + frame_off)));
24883 else if (REGNO (frame_reg_rtx) != ptr_regno)
24884 emit_move_insn (ptr_reg, frame_reg_rtx);
24885 if (REGNO (frame_reg_rtx) == ptr_regno)
24886 frame_off = -end_save;
24888 if (can_use_exit && info->cr_save_p)
24889 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
24891 ptr_off = -end_save;
24892 rs6000_emit_savres_rtx (info, ptr_reg,
24893 info->gp_save_offset + ptr_off,
24894 info->lr_save_offset + ptr_off,
24895 reg_mode, sel);
24897 else if (using_load_multiple)
24899 rtvec p;
24900 p = rtvec_alloc (32 - info->first_gp_reg_save);
24901 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24902 RTVEC_ELT (p, i)
24903 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24904 frame_reg_rtx,
24905 info->gp_save_offset + frame_off + reg_size * i);
24906 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
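/* As in the prologue, on 32-bit targets this PARALLEL typically
   assembles to a single lmw loading first_gp_reg_save..r31.  */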
24908 else
24910 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24911 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24912 emit_insn (gen_frame_load
24913 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24914 frame_reg_rtx,
24915 info->gp_save_offset + frame_off + reg_size * i));
24918 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24920 /* If the frame pointer was used then we can't delay emitting
24921 a REG_CFA_DEF_CFA note. This must happen on the insn that
24922 restores the frame pointer, r31. We may have already emitted
24923 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
24924 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
24925 be harmless if emitted. */
24926 if (frame_pointer_needed)
24928 insn = get_last_insn ();
24929 add_reg_note (insn, REG_CFA_DEF_CFA,
24930 plus_constant (Pmode, frame_reg_rtx, frame_off));
24931 RTX_FRAME_RELATED_P (insn) = 1;
24934 /* Set up cfa_restores. We always need these when
24935 shrink-wrapping. If not shrink-wrapping then we only need
24936 the cfa_restore when the stack location is no longer valid.
24937 The cfa_restores must be emitted on or before the insn that
24938 invalidates the stack, and of course must not be emitted
24939 before the insn that actually does the restore. The latter
24940 is why it is a bad idea to emit the cfa_restores as a group
24941 on the last instruction here that actually does a restore:
24942 That insn may be reordered with respect to others doing
24943 restores. */
24944 if (flag_shrink_wrap
24945 && !restoring_GPRs_inline
24946 && info->first_fp_reg_save == 64)
24947 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24949 for (i = info->first_gp_reg_save; i < 32; i++)
24950 if (!restoring_GPRs_inline
24951 || using_load_multiple
24952 || rs6000_reg_live_or_pic_offset_p (i))
24954 rtx reg = gen_rtx_REG (reg_mode, i);
24956 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24960 if (!restoring_GPRs_inline
24961 && info->first_fp_reg_save == 64)
24963 /* We are jumping to an out-of-line function. */
24964 if (cfa_restores)
24965 emit_cfa_restores (cfa_restores);
24966 return;
24969 if (restore_lr && !restoring_GPRs_inline)
24971 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24972 restore_saved_lr (0, exit_func);
24975 /* Restore fpr's if we need to do it without calling a function. */
24976 if (restoring_FPRs_inline)
24977 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24978 if (save_reg_p (info->first_fp_reg_save + i))
24980 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24981 ? DFmode : SFmode),
24982 info->first_fp_reg_save + i);
24983 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24984 info->fp_save_offset + frame_off + 8 * i));
24985 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24986 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24989 /* If we saved cr, restore it here. Just those that were used. */
24990 if (info->cr_save_p)
24991 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
24993 /* If this is V.4, unwind the stack pointer after all of the loads
24994 have been done, or set up r11 if we are restoring fp out of line. */
24995 ptr_regno = 1;
24996 if (!restoring_FPRs_inline)
24998 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24999 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
25000 ptr_regno = ptr_regno_for_savres (sel);
25003 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
25004 if (REGNO (frame_reg_rtx) == ptr_regno)
25005 frame_off = 0;
25007 if (insn && restoring_FPRs_inline)
25009 if (cfa_restores)
25011 REG_NOTES (insn) = cfa_restores;
25012 cfa_restores = NULL_RTX;
25014 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
25015 RTX_FRAME_RELATED_P (insn) = 1;
25018 if (crtl->calls_eh_return)
25020 rtx sa = EH_RETURN_STACKADJ_RTX;
25021 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
25024 if (!sibcall)
25026 rtvec p;
25027 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
25028 if (! restoring_FPRs_inline)
25030 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
25031 RTVEC_ELT (p, 0) = ret_rtx;
25033 else
25035 if (cfa_restores)
25037 /* We can't hang the cfa_restores off a simple return,
25038 since the shrink-wrap code sometimes uses an existing
25039 return. This means there might be a path from
25040 pre-prologue code to this return, and dwarf2cfi code
25041 wants the eh_frame unwinder state to be the same on
25042 all paths to any point. So we need to emit the
25043 cfa_restores before the return. For -m64 we really
25044 don't need epilogue cfa_restores at all, except for
25045 this irritating dwarf2cfi-with-shrink-wrap
25046 requirement; the stack red-zone means eh_frame info
25047 from the prologue telling the unwinder to restore
25048 from the stack is perfectly good right to the end of
25049 the function. */
25050 emit_insn (gen_blockage ());
25051 emit_cfa_restores (cfa_restores);
25052 cfa_restores = NULL_RTX;
25054 p = rtvec_alloc (2);
25055 RTVEC_ELT (p, 0) = simple_return_rtx;
25058 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
25059 ? gen_rtx_USE (VOIDmode,
25060 gen_rtx_REG (Pmode, LR_REGNO))
25061 : gen_rtx_CLOBBER (VOIDmode,
25062 gen_rtx_REG (Pmode, LR_REGNO)));
25064 /* If we have to restore more than two FP registers, branch to the
25065 restore function. It will return to our caller. */
25066 if (! restoring_FPRs_inline)
25068 int i;
25069 int reg;
25070 rtx sym;
25072 if (flag_shrink_wrap)
25073 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
25075 sym = rs6000_savres_routine_sym (info,
25076 SAVRES_FPR | (lr ? SAVRES_LR : 0));
25077 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
25078 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
25079 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
25081 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
25083 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
25085 RTVEC_ELT (p, i + 4)
25086 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
25087 if (flag_shrink_wrap)
25088 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
25089 cfa_restores);
25093 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
25096 if (cfa_restores)
25098 if (sibcall)
25099 /* Ensure the cfa_restores are hung off an insn that won't
25100 be reordered above other restores. */
25101 emit_insn (gen_blockage ());
25103 emit_cfa_restores (cfa_restores);
25107 /* Write function epilogue. */
25109 static void
25110 rs6000_output_function_epilogue (FILE *file,
25111 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
25113 #if TARGET_MACHO
25114 macho_branch_islands ();
25115 /* Mach-O doesn't support labels at the end of objects, so if
25116 it looks like we might want one, insert a NOP. */
25118 rtx_insn *insn = get_last_insn ();
25119 rtx_insn *deleted_debug_label = NULL;
25120 while (insn
25121 && NOTE_P (insn)
25122 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
25124 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert a nop;
25125 instead set their CODE_LABEL_NUMBER to -1, since otherwise
25126 there would be code generation differences
25127 between -g and -g0. */
25128 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25129 deleted_debug_label = insn;
25130 insn = PREV_INSN (insn);
25132 if (insn
25133 && (LABEL_P (insn)
25134 || (NOTE_P (insn)
25135 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
25136 fputs ("\tnop\n", file);
25137 else if (deleted_debug_label)
25138 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
25139 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25140 CODE_LABEL_NUMBER (insn) = -1;
25142 #endif
25144 /* Output a traceback table here. See /usr/include/sys/debug.h for info
25145 on its format.
25147 We don't output a traceback table if -finhibit-size-directive was
25148 used. The documentation for -finhibit-size-directive reads
25149 ``don't output a @code{.size} assembler directive, or anything
25150 else that would cause trouble if the function is split in the
25151 middle, and the two halves are placed at locations far apart in
25152 memory.'' The traceback table has this property, since it
25153 includes the offset from the start of the function to the
25154 traceback table itself.
25156 System V.4 PowerPC (and the embedded ABI derived from it) uses a
25157 different traceback table. */
25158 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25159 && ! flag_inhibit_size_directive
25160 && rs6000_traceback != traceback_none && !cfun->is_thunk)
25162 const char *fname = NULL;
25163 const char *language_string = lang_hooks.name;
25164 int fixed_parms = 0, float_parms = 0, parm_info = 0;
25165 int i;
25166 int optional_tbtab;
25167 rs6000_stack_t *info = rs6000_stack_info ();
25169 if (rs6000_traceback == traceback_full)
25170 optional_tbtab = 1;
25171 else if (rs6000_traceback == traceback_part)
25172 optional_tbtab = 0;
25173 else
25174 optional_tbtab = !optimize_size && !TARGET_ELF;
25176 if (optional_tbtab)
25178 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
25179 while (*fname == '.') /* V.4 encodes . in the name */
25180 fname++;
25182 /* Need label immediately before tbtab, so we can compute
25183 its offset from the function start. */
25184 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25185 ASM_OUTPUT_LABEL (file, fname);
25188 /* The .tbtab pseudo-op can only be used for the first eight
25189 expressions, since it can't handle the possibly variable
25190 length fields that follow. However, if you omit the optional
25191 fields, the assembler outputs zeros for all optional fields
25192 anyway, giving each variable length field its minimum length
25193 (as defined in sys/debug.h). Thus we cannot use the .tbtab
25194 pseudo-op at all. */
25196 /* An all-zero word flags the start of the tbtab, for debuggers
25197 that have to find it by searching forward from the entry
25198 point or from the current pc. */
25199 fputs ("\t.long 0\n", file);
25201 /* Tbtab format type. Use format type 0. */
25202 fputs ("\t.byte 0,", file);
25204 /* Language type. Unfortunately, there does not seem to be any
25205 official way to discover the language being compiled, so we
25206 use language_string.
25207 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
25208 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
25209 a number, so for now use 9. LTO, Go, and UPC aren't assigned numbers
25210 either, so for now use 0. */
25211 if (! strcmp (language_string, "GNU C")
25212 || ! strcmp (language_string, "GNU GIMPLE")
25213 || ! strcmp (language_string, "GNU Go")
25214 || ! strcmp (language_string, "GNU UPC"))
25215 i = 0;
25216 else if (! strcmp (language_string, "GNU F77")
25217 || ! strcmp (language_string, "GNU Fortran"))
25218 i = 1;
25219 else if (! strcmp (language_string, "GNU Pascal"))
25220 i = 2;
25221 else if (! strcmp (language_string, "GNU Ada"))
25222 i = 3;
25223 else if (! strcmp (language_string, "GNU C++")
25224 || ! strcmp (language_string, "GNU Objective-C++"))
25225 i = 9;
25226 else if (! strcmp (language_string, "GNU Java"))
25227 i = 13;
25228 else if (! strcmp (language_string, "GNU Objective-C"))
25229 i = 14;
25230 else
25231 gcc_unreachable ();
25232 fprintf (file, "%d,", i);
25234 /* 8 single bit fields: global linkage (not set for C extern linkage,
25235 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
25236 from start of procedure stored in tbtab, internal function, function
25237 has controlled storage, function has no toc, function uses fp,
25238 function logs/aborts fp operations. */
25239 /* Assume that fp operations are used if any fp reg must be saved. */
25240 fprintf (file, "%d,",
25241 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
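/* For instance, with a full traceback table (optional_tbtab == 1) and
   at least one FPR saved, the byte above is (1 << 5) | (1 << 1) == 34.  */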
25243 /* 6 bitfields: function is interrupt handler, name present in
25244 proc table, function calls alloca, on condition directives
25245 (controls stack walks, 3 bits), saves condition reg, saves
25246 link reg. */
25247 /* The `function calls alloca' bit seems to be set whenever reg 31 is
25248 set up as a frame pointer, even when there is no alloca call. */
25249 fprintf (file, "%d,",
25250 ((optional_tbtab << 6)
25251 | ((optional_tbtab & frame_pointer_needed) << 5)
25252 | (info->cr_save_p << 1)
25253 | (info->lr_save_p)));
25255 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
25256 (6 bits). */
25257 fprintf (file, "%d,",
25258 (info->push_p << 7) | (64 - info->first_fp_reg_save));
25260 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
25261 fprintf (file, "%d,", (32 - first_reg_to_save ()));
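/* A worked example: for a frame that allocates stack space and saves
   f62-f63 and r30-r31, the two bytes above are
   (1 << 7) | (64 - 62) == 130 and 32 - 30 == 2 respectively.  */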
25263 if (optional_tbtab)
25265 /* Compute the parameter info from the function decl argument
25266 list. */
25267 tree decl;
25268 int next_parm_info_bit = 31;
25270 for (decl = DECL_ARGUMENTS (current_function_decl);
25271 decl; decl = DECL_CHAIN (decl))
25273 rtx parameter = DECL_INCOMING_RTL (decl);
25274 enum machine_mode mode = GET_MODE (parameter);
25276 if (GET_CODE (parameter) == REG)
25278 if (SCALAR_FLOAT_MODE_P (mode))
25280 int bits;
25282 float_parms++;
25284 switch (mode)
25286 case SFmode:
25287 case SDmode:
25288 bits = 0x2;
25289 break;
25291 case DFmode:
25292 case DDmode:
25293 case TFmode:
25294 case TDmode:
25295 bits = 0x3;
25296 break;
25298 default:
25299 gcc_unreachable ();
25302 /* If only one bit will fit, don't or in this entry. */
25303 if (next_parm_info_bit > 0)
25304 parm_info |= (bits << (next_parm_info_bit - 1));
25305 next_parm_info_bit -= 2;
25307 else
25309 fixed_parms += ((GET_MODE_SIZE (mode)
25310 + (UNITS_PER_WORD - 1))
25311 / UNITS_PER_WORD);
25312 next_parm_info_bit -= 1;
25318 /* Number of fixed point parameters. */
25319 /* This is actually the number of words of fixed point parameters;
25320 an 8 byte struct counts as 2, so the maximum value is 8. */
25321 fprintf (file, "%d,", fixed_parms);
25323 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25324 all on stack. */
25325 /* This is actually the number of fp registers that hold parameters;
25326 and thus the maximum value is 13. */
25327 /* Set parameters on stack bit if parameters are not in their original
25328 registers, regardless of whether they are on the stack? Xlc
25329 seems to set the bit when not optimizing. */
25330 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
25332 if (! optional_tbtab)
25333 return;
25335 /* Optional fields follow. Some are variable length. */
25337 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25338 11 double float. */
25339 /* There is an entry for each parameter in a register, in the order that
25340 they occur in the parameter list. Any intervening arguments on the
25341 stack are ignored. If the list overflows a long (max possible length
25342 34 bits) then completely leave off all elements that don't fit. */
25343 /* Only emit this long if there was at least one parameter. */
25344 if (fixed_parms || float_parms)
25345 fprintf (file, "\t.long %d\n", parm_info);
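/* A sketch: for a function whose first three parameters (int, double,
   float) all arrive in registers, the encoded fields are 0 (one bit,
   fixed), 11 (double) and 10 (single), left justified, so the word
   above is 0x70000000.  */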
25347 /* Offset from start of code to tb table. */
25348 fputs ("\t.long ", file);
25349 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25350 RS6000_OUTPUT_BASENAME (file, fname);
25351 putc ('-', file);
25352 rs6000_output_function_entry (file, fname);
25353 putc ('\n', file);
25355 /* Interrupt handler mask. */
25356 /* Omit this long, since we never set the interrupt handler bit
25357 above. */
25359 /* Number of CTL (controlled storage) anchors. */
25360 /* Omit this long, since the has_ctl bit is never set above. */
25362 /* Displacement into stack of each CTL anchor. */
25363 /* Omit this list of longs, because there are no CTL anchors. */
25365 /* Length of function name. */
25366 if (*fname == '*')
25367 ++fname;
25368 fprintf (file, "\t.short %d\n", (int) strlen (fname));
25370 /* Function name. */
25371 assemble_string (fname, strlen (fname));
25373 /* Register for alloca automatic storage; this is always reg 31.
25374 Only emit this if the alloca bit was set above. */
25375 if (frame_pointer_needed)
25376 fputs ("\t.byte 31\n", file);
25378 fputs ("\t.align 2\n", file);
25382 /* A C compound statement that outputs the assembler code for a thunk
25383 function, used to implement C++ virtual function calls with
25384 multiple inheritance. The thunk acts as a wrapper around a virtual
25385 function, adjusting the implicit object parameter before handing
25386 control off to the real function.
25388 First, emit code to add the integer DELTA to the location that
25389 contains the incoming first argument. Assume that this argument
25390 contains a pointer, and is the one used to pass the `this' pointer
25391 in C++. This is the incoming argument *before* the function
25392 prologue, e.g. `%o0' on a sparc. The addition must preserve the
25393 values of all other incoming arguments.
25395 After the addition, emit code to jump to FUNCTION, which is a
25396 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
25397 not touch the return address. Hence returning from FUNCTION will
25398 return to whoever called the current `thunk'.
25400 The effect must be as if FUNCTION had been called directly with the
25401 adjusted first argument. This macro is responsible for emitting
25402 all of the code for a thunk function; output_function_prologue()
25403 and output_function_epilogue() are not invoked.
25405 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
25406 been extracted from it.) It might possibly be useful on some
25407 targets, but probably not.
25409 If you do not define this macro, the target-independent code in the
25410 C++ frontend will generate a less efficient heavyweight thunk that
25411 calls FUNCTION instead of jumping to it. The generic approach does
25412 not support varargs. */
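/* For example, given

   struct A { virtual void f (); };
   struct B { virtual void g (); };
   struct C : A, B { void g (); };

   calling g through a `B *' that points into a C object goes through a
   thunk, which adjusts `this' by the offset of the B subobject within C
   before jumping to C::g.  */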
25414 static void
25415 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
25416 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
25417 tree function)
25419 rtx this_rtx, funexp;
25420 rtx_insn *insn;
25422 reload_completed = 1;
25423 epilogue_completed = 1;
25425 /* Mark the end of the (empty) prologue. */
25426 emit_note (NOTE_INSN_PROLOGUE_END);
25428 /* Find the "this" pointer. If the function returns a structure,
25429 the structure return pointer is in r3. */
25430 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25431 this_rtx = gen_rtx_REG (Pmode, 4);
25432 else
25433 this_rtx = gen_rtx_REG (Pmode, 3);
25435 /* Apply the constant offset, if required. */
25436 if (delta)
25437 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25439 /* Apply the offset from the vtable, if required. */
25440 if (vcall_offset)
25442 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25443 rtx tmp = gen_rtx_REG (Pmode, 12);
25445 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
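/* Apply the vcall offset directly when it fits in a signed 16-bit
   displacement; otherwise add it to the base register first and load
   from offset 0.  */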
25446 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25448 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25449 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25451 else
25453 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25455 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25457 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25460 /* Generate a tail call to the target function. */
25461 if (!TREE_USED (function))
25463 assemble_external (function);
25464 TREE_USED (function) = 1;
25466 funexp = XEXP (DECL_RTL (function), 0);
25467 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25469 #if TARGET_MACHO
25470 if (MACHOPIC_INDIRECT)
25471 funexp = machopic_indirect_call_target (funexp);
25472 #endif
25474 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25475 generate sibcall RTL explicitly. */
25476 insn = emit_call_insn (
25477 gen_rtx_PARALLEL (VOIDmode,
25478 gen_rtvec (4,
25479 gen_rtx_CALL (VOIDmode,
25480 funexp, const0_rtx),
25481 gen_rtx_USE (VOIDmode, const0_rtx),
25482 gen_rtx_USE (VOIDmode,
25483 gen_rtx_REG (SImode,
25484 LR_REGNO)),
25485 simple_return_rtx)));
25486 SIBLING_CALL_P (insn) = 1;
25487 emit_barrier ();
25489 /* Ensure we have a global entry point for the thunk. ??? We could
25490 avoid that if the target routine doesn't need a global entry point,
25491 but we do not know whether this is the case at this point. */
25492 if (DEFAULT_ABI == ABI_ELFv2)
25493 cfun->machine->r2_setup_needed = true;
25495 /* Run just enough of rest_of_compilation to get the insns emitted.
25496 There's not really enough bulk here to make other passes such as
25497 instruction scheduling worth while. Note that use_thunk calls
25498 assemble_start_function and assemble_end_function. */
25499 insn = get_insns ();
25500 shorten_branches (insn);
25501 final_start_function (insn, file, 1);
25502 final (insn, file, 1);
25503 final_end_function ();
25505 reload_completed = 0;
25506 epilogue_completed = 0;
25509 /* A quick summary of the various types of 'constant-pool tables'
25510 under PowerPC:
25512 Target Flags Name One table per
25513 AIX (none) AIX TOC object file
25514 AIX -mfull-toc AIX TOC object file
25515 AIX -mminimal-toc AIX minimal TOC translation unit
25516 SVR4/EABI (none) SVR4 SDATA object file
25517 SVR4/EABI -fpic SVR4 pic object file
25518 SVR4/EABI -fPIC SVR4 PIC translation unit
25519 SVR4/EABI -mrelocatable EABI TOC function
25520 SVR4/EABI -maix AIX TOC object file
25521 SVR4/EABI -maix -mminimal-toc
25522 AIX minimal TOC translation unit
25524 Name Reg. Set by entries contains:
25525 made by addrs? fp? sum?
25527 AIX TOC 2 crt0 as Y option option
25528 AIX minimal TOC 30 prolog gcc Y Y option
25529 SVR4 SDATA 13 crt0 gcc N Y N
25530 SVR4 pic 30 prolog ld Y not yet N
25531 SVR4 PIC 30 prolog gcc Y option option
25532 EABI TOC 30 prolog gcc Y option option
25536 /* Hash functions for the hash table. */
25538 static unsigned
25539 rs6000_hash_constant (rtx k)
25541 enum rtx_code code = GET_CODE (k);
25542 enum machine_mode mode = GET_MODE (k);
25543 unsigned result = (code << 3) ^ mode;
25544 const char *format;
25545 int flen, fidx;
25547 format = GET_RTX_FORMAT (code);
25548 flen = strlen (format);
25549 fidx = 0;
25551 switch (code)
25553 case LABEL_REF:
25554 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
25556 case CONST_WIDE_INT:
25558 int i;
25559 flen = CONST_WIDE_INT_NUNITS (k);
25560 for (i = 0; i < flen; i++)
25561 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
25562 return result;
25565 case CONST_DOUBLE:
25566 if (mode != VOIDmode)
25567 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
25568 flen = 2;
25569 break;
25571 case CODE_LABEL:
25572 fidx = 3;
25573 break;
25575 default:
25576 break;
25579 for (; fidx < flen; fidx++)
25580 switch (format[fidx])
25582 case 's':
25584 unsigned i, len;
25585 const char *str = XSTR (k, fidx);
25586 len = strlen (str);
25587 result = result * 613 + len;
25588 for (i = 0; i < len; i++)
25589 result = result * 613 + (unsigned) str[i];
25590 break;
25592 case 'u':
25593 case 'e':
25594 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
25595 break;
25596 case 'i':
25597 case 'n':
25598 result = result * 613 + (unsigned) XINT (k, fidx);
25599 break;
25600 case 'w':
25601 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
25602 result = result * 613 + (unsigned) XWINT (k, fidx);
25603 else
25605 size_t i;
25606 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
25607 result = result * 613 + (unsigned) (XWINT (k, fidx)
25608 >> CHAR_BIT * i);
25610 break;
25611 case '0':
25612 break;
25613 default:
25614 gcc_unreachable ();
25617 return result;
25620 static unsigned
25621 toc_hash_function (const void *hash_entry)
25623 const struct toc_hash_struct *thc =
25624 (const struct toc_hash_struct *) hash_entry;
25625 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
25628 /* Compare H1 and H2 for equivalence. */
25630 static int
25631 toc_hash_eq (const void *h1, const void *h2)
25633 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
25634 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
25636 if (((const struct toc_hash_struct *) h1)->key_mode
25637 != ((const struct toc_hash_struct *) h2)->key_mode)
25638 return 0;
25640 return rtx_equal_p (r1, r2);
25643 /* These are the names given by the C++ front-end to vtables, and
25644 vtable-like objects. Ideally, this logic should not be here;
25645 instead, there should be some programmatic way of inquiring as
25646 to whether or not an object is a vtable. */
25648 #define VTABLE_NAME_P(NAME) \
25649 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
25650 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
25651 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
25652 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
25653 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
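/* Under the Itanium C++ ABI, for example, "_ZTV3Foo" names the vtable
   and "_ZTI3Foo" the typeinfo of a class Foo.  */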
25655 #ifdef NO_DOLLAR_IN_LABEL
25656 /* Return a GGC-allocated character string translating dollar signs in
25657 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
25659 const char *
25660 rs6000_xcoff_strip_dollar (const char *name)
25662 char *strip, *p;
25663 const char *q;
25664 size_t len;
25666 q = (const char *) strchr (name, '$');
25668 if (q == 0 || q == name)
25669 return name;
25671 len = strlen (name);
25672 strip = XALLOCAVEC (char, len + 1);
25673 strcpy (strip, name);
25674 p = strip + (q - name);
25675 while (p)
25677 *p = '_';
25678 p = strchr (p + 1, '$');
25681 return ggc_alloc_string (strip, len);
25683 #endif
25685 void
25686 rs6000_output_symbol_ref (FILE *file, rtx x)
25688 /* Currently C++ toc references to vtables can be emitted before it
25689 is decided whether the vtable is public or private. If this is
25690 the case, then the linker will eventually complain that there is
25691 a reference to an unknown section. Thus, for vtables only,
25692 we emit the TOC reference to reference the symbol and not the
25693 section. */
25694 const char *name = XSTR (x, 0);
25696 if (VTABLE_NAME_P (name))
25698 RS6000_OUTPUT_BASENAME (file, name);
25700 else
25701 assemble_name (file, name);
25704 /* Output a TOC entry. We derive the entry name from what is being
25705 written. */
25707 void
25708 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
25710 char buf[256];
25711 const char *name = buf;
25712 rtx base = x;
25713 HOST_WIDE_INT offset = 0;
25715 gcc_assert (!TARGET_NO_TOC);
25717 /* When the linker won't eliminate them, don't output duplicate
25718 TOC entries (this happens on AIX if there is any kind of TOC,
25719 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
25720 CODE_LABELs. */
25721 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
25723 struct toc_hash_struct *h;
25724 void * * found;
25726 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
25727 time because GGC is not initialized at that point. */
25728 if (toc_hash_table == NULL)
25729 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
25730 toc_hash_eq, NULL);
25732 h = ggc_alloc<toc_hash_struct> ();
25733 h->key = x;
25734 h->key_mode = mode;
25735 h->labelno = labelno;
25737 found = htab_find_slot (toc_hash_table, h, INSERT);
25738 if (*found == NULL)
25739 *found = h;
25740 else /* This is indeed a duplicate.
25741 Set this label equal to that label. */
25743 fputs ("\t.set ", file);
25744 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25745 fprintf (file, "%d,", labelno);
25746 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25747 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25748 found)->labelno));
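/* Assuming the XCOFF "LC.." internal label prefix, the duplicate then
   assembles as e.g. ".set LC..12,LC..4".  */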
25750 #ifdef HAVE_AS_TLS
25751 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
25752 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
25753 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
25755 fputs ("\t.set ", file);
25756 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25757 fprintf (file, "%d,", labelno);
25758 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25759 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25760 found)->labelno));
25762 #endif
25763 return;
25767 /* If we're going to put a double constant in the TOC, make sure it's
25768 aligned properly when strict alignment is on. */
25769 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
25770 && STRICT_ALIGNMENT
25771 && GET_MODE_BITSIZE (mode) >= 64
25772 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
25773 ASM_OUTPUT_ALIGN (file, 3);
25776 (*targetm.asm_out.internal_label) (file, "LC", labelno);
25778 /* Handle FP constants specially. Note that if we have a minimal
25779 TOC, things we put here aren't actually in the TOC, so we can allow
25780 FP constants. */
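/* A sketch of the output on 64-bit AIX with a full TOC for the
   DFmode constant 1.0:

	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */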
25781 if (GET_CODE (x) == CONST_DOUBLE
25782 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
25784 REAL_VALUE_TYPE rv;
25785 long k[4];
25787 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25788 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25789 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
25790 else
25791 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
25793 if (TARGET_64BIT)
25795 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25796 fputs (DOUBLE_INT_ASM_OP, file);
25797 else
25798 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25799 k[0] & 0xffffffff, k[1] & 0xffffffff,
25800 k[2] & 0xffffffff, k[3] & 0xffffffff);
25801 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
25802 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25803 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
25804 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
25805 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
25806 return;
25808 else
25810 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25811 fputs ("\t.long ", file);
25812 else
25813 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25814 k[0] & 0xffffffff, k[1] & 0xffffffff,
25815 k[2] & 0xffffffff, k[3] & 0xffffffff);
25816 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
25817 k[0] & 0xffffffff, k[1] & 0xffffffff,
25818 k[2] & 0xffffffff, k[3] & 0xffffffff);
25819 return;
25822 else if (GET_CODE (x) == CONST_DOUBLE
25823 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
25825 REAL_VALUE_TYPE rv;
25826 long k[2];
25828 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25830 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25831 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
25832 else
25833 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
25835 if (TARGET_64BIT)
25837 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25838 fputs (DOUBLE_INT_ASM_OP, file);
25839 else
25840 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25841 k[0] & 0xffffffff, k[1] & 0xffffffff);
25842 fprintf (file, "0x%lx%08lx\n",
25843 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25844 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
25845 return;
25847 else
25849 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25850 fputs ("\t.long ", file);
25851 else
25852 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25853 k[0] & 0xffffffff, k[1] & 0xffffffff);
25854 fprintf (file, "0x%lx,0x%lx\n",
25855 k[0] & 0xffffffff, k[1] & 0xffffffff);
25856 return;
25859 else if (GET_CODE (x) == CONST_DOUBLE
25860 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
25862 REAL_VALUE_TYPE rv;
25863 long l;
25865 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25866 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25867 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
25868 else
25869 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
25871 if (TARGET_64BIT)
25873 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25874 fputs (DOUBLE_INT_ASM_OP, file);
25875 else
25876 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25877 if (WORDS_BIG_ENDIAN)
25878 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
25879 else
25880 fprintf (file, "0x%lx\n", l & 0xffffffff);
25881 return;
25883 else
25885 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25886 fputs ("\t.long ", file);
25887 else
25888 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25889 fprintf (file, "0x%lx\n", l & 0xffffffff);
25890 return;
25893 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
25895 unsigned HOST_WIDE_INT low;
25896 HOST_WIDE_INT high;
25898 low = INTVAL (x) & 0xffffffff;
25899 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
25901 /* TOC entries are always Pmode-sized, so on big-endian targets
25902 smaller integer constants in the TOC need to be padded.
25903 (This is still a win over putting the constants in
25904 a separate constant pool, because then we'd have
25905 to have both a TOC entry _and_ the actual constant.)
25907 For a 32-bit target, CONST_INT values are loaded and shifted
25908 entirely within `low' and can be stored in one TOC entry. */
25910 /* Constants wider than a pointer are not handled on 64-bit targets; it would be easy to make this work, but it doesn't now. */
25911 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
25913 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
25915 low |= high << 32;
25916 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
25917 high = (HOST_WIDE_INT) low >> 32;
25918 low &= 0xffffffff;
25921 if (TARGET_64BIT)
25923 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25924 fputs (DOUBLE_INT_ASM_OP, file);
25925 else
25926 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25927 (long) high & 0xffffffff, (long) low & 0xffffffff);
25928 fprintf (file, "0x%lx%08lx\n",
25929 (long) high & 0xffffffff, (long) low & 0xffffffff);
25930 return;
25932 else
25934 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
25936 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25937 fputs ("\t.long ", file);
25938 else
25939 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25940 (long) high & 0xffffffff, (long) low & 0xffffffff);
25941 fprintf (file, "0x%lx,0x%lx\n",
25942 (long) high & 0xffffffff, (long) low & 0xffffffff);
25944 else
25946 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25947 fputs ("\t.long ", file);
25948 else
25949 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
25950 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
25952 return;
25956 if (GET_CODE (x) == CONST)
25958 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
25959 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
25961 base = XEXP (XEXP (x, 0), 0);
25962 offset = INTVAL (XEXP (XEXP (x, 0), 1));
25965 switch (GET_CODE (base))
25967 case SYMBOL_REF:
25968 name = XSTR (base, 0);
25969 break;
25971 case LABEL_REF:
25972 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
25973 CODE_LABEL_NUMBER (XEXP (base, 0)));
25974 break;
25976 case CODE_LABEL:
25977 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
25978 break;
25980 default:
25981 gcc_unreachable ();
25984 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25985 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
25986 else
25988 fputs ("\t.tc ", file);
25989 RS6000_OUTPUT_BASENAME (file, name);
25991 if (offset < 0)
25992 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
25993 else if (offset)
25994 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
25996 /* Mark large TOC symbols on AIX with [TE] so they are mapped
25997 after other TOC symbols, reducing overflow of small TOC access
25998 to [TC] symbols. */
25999 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
26000 ? "[TE]," : "[TC],", file);
26003 /* Currently C++ toc references to vtables can be emitted before it
26004 is decided whether the vtable is public or private. If this is
26005 the case, then the linker will eventually complain that there is
26006 a TOC reference to an unknown section. Thus, for vtables only,
26007 we emit the TOC reference to reference the symbol and not the
26008 section. */
26009 if (VTABLE_NAME_P (name))
26011 RS6000_OUTPUT_BASENAME (file, name);
26012 if (offset < 0)
26013 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
26014 else if (offset > 0)
26015 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
26017 else
26018 output_addr_const (file, x);
26020 #if HAVE_AS_TLS
26021 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
26022 && SYMBOL_REF_TLS_MODEL (base) != 0)
26024 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
26025 fputs ("@le", file);
26026 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
26027 fputs ("@ie", file);
26028 /* Use global-dynamic for local-dynamic. */
26029 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
26030 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
26032 putc ('\n', file);
26033 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
26034 fputs ("\t.tc .", file);
26035 RS6000_OUTPUT_BASENAME (file, name);
26036 fputs ("[TC],", file);
26037 output_addr_const (file, x);
26038 fputs ("@m", file);
26041 #endif
26043 putc ('\n', file);
26046 /* Output an assembler pseudo-op to write an ASCII string of N characters
26047 starting at P to FILE.
26049 On the RS/6000, we have to do this using the .byte operation and
26050 write out special characters outside the quoted string.
26051 Also, the assembler is broken; very long strings are truncated,
26052 so we must artificially break them up early. */
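/* For example, the five input bytes `a', `b', `"', `c', newline come
   out as (a sketch):

	.byte "ab""c"
	.byte 10
 */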
26054 void
26055 output_ascii (FILE *file, const char *p, int n)
26057 char c;
26058 int i, count_string;
26059 const char *for_string = "\t.byte \"";
26060 const char *for_decimal = "\t.byte ";
26061 const char *to_close = NULL;
26063 count_string = 0;
26064 for (i = 0; i < n; i++)
26066 c = *p++;
26067 if (c >= ' ' && c < 0177)
26069 if (for_string)
26070 fputs (for_string, file);
26071 putc (c, file);
26073 /* Write two quotes to get one. */
26074 if (c == '"')
26076 putc (c, file);
26077 ++count_string;
26080 for_string = NULL;
26081 for_decimal = "\"\n\t.byte ";
26082 to_close = "\"\n";
26083 ++count_string;
26085 if (count_string >= 512)
26087 fputs (to_close, file);
26089 for_string = "\t.byte \"";
26090 for_decimal = "\t.byte ";
26091 to_close = NULL;
26092 count_string = 0;
26095 else
26097 if (for_decimal)
26098 fputs (for_decimal, file);
26099 fprintf (file, "%d", c);
26101 for_string = "\n\t.byte \"";
26102 for_decimal = ", ";
26103 to_close = "\n";
26104 count_string = 0;
26108 /* Now close the string if we have written one. Then end the line. */
26109 if (to_close)
26110 fputs (to_close, file);
26113 /* Generate a unique section name for FILENAME for a section type
26114 represented by SECTION_DESC. Output goes into BUF.
26116 SECTION_DESC can be any string, as long as it is different for each
26117 possible section type.
26119 We name the section in the same manner as xlc. The name begins with an
26120 underscore followed by the filename (after stripping any leading directory
26121 names) with the last period replaced by the string SECTION_DESC. If
26122 FILENAME does not contain a period, SECTION_DESC is appended to the end of
26123 the name. */
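/* For instance, with the hypothetical arguments FILENAME "src/foo.c"
   and SECTION_DESC ".bss", the generated name is "_foo.bss"; other
   non-alphanumeric characters are dropped.  */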
26125 void
26126 rs6000_gen_section_name (char **buf, const char *filename,
26127 const char *section_desc)
26129 const char *q, *after_last_slash, *last_period = 0;
26130 char *p;
26131 int len;
26133 after_last_slash = filename;
26134 for (q = filename; *q; q++)
26136 if (*q == '/')
26137 after_last_slash = q + 1;
26138 else if (*q == '.')
26139 last_period = q;
26142 len = strlen (after_last_slash) + strlen (section_desc) + 2;
26143 *buf = (char *) xmalloc (len);
26145 p = *buf;
26146 *p++ = '_';
26148 for (q = after_last_slash; *q; q++)
26150 if (q == last_period)
26152 strcpy (p, section_desc);
26153 p += strlen (section_desc);
26154 break;
26157 else if (ISALNUM (*q))
26158 *p++ = *q;
26161 if (last_period == 0)
26162 strcpy (p, section_desc);
26163 else
26164 *p = '\0';
26167 /* Emit profile function. */
26169 void
26170 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
26172 /* Non-standard profiling for kernels, which just saves LR then calls
26173 _mcount without worrying about arg saves. The idea is to change
26174 the function prologue as little as possible as it isn't easy to
26175 account for arg save/restore code added just for _mcount. */
26176 if (TARGET_PROFILE_KERNEL)
26177 return;
26179 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26181 #ifndef NO_PROFILE_COUNTERS
26182 # define NO_PROFILE_COUNTERS 0
26183 #endif
26184 if (NO_PROFILE_COUNTERS)
26185 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26186 LCT_NORMAL, VOIDmode, 0);
26187 else
26189 char buf[30];
26190 const char *label_name;
26191 rtx fun;
26193 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26194 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
26195 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
26197 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26198 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
26201 else if (DEFAULT_ABI == ABI_DARWIN)
26203 const char *mcount_name = RS6000_MCOUNT;
26204 int caller_addr_regno = LR_REGNO;
26206 /* Be conservative and always set this, at least for now. */
26207 crtl->uses_pic_offset_table = 1;
26209 #if TARGET_MACHO
26210 /* For PIC code, set up a stub and collect the caller's address
26211 from r0, which is where the prologue puts it. */
26212 if (MACHOPIC_INDIRECT
26213 && crtl->uses_pic_offset_table)
26214 caller_addr_regno = 0;
26215 #endif
26216 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
26217 LCT_NORMAL, VOIDmode, 1,
26218 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
26222 /* Write function profiler code. */
26224 void
26225 output_function_profiler (FILE *file, int labelno)
26227 char buf[100];
26229 switch (DEFAULT_ABI)
26231 default:
26232 gcc_unreachable ();
26234 case ABI_V4:
26235 if (!TARGET_32BIT)
26237 warning (0, "no profiling of 64-bit code for this ABI");
26238 return;
26240 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26241 fprintf (file, "\tmflr %s\n", reg_names[0]);
26242 if (NO_PROFILE_COUNTERS)
26244 asm_fprintf (file, "\tstw %s,4(%s)\n",
26245 reg_names[0], reg_names[1]);
26247 else if (TARGET_SECURE_PLT && flag_pic)
26249 if (TARGET_LINK_STACK)
26251 char name[32];
26252 get_ppc476_thunk_name (name);
26253 asm_fprintf (file, "\tbl %s\n", name);
26255 else
26256 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
26257 asm_fprintf (file, "\tstw %s,4(%s)\n",
26258 reg_names[0], reg_names[1]);
26259 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26260 asm_fprintf (file, "\taddis %s,%s,",
26261 reg_names[12], reg_names[12]);
26262 assemble_name (file, buf);
26263 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
26264 assemble_name (file, buf);
26265 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
26267 else if (flag_pic == 1)
26269 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
26270 asm_fprintf (file, "\tstw %s,4(%s)\n",
26271 reg_names[0], reg_names[1]);
26272 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26273 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
26274 assemble_name (file, buf);
26275 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
26277 else if (flag_pic > 1)
26279 asm_fprintf (file, "\tstw %s,4(%s)\n",
26280 reg_names[0], reg_names[1]);
26281 /* Now, we need to get the address of the label. */
26282 if (TARGET_LINK_STACK)
26284 char name[32];
26285 get_ppc476_thunk_name (name);
26286 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
26287 assemble_name (file, buf);
26288 fputs ("-.\n1:", file);
26289 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26290 asm_fprintf (file, "\taddi %s,%s,4\n",
26291 reg_names[11], reg_names[11]);
26293 else
26295 fputs ("\tbcl 20,31,1f\n\t.long ", file);
26296 assemble_name (file, buf);
26297 fputs ("-.\n1:", file);
26298 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26300 asm_fprintf (file, "\tlwz %s,0(%s)\n",
26301 reg_names[0], reg_names[11]);
26302 asm_fprintf (file, "\tadd %s,%s,%s\n",
26303 reg_names[0], reg_names[0], reg_names[11]);
26305 else
26307 asm_fprintf (file, "\tlis %s,", reg_names[12]);
26308 assemble_name (file, buf);
26309 fputs ("@ha\n", file);
26310 asm_fprintf (file, "\tstw %s,4(%s)\n",
26311 reg_names[0], reg_names[1]);
26312 asm_fprintf (file, "\tla %s,", reg_names[0]);
26313 assemble_name (file, buf);
26314 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
26317 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
26318 fprintf (file, "\tbl %s%s\n",
26319 RS6000_MCOUNT, flag_pic ? "@plt" : "");
26320 break;
26322 case ABI_AIX:
26323 case ABI_ELFv2:
26324 case ABI_DARWIN:
26325 /* Don't do anything, done in output_profile_hook (). */
26326 break;
26332 /* The following variable holds the last insn issued by the scheduler. */
26334 static rtx last_scheduled_insn;
26336 /* The following variable helps to balance the issuing of load and
26337 store instructions. */
26339 static int load_store_pendulum;
26341 /* Power4 load update and store update instructions are cracked into a
26342 load or store and an integer insn which are executed in the same cycle.
26343 Branches have their own dispatch slot which does not count against the
26344 GCC issue rate, but it changes the program flow so there are no other
26345 instructions to issue in this cycle. */
26347 static int
26348 rs6000_variable_issue_1 (rtx_insn *insn, int more)
26350 last_scheduled_insn = insn;
26351 if (GET_CODE (PATTERN (insn)) == USE
26352 || GET_CODE (PATTERN (insn)) == CLOBBER)
26354 cached_can_issue_more = more;
26355 return cached_can_issue_more;
26358 if (insn_terminates_group_p (insn, current_group))
26360 cached_can_issue_more = 0;
26361 return cached_can_issue_more;
26364 /* If the insn is not recognized, it has no reservation; assume it consumes no issue slot. */
26365 if (recog_memoized (insn) < 0)
26366 return more;
26368 if (rs6000_sched_groups)
26370 if (is_microcoded_insn (insn))
26371 cached_can_issue_more = 0;
26372 else if (is_cracked_insn (insn))
26373 cached_can_issue_more = more > 2 ? more - 2 : 0;
26374 else
26375 cached_can_issue_more = more - 1;
26377 return cached_can_issue_more;
26380 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
26381 return 0;
26383 cached_can_issue_more = more - 1;
26384 return cached_can_issue_more;
26387 static int
26388 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
26390 int r = rs6000_variable_issue_1 (insn, more);
26391 if (verbose)
26392 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
26393 return r;
26396 /* Adjust the cost of a scheduling dependency. Return the new cost of
26397 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
26399 static int
26400 rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
26402 enum attr_type attr_type;
26404 if (! recog_memoized (insn))
26405 return 0;
26407 switch (REG_NOTE_KIND (link))
26409 case REG_DEP_TRUE:
26411 /* Data dependency; DEP_INSN writes a register that INSN reads
26412 some cycles later. */
26414 /* Separate a load from a narrower, dependent store. */
26415 if (rs6000_sched_groups
26416 && GET_CODE (PATTERN (insn)) == SET
26417 && GET_CODE (PATTERN (dep_insn)) == SET
26418 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
26419 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
26420 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
26421 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
26422 return cost + 14;
26424 attr_type = get_attr_type (insn);
26426 switch (attr_type)
26428 case TYPE_JMPREG:
26429 /* Tell the first scheduling pass about the latency between
26430 a mtctr and bctr (and mtlr and br/blr). The first
26431 scheduling pass will not know about this latency since
26432 the mtctr instruction, which has the latency associated
26433 to it, will be generated by reload. */
26434 return 4;
26435 case TYPE_BRANCH:
26436 /* Leave some extra cycles between a compare and its
26437 dependent branch, to inhibit expensive mispredicts. */
26438 if ((rs6000_cpu_attr == CPU_PPC603
26439 || rs6000_cpu_attr == CPU_PPC604
26440 || rs6000_cpu_attr == CPU_PPC604E
26441 || rs6000_cpu_attr == CPU_PPC620
26442 || rs6000_cpu_attr == CPU_PPC630
26443 || rs6000_cpu_attr == CPU_PPC750
26444 || rs6000_cpu_attr == CPU_PPC7400
26445 || rs6000_cpu_attr == CPU_PPC7450
26446 || rs6000_cpu_attr == CPU_PPCE5500
26447 || rs6000_cpu_attr == CPU_PPCE6500
26448 || rs6000_cpu_attr == CPU_POWER4
26449 || rs6000_cpu_attr == CPU_POWER5
26450 || rs6000_cpu_attr == CPU_POWER7
26451 || rs6000_cpu_attr == CPU_POWER8
26452 || rs6000_cpu_attr == CPU_CELL)
26453 && recog_memoized (dep_insn)
26454 && (INSN_CODE (dep_insn) >= 0))
26456 switch (get_attr_type (dep_insn))
26458 case TYPE_CMP:
26459 case TYPE_COMPARE:
26460 case TYPE_FPCOMPARE:
26461 case TYPE_CR_LOGICAL:
26462 case TYPE_DELAYED_CR:
26463 return cost + 2;
26464 case TYPE_MUL:
26465 if (get_attr_dot (dep_insn) == DOT_YES)
26466 return cost + 2;
26467 else
26468 break;
26469 case TYPE_SHIFT:
26470 if (get_attr_dot (dep_insn) == DOT_YES
26471 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
26472 return cost + 2;
26473 else
26474 break;
26475 default:
26476 break;
26478 break;
26480 case TYPE_STORE:
26481 case TYPE_FPSTORE:
26482 if ((rs6000_cpu == PROCESSOR_POWER6)
26483 && recog_memoized (dep_insn)
26484 && (INSN_CODE (dep_insn) >= 0))
26487 if (GET_CODE (PATTERN (insn)) != SET)
26488 /* If this happens, we have to extend this to schedule
26489 optimally. Return default for now. */
26490 return cost;
26492 /* Adjust the cost for the case where the value written
26493 by a fixed point operation is used as the address
26494 gen value on a store. */
26495 switch (get_attr_type (dep_insn))
26497 case TYPE_LOAD:
26498 case TYPE_CNTLZ:
26500 if (! store_data_bypass_p (dep_insn, insn))
26501 return get_attr_sign_extend (dep_insn)
26502 == SIGN_EXTEND_YES ? 6 : 4;
26503 break;
26505 case TYPE_SHIFT:
26507 if (! store_data_bypass_p (dep_insn, insn))
26508 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26509 6 : 3;
26510 break;
26512 case TYPE_INTEGER:
26513 case TYPE_ADD:
26514 case TYPE_LOGICAL:
26515 case TYPE_COMPARE:
26516 case TYPE_EXTS:
26517 case TYPE_INSERT:
26519 if (! store_data_bypass_p (dep_insn, insn))
26520 return 3;
26521 break;
26523 case TYPE_STORE:
26524 case TYPE_FPLOAD:
26525 case TYPE_FPSTORE:
26527 if (get_attr_update (dep_insn) == UPDATE_YES
26528 && ! store_data_bypass_p (dep_insn, insn))
26529 return 3;
26530 break;
26532 case TYPE_MUL:
26534 if (! store_data_bypass_p (dep_insn, insn))
26535 return 17;
26536 break;
26538 case TYPE_DIV:
26540 if (! store_data_bypass_p (dep_insn, insn))
26541 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26542 break;
26544 default:
26545 break;
26548 break;
26550 case TYPE_LOAD:
26551 if ((rs6000_cpu == PROCESSOR_POWER6)
26552 && recog_memoized (dep_insn)
26553 && (INSN_CODE (dep_insn) >= 0))
26556 /* Adjust the cost for the case where the value written
26557 by a fixed point instruction is used within the address
26558 gen portion of a subsequent load(u)(x) */
26559 switch (get_attr_type (dep_insn))
26561 case TYPE_LOAD:
26562 case TYPE_CNTLZ:
26564 if (set_to_load_agen (dep_insn, insn))
26565 return get_attr_sign_extend (dep_insn)
26566 == SIGN_EXTEND_YES ? 6 : 4;
26567 break;
26569 case TYPE_SHIFT:
26571 if (set_to_load_agen (dep_insn, insn))
26572 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26573 6 : 3;
26574 break;
26576 case TYPE_INTEGER:
26577 case TYPE_ADD:
26578 case TYPE_LOGICAL:
26579 case TYPE_COMPARE:
26580 case TYPE_EXTS:
26581 case TYPE_INSERT:
26583 if (set_to_load_agen (dep_insn, insn))
26584 return 3;
26585 break;
26587 case TYPE_STORE:
26588 case TYPE_FPLOAD:
26589 case TYPE_FPSTORE:
26591 if (get_attr_update (dep_insn) == UPDATE_YES
26592 && set_to_load_agen (dep_insn, insn))
26593 return 3;
26594 break;
26596 case TYPE_MUL:
26598 if (set_to_load_agen (dep_insn, insn))
26599 return 17;
26600 break;
26602 case TYPE_DIV:
26604 if (set_to_load_agen (dep_insn, insn))
26605 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26606 break;
26608 default:
26609 break;
26612 break;
26614 case TYPE_FPLOAD:
26615 if ((rs6000_cpu == PROCESSOR_POWER6)
26616 && get_attr_update (insn) == UPDATE_NO
26617 && recog_memoized (dep_insn)
26618 && (INSN_CODE (dep_insn) >= 0)
26619 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
26620 return 2;
26622 default:
26623 break;
26626 /* Fall out to return default cost. */
26628 break;
26630 case REG_DEP_OUTPUT:
26631 /* Output dependency; DEP_INSN writes a register that INSN writes some
26632 cycles later. */
26633 if ((rs6000_cpu == PROCESSOR_POWER6)
26634 && recog_memoized (dep_insn)
26635 && (INSN_CODE (dep_insn) >= 0))
26637 attr_type = get_attr_type (insn);
26639 switch (attr_type)
26641 case TYPE_FP:
26642 if (get_attr_type (dep_insn) == TYPE_FP)
26643 return 1;
26644 break;
26645 case TYPE_FPLOAD:
26646 if (get_attr_update (insn) == UPDATE_NO
26647 && get_attr_type (dep_insn) == TYPE_MFFGPR)
26648 return 2;
26649 break;
26650 default:
26651 break;
26654 case REG_DEP_ANTI:
26655 /* Anti dependency; DEP_INSN reads a register that INSN writes some
26656 cycles later. */
26657 return 0;
26659 default:
26660 gcc_unreachable ();
26663 return cost;
26666 /* Debug version of rs6000_adjust_cost. */
26668 static int
26669 rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
26670 int cost)
26672 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
26674 if (ret != cost)
26676 const char *dep;
26678 switch (REG_NOTE_KIND (link))
26680 default: dep = "unknown dependency"; break;
26681 case REG_DEP_TRUE: dep = "data dependency"; break;
26682 case REG_DEP_OUTPUT: dep = "output dependency"; break;
26683 case REG_DEP_ANTI: dep = "anti dependency"; break;
26686 fprintf (stderr,
26687 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
26688 "%s, insn:\n", ret, cost, dep);
26690 debug_rtx (insn);
26693 return ret;
26696 /* Return true if INSN is microcoded.
26697 Return false otherwise. */
26699 static bool
26700 is_microcoded_insn (rtx insn)
26702 if (!insn || !NONDEBUG_INSN_P (insn)
26703 || GET_CODE (PATTERN (insn)) == USE
26704 || GET_CODE (PATTERN (insn)) == CLOBBER)
26705 return false;
26707 if (rs6000_cpu_attr == CPU_CELL)
26708 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
26710 if (rs6000_sched_groups
26711 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26713 enum attr_type type = get_attr_type (insn);
26714 if ((type == TYPE_LOAD
26715 && get_attr_update (insn) == UPDATE_YES
26716 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
26717 || ((type == TYPE_LOAD || type == TYPE_STORE)
26718 && get_attr_update (insn) == UPDATE_YES
26719 && get_attr_indexed (insn) == INDEXED_YES)
26720 || type == TYPE_MFCR)
26721 return true;
26724 return false;
26727 /* The function returns true if INSN is cracked into 2 instructions
26728 by the processor (and therefore occupies 2 issue slots). */
26730 static bool
26731 is_cracked_insn (rtx insn)
26733 if (!insn || !NONDEBUG_INSN_P (insn)
26734 || GET_CODE (PATTERN (insn)) == USE
26735 || GET_CODE (PATTERN (insn)) == CLOBBER)
26736 return false;
26738 if (rs6000_sched_groups
26739 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26741 enum attr_type type = get_attr_type (insn);
26742 if ((type == TYPE_LOAD
26743 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26744 && get_attr_update (insn) == UPDATE_NO)
26745 || (type == TYPE_LOAD
26746 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
26747 && get_attr_update (insn) == UPDATE_YES
26748 && get_attr_indexed (insn) == INDEXED_NO)
26749 || (type == TYPE_STORE
26750 && get_attr_update (insn) == UPDATE_YES
26751 && get_attr_indexed (insn) == INDEXED_NO)
26752 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
26753 && get_attr_update (insn) == UPDATE_YES)
26754 || type == TYPE_DELAYED_CR
26755 || type == TYPE_COMPARE
26756 || (type == TYPE_SHIFT
26757 && get_attr_dot (insn) == DOT_YES
26758 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
26759 || (type == TYPE_MUL
26760 && get_attr_dot (insn) == DOT_YES)
26761 || type == TYPE_DIV
26762 || (type == TYPE_INSERT
26763 && get_attr_size (insn) == SIZE_32))
26764 return true;
26767 return false;
26770 /* The function returns true if INSN can be issued only from
26771 the branch slot. */
26773 static bool
26774 is_branch_slot_insn (rtx insn)
26776 if (!insn || !NONDEBUG_INSN_P (insn)
26777 || GET_CODE (PATTERN (insn)) == USE
26778 || GET_CODE (PATTERN (insn)) == CLOBBER)
26779 return false;
26781 if (rs6000_sched_groups)
26783 enum attr_type type = get_attr_type (insn);
26784 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
26785 return true;
26786 return false;
26789 return false;
26792 /* Return true if OUT_INSN sets a value that is
26793 used in the address generation computation of IN_INSN. */
26794 static bool
26795 set_to_load_agen (rtx out_insn, rtx in_insn)
26797 rtx out_set, in_set;
26799 /* For performance reasons, only handle the simple case where
26800 both loads are a single_set. */
26801 out_set = single_set (out_insn);
26802 if (out_set)
26804 in_set = single_set (in_insn);
26805 if (in_set)
26806 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
26809 return false;
26812 /* Try to determine base/offset/size parts of the given MEM.
26813 Return true if successful, false if all the values couldn't
26814 be determined.
26816 This function only looks for REG or REG+CONST address forms.
26817 REG+REG address form will return false. */
26819 static bool
26820 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
26821 HOST_WIDE_INT *size)
26823 rtx addr_rtx;
26824 if (MEM_SIZE_KNOWN_P (mem))
26825 *size = MEM_SIZE (mem);
26826 else
26827 return false;
26829 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
26830 addr_rtx = XEXP (XEXP (mem, 0), 1);
26831 else
26832 addr_rtx = (XEXP (mem, 0));
26834 if (GET_CODE (addr_rtx) == REG)
26836 *base = addr_rtx;
26837 *offset = 0;
26839 else if (GET_CODE (addr_rtx) == PLUS
26840 && CONST_INT_P (XEXP (addr_rtx, 1)))
26842 *base = XEXP (addr_rtx, 0);
26843 *offset = INTVAL (XEXP (addr_rtx, 1));
26845 else
26846 return false;
26848 return true;
26851 /* Return true if the target storage location of
26852 MEM1 is adjacent to the target storage location of MEM2. */
26855 static bool
26856 adjacent_mem_locations (rtx mem1, rtx mem2)
26858 rtx reg1, reg2;
26859 HOST_WIDE_INT off1, size1, off2, size2;
26861 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26862 && get_memref_parts (mem2, &reg2, &off2, &size2))
26863 return ((REGNO (reg1) == REGNO (reg2))
26864 && ((off1 + size1 == off2)
26865 || (off2 + size2 == off1)));
26867 return false;
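/* For example, two 4-byte accesses at offsets 8 and 12 from the same
   base register are adjacent; accesses at offsets 8 and 16 are not.  */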
26870 /* This function returns true if it can be determined that the two MEM
26871 locations overlap by at least 1 byte based on base reg/offset/size. */
26873 static bool
26874 mem_locations_overlap (rtx mem1, rtx mem2)
26876 rtx reg1, reg2;
26877 HOST_WIDE_INT off1, size1, off2, size2;
26879 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26880 && get_memref_parts (mem2, &reg2, &off2, &size2))
26881 return ((REGNO (reg1) == REGNO (reg2))
26882 && (((off1 <= off2) && (off1 + size1 > off2))
26883 || ((off2 <= off1) && (off2 + size2 > off1))));
26885 return false;
26888 /* A C statement (sans semicolon) to update the integer scheduling
26889 priority INSN_PRIORITY (INSN). Increase the priority to execute the
26890 INSN earlier, reduce the priority to execute INSN later. Do not
26891 define this macro if you do not need to adjust the scheduling
26892 priorities of insns. */
26894 static int
26895 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
26897 rtx load_mem, str_mem;
26898 /* On machines (like the 750) which have asymmetric integer units,
26899 where one integer unit can do multiply and divides and the other
26900 can't, reduce the priority of multiply/divide so it is scheduled
26901 before other integer operations. */
26903 #if 0
26904 if (! INSN_P (insn))
26905 return priority;
26907 if (GET_CODE (PATTERN (insn)) == USE)
26908 return priority;
26910 switch (rs6000_cpu_attr) {
26911 case CPU_PPC750:
26912 switch (get_attr_type (insn))
26914 default:
26915 break;
26917 case TYPE_MUL:
26918 case TYPE_DIV:
26919 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
26920 priority, priority);
26921 if (priority >= 0 && priority < 0x01000000)
26922 priority >>= 3;
26923 break;
26926 #endif
26928 if (insn_must_be_first_in_group (insn)
26929 && reload_completed
26930 && current_sched_info->sched_max_insns_priority
26931 && rs6000_sched_restricted_insns_priority)
26934 /* Prioritize insns that can be dispatched only in the first
26935 dispatch slot. */
26936 if (rs6000_sched_restricted_insns_priority == 1)
26937 /* Attach highest priority to insn. This means that in
26938 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
26939 precede 'priority' (critical path) considerations. */
26940 return current_sched_info->sched_max_insns_priority;
26941 else if (rs6000_sched_restricted_insns_priority == 2)
26942 /* Increase priority of insn by a minimal amount. This means that in
26943 haifa-sched.c:ready_sort(), only 'priority' (critical path)
26944 considerations precede dispatch-slot restriction considerations. */
26945 return (priority + 1);
26948 if (rs6000_cpu == PROCESSOR_POWER6
26949 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
26950 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
26951 /* Attach highest priority to insn if the scheduler has just issued two
26952 stores and this instruction is a load, or two loads and this instruction
26953 is a store. Power6 wants loads and stores scheduled alternately
26954 when possible */
26955 return current_sched_info->sched_max_insns_priority;
26957 return priority;
26960 /* Return true if the instruction is nonpipelined on the Cell. */
26961 static bool
26962 is_nonpipeline_insn (rtx insn)
26964 enum attr_type type;
26965 if (!insn || !NONDEBUG_INSN_P (insn)
26966 || GET_CODE (PATTERN (insn)) == USE
26967 || GET_CODE (PATTERN (insn)) == CLOBBER)
26968 return false;
26970 type = get_attr_type (insn);
26971 if (type == TYPE_MUL
26972 || type == TYPE_DIV
26973 || type == TYPE_SDIV
26974 || type == TYPE_DDIV
26975 || type == TYPE_SSQRT
26976 || type == TYPE_DSQRT
26977 || type == TYPE_MFCR
26978 || type == TYPE_MFCRF
26979 || type == TYPE_MFJMPR)
26981 return true;
26983 return false;
26987 /* Return how many instructions the machine can issue per cycle. */
26989 static int
26990 rs6000_issue_rate (void)
26992 /* Unless scheduling for register pressure, use issue rate of 1 for
26993 first scheduling pass to decrease degradation. */
26994 if (!reload_completed && !flag_sched_pressure)
26995 return 1;
26997 switch (rs6000_cpu_attr) {
26998 case CPU_RS64A:
26999 case CPU_PPC601: /* ? */
27000 case CPU_PPC7450:
27001 return 3;
27002 case CPU_PPC440:
27003 case CPU_PPC603:
27004 case CPU_PPC750:
27005 case CPU_PPC7400:
27006 case CPU_PPC8540:
27007 case CPU_PPC8548:
27008 case CPU_CELL:
27009 case CPU_PPCE300C2:
27010 case CPU_PPCE300C3:
27011 case CPU_PPCE500MC:
27012 case CPU_PPCE500MC64:
27013 case CPU_PPCE5500:
27014 case CPU_PPCE6500:
27015 case CPU_TITAN:
27016 return 2;
27017 case CPU_PPC476:
27018 case CPU_PPC604:
27019 case CPU_PPC604E:
27020 case CPU_PPC620:
27021 case CPU_PPC630:
27022 return 4;
27023 case CPU_POWER4:
27024 case CPU_POWER5:
27025 case CPU_POWER6:
27026 case CPU_POWER7:
27027 return 5;
27028 case CPU_POWER8:
27029 return 7;
27030 default:
27031 return 1;
27035 /* Return how many instructions to look ahead for better insn
27036 scheduling. */
27038 static int
27039 rs6000_use_sched_lookahead (void)
27041 switch (rs6000_cpu_attr)
27043 case CPU_PPC8540:
27044 case CPU_PPC8548:
27045 return 4;
27047 case CPU_CELL:
27048 return (reload_completed ? 8 : 0);
27050 default:
27051 return 0;
27055 /* We are choosing insn from the ready queue. Return zero if INSN can be
27056 chosen. */
27057 static int
27058 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
27060 if (ready_index == 0)
27061 return 0;
27063 if (rs6000_cpu_attr != CPU_CELL)
27064 return 0;
27066 gcc_assert (insn != NULL_RTX && INSN_P (insn));
27068 if (!reload_completed
27069 || is_nonpipeline_insn (insn)
27070 || is_microcoded_insn (insn))
27071 return 1;
27073 return 0;
27076 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
27077 and return true. */
27079 static bool
27080 find_mem_ref (rtx pat, rtx *mem_ref)
27082 const char * fmt;
27083 int i, j;
27085 /* stack_tie does not produce any real memory traffic. */
27086 if (tie_operand (pat, VOIDmode))
27087 return false;
27089 if (GET_CODE (pat) == MEM)
27091 *mem_ref = pat;
27092 return true;
27095 /* Recursively process the pattern. */
27096 fmt = GET_RTX_FORMAT (GET_CODE (pat));
27098 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
27100 if (fmt[i] == 'e')
27102 if (find_mem_ref (XEXP (pat, i), mem_ref))
27103 return true;
27105 else if (fmt[i] == 'E')
27106 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
27108 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
27109 return true;
27113 return false;
27116 /* Determine if PAT is a PATTERN of a load insn. */
27118 static bool
27119 is_load_insn1 (rtx pat, rtx *load_mem)
27121 if (pat == NULL_RTX)
27122 return false;
27124 if (GET_CODE (pat) == SET)
27125 return find_mem_ref (SET_SRC (pat), load_mem);
27127 if (GET_CODE (pat) == PARALLEL)
27129 int i;
27131 for (i = 0; i < XVECLEN (pat, 0); i++)
27132 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
27133 return true;
27136 return false;
27139 /* Determine if INSN loads from memory. */
27141 static bool
27142 is_load_insn (rtx insn, rtx *load_mem)
27144 if (!insn || !INSN_P (insn))
27145 return false;
27147 if (CALL_P (insn))
27148 return false;
27150 return is_load_insn1 (PATTERN (insn), load_mem);
27153 /* Determine if PAT is a PATTERN of a store insn. */
27155 static bool
27156 is_store_insn1 (rtx pat, rtx *str_mem)
27158 if (pat == NULL_RTX)
27159 return false;
27161 if (GET_CODE (pat) == SET)
27162 return find_mem_ref (SET_DEST (pat), str_mem);
27164 if (GET_CODE (pat) == PARALLEL)
27166 int i;
27168 for (i = 0; i < XVECLEN (pat, 0); i++)
27169 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
27170 return true;
27173 return false;
27176 /* Determine if INSN stores to memory. */
27178 static bool
27179 is_store_insn (rtx insn, rtx *str_mem)
27181 if (!insn || !INSN_P (insn))
27182 return false;
27184 return is_store_insn1 (PATTERN (insn), str_mem);
27187 /* Returns whether the dependence between INSN and NEXT is considered
27188 costly by the given target. */
27190 static bool
27191 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
27193 rtx insn;
27194 rtx next;
27195 rtx load_mem, str_mem;
27197 /* If the flag is not enabled - no dependence is considered costly;
27198 allow all dependent insns in the same group.
27199 This is the most aggressive option. */
27200 if (rs6000_sched_costly_dep == no_dep_costly)
27201 return false;
27203 /* If the flag is set to 1 - a dependence is always considered costly;
27204 do not allow dependent instructions in the same group.
27205 This is the most conservative option. */
27206 if (rs6000_sched_costly_dep == all_deps_costly)
27207 return true;
27209 insn = DEP_PRO (dep);
27210 next = DEP_CON (dep);
27212 if (rs6000_sched_costly_dep == store_to_load_dep_costly
27213 && is_load_insn (next, &load_mem)
27214 && is_store_insn (insn, &str_mem))
27215 /* Prevent load after store in the same group. */
27216 return true;
27218 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
27219 && is_load_insn (next, &load_mem)
27220 && is_store_insn (insn, &str_mem)
27221 && DEP_TYPE (dep) == REG_DEP_TRUE
27222 && mem_locations_overlap(str_mem, load_mem))
27223 /* Prevent load after store in the same group if it is a true
27224 dependence. */
27225 return true;
27227 /* The flag is set to X; dependences with latency >= X are considered costly,
27228 and will not be scheduled in the same group. */
27229 if (rs6000_sched_costly_dep <= max_dep_latency
27230 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
27231 return true;
27233 return false;
27236 /* Return the next insn after INSN that is found before TAIL is reached,
27237 skipping any "non-active" insns - insns that will not actually occupy
27238 an issue slot. Return NULL_RTX if such an insn is not found. */
27240 static rtx_insn *
27241 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
27243 if (insn == NULL_RTX || insn == tail)
27244 return NULL;
27246 while (1)
27248 insn = NEXT_INSN (insn);
27249 if (insn == NULL_RTX || insn == tail)
27250 return NULL;
27252 if (CALL_P (insn)
27253 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
27254 || (NONJUMP_INSN_P (insn)
27255 && GET_CODE (PATTERN (insn)) != USE
27256 && GET_CODE (PATTERN (insn)) != CLOBBER
27257 && INSN_CODE (insn) != CODE_FOR_stack_tie))
27258 break;
27260 return insn;
27263 /* We are about to begin issuing insns for this clock cycle. */
27265 static int
27266 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
27267 rtx_insn **ready ATTRIBUTE_UNUSED,
27268 int *pn_ready ATTRIBUTE_UNUSED,
27269 int clock_var ATTRIBUTE_UNUSED)
27271 int n_ready = *pn_ready;
27273 if (sched_verbose)
27274 fprintf (dump, "// rs6000_sched_reorder :\n");
27276 /* Reorder the ready list, if the next insn to issue
27277 is a non-pipelined insn. */
27278 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
27280 if (is_nonpipeline_insn (ready[n_ready - 1])
27281 && (recog_memoized (ready[n_ready - 2]) > 0))
27282 /* Simply swap the first two insns. */
27284 rtx_insn *tmp = ready[n_ready - 1];
27285 ready[n_ready - 1] = ready[n_ready - 2];
27286 ready[n_ready - 2] = tmp;
27290 if (rs6000_cpu == PROCESSOR_POWER6)
27291 load_store_pendulum = 0;
27293 return rs6000_issue_rate ();
27296 /* Like rs6000_sched_reorder, but called after issuing each insn. */
27298 static int
27299 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
27300 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
27302 if (sched_verbose)
27303 fprintf (dump, "// rs6000_sched_reorder2 :\n");
27305 /* For Power6, we need to handle some special cases to try to keep the
27306 store queue from overflowing and triggering expensive flushes.
27308 This code monitors how load and store instructions are being issued
27309 and skews the ready list one way or the other to increase the likelihood
27310 that a desired instruction is issued at the proper time.
27312 A couple of things are done. First, we maintain a "load_store_pendulum"
27313 to track the current state of load/store issue.
27315 - If the pendulum is at zero, then no loads or stores have been
27316 issued in the current cycle so we do nothing.
27318 - If the pendulum is 1, then a single load has been issued in this
27319 cycle and we attempt to locate another load in the ready list to
27320 issue with it.
27322 - If the pendulum is -2, then two stores have already been
27323 issued in this cycle, so we increase the priority of the first load
27324 in the ready list to increase its likelihood of being chosen first
27325 in the next cycle.
27327 - If the pendulum is -1, then a single store has been issued in this
27328 cycle and we attempt to locate another store in the ready list to
27329 issue with it, preferring a store to an adjacent memory location to
27330 facilitate store pairing in the store queue.
27332 - If the pendulum is 2, then two loads have already been
27333 issued in this cycle, so we increase the priority of the first store
27334 in the ready list to increase its likelihood of being chosen first
27335 in the next cycle.
27337 - If the pendulum is < -2 or > 2, do nothing.
27339 Note: This code covers the most common scenarios. There exist
27340 non-load/store instructions which make use of the LSU and which
27341 would need to be accounted for to strictly model the behavior
27342 of the machine. Those instructions are currently left unaccounted
27343 for, to help minimize the compile-time overhead of this code.
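/* Illustrative trace (derived from the rules above): issuing two
   stores in one cycle moves the pendulum 0 -> -1 -> -2; at -1 the
   ready list is searched for a second (preferably adjacent) store,
   and at -2 the first load on the ready list gets a priority boost
   for the following cycle.  */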
27345 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
27347 int pos;
27348 int i;
27349 rtx_insn *tmp;
27350 rtx load_mem, str_mem;
27352 if (is_store_insn (last_scheduled_insn, &str_mem))
27353 /* Issuing a store, swing the load_store_pendulum to the left */
27354 load_store_pendulum--;
27355 else if (is_load_insn (last_scheduled_insn, &load_mem))
27356 /* Issuing a load, swing the load_store_pendulum to the right */
27357 load_store_pendulum++;
27358 else
27359 return cached_can_issue_more;
27361 /* If the pendulum is balanced, or there is only one instruction on
27362 the ready list, then all is well, so return. */
27363 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
27364 return cached_can_issue_more;
27366 if (load_store_pendulum == 1)
27368 /* A load has been issued in this cycle. Scan the ready list
27369 for another load to issue with it */
27370 pos = *pn_ready - 1;
27372 while (pos >= 0)
27374 if (is_load_insn (ready[pos], &load_mem))
27376 /* Found a load. Move it to the head of the ready list,
27377 and adjust its priority so that it is more likely to
27378 stay there */
27379 tmp = ready[pos];
27380 for (i = pos; i < *pn_ready - 1; i++)
27381 ready[i] = ready[i + 1];
27382 ready[*pn_ready-1] = tmp;
27384 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27385 INSN_PRIORITY (tmp)++;
27386 break;
27388 pos--;
27391 else if (load_store_pendulum == -2)
27393 /* Two stores have been issued in this cycle. Increase the
27394 priority of the first load in the ready list to favor it for
27395 issuing in the next cycle. */
27396 pos = *pn_ready - 1;
27398 while (pos >= 0)
27400 if (is_load_insn (ready[pos], &load_mem)
27401 && !sel_sched_p ()
27402 && INSN_PRIORITY_KNOWN (ready[pos]))
27404 INSN_PRIORITY (ready[pos])++;
27406 /* Adjust the pendulum to account for the fact that a load
27407 was found and increased in priority. This is to prevent
27408 increasing the priority of multiple loads */
27409 load_store_pendulum--;
27411 break;
27413 pos--;
27416 else if (load_store_pendulum == -1)
27418 /* A store has been issued in this cycle. Scan the ready list for
27419 another store to issue with it, preferring a store to an adjacent
27420 memory location */
27421 int first_store_pos = -1;
27423 pos = *pn_ready - 1;
27425 while (pos >= 0)
27427 if (is_store_insn (ready[pos], &str_mem))
27429 rtx str_mem2;
27430 /* Maintain the index of the first store found on the
27431 list */
27432 if (first_store_pos == -1)
27433 first_store_pos = pos;
27435 if (is_store_insn (last_scheduled_insn, &str_mem2)
27436 && adjacent_mem_locations (str_mem, str_mem2))
27438 /* Found an adjacent store. Move it to the head of the
27439 ready list, and adjust its priority so that it is
27440 more likely to stay there */
27441 tmp = ready[pos];
27442 for (i = pos; i < *pn_ready - 1; i++)
27443 ready[i] = ready[i + 1];
27444 ready[*pn_ready-1] = tmp;
27446 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27447 INSN_PRIORITY (tmp)++;
27449 first_store_pos = -1;
27451 break;
27454 pos--;
27457 if (first_store_pos >= 0)
27459 /* An adjacent store wasn't found, but a non-adjacent store was,
27460 so move the non-adjacent store to the front of the ready
27461 list, and adjust its priority so that it is more likely to
27462 stay there. */
27463 tmp = ready[first_store_pos];
27464 for (i = first_store_pos; i < *pn_ready - 1; i++)
27465 ready[i] = ready[i + 1];
27466 ready[*pn_ready-1] = tmp;
27467 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27468 INSN_PRIORITY (tmp)++;
27471 else if (load_store_pendulum == 2)
27473 /* Two loads have been issued in this cycle. Increase the priority
27474 of the first store in the ready list to favor it for issuing in
27475 the next cycle. */
27476 pos = *pn_ready - 1;
27478 while (pos >= 0)
27480 if (is_store_insn (ready[pos], &str_mem)
27481 && !sel_sched_p ()
27482 && INSN_PRIORITY_KNOWN (ready[pos]))
27484 INSN_PRIORITY (ready[pos])++;
27486 /* Adjust the pendulum to account for the fact that a store
27487 was found and increased in priority. This is to prevent
27488 increasing the priority of multiple stores */
27489 load_store_pendulum++;
27491 break;
27493 pos--;
27498 return cached_can_issue_more;
27501 /* Return whether the presence of INSN causes a dispatch group termination
27502 of group WHICH_GROUP.
27504 If WHICH_GROUP == current_group, this function will return true if INSN
27505 causes the termination of the current group (i.e., the dispatch group to
27506 which INSN belongs). This means that INSN will be the last insn in the
27507 group it belongs to.
27509 If WHICH_GROUP == previous_group, this function will return true if INSN
27510 causes the termination of the previous group (i.e., the dispatch group that
27511 precedes the group to which INSN belongs). This means that INSN will be
27512 the first insn in the group it belongs to. */
27514 static bool
27515 insn_terminates_group_p (rtx insn, enum group_termination which_group)
27517 bool first, last;
27519 if (! insn)
27520 return false;
27522 first = insn_must_be_first_in_group (insn);
27523 last = insn_must_be_last_in_group (insn);
27525 if (first && last)
27526 return true;
27528 if (which_group == current_group)
27529 return last;
27530 else if (which_group == previous_group)
27531 return first;
27533 return false;
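/* Return true if INSN must be the first insn in its dispatch group,
   according to the dispatch constraints of the CPU being scheduled
   for (see the per-processor cases below).  */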
27537 static bool
27538 insn_must_be_first_in_group (rtx insn)
27540 enum attr_type type;
27542 if (!insn
27543 || NOTE_P (insn)
27544 || DEBUG_INSN_P (insn)
27545 || GET_CODE (PATTERN (insn)) == USE
27546 || GET_CODE (PATTERN (insn)) == CLOBBER)
27547 return false;
27549 switch (rs6000_cpu)
27551 case PROCESSOR_POWER5:
27552 if (is_cracked_insn (insn))
27553 return true;
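/* FALLTHRU: on POWER5, microcoded insns must also be first. */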
27554 case PROCESSOR_POWER4:
27555 if (is_microcoded_insn (insn))
27556 return true;
27558 if (!rs6000_sched_groups)
27559 return false;
27561 type = get_attr_type (insn);
27563 switch (type)
27565 case TYPE_MFCR:
27566 case TYPE_MFCRF:
27567 case TYPE_MTCR:
27568 case TYPE_DELAYED_CR:
27569 case TYPE_CR_LOGICAL:
27570 case TYPE_MTJMPR:
27571 case TYPE_MFJMPR:
27572 case TYPE_DIV:
27573 case TYPE_LOAD_L:
27574 case TYPE_STORE_C:
27575 case TYPE_ISYNC:
27576 case TYPE_SYNC:
27577 return true;
27578 default:
27579 break;
27581 break;
27582 case PROCESSOR_POWER6:
27583 type = get_attr_type (insn);
27585 switch (type)
27587 case TYPE_EXTS:
27588 case TYPE_CNTLZ:
27589 case TYPE_TRAP:
27590 case TYPE_MUL:
27591 case TYPE_INSERT:
27592 case TYPE_FPCOMPARE:
27593 case TYPE_MFCR:
27594 case TYPE_MTCR:
27595 case TYPE_MFJMPR:
27596 case TYPE_MTJMPR:
27597 case TYPE_ISYNC:
27598 case TYPE_SYNC:
27599 case TYPE_LOAD_L:
27600 case TYPE_STORE_C:
27601 return true;
27602 case TYPE_SHIFT:
27603 if (get_attr_dot (insn) == DOT_NO
27604 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27605 return true;
27606 else
27607 break;
27608 case TYPE_DIV:
27609 if (get_attr_size (insn) == SIZE_32)
27610 return true;
27611 else
27612 break;
27613 case TYPE_LOAD:
27614 case TYPE_STORE:
27615 case TYPE_FPLOAD:
27616 case TYPE_FPSTORE:
27617 if (get_attr_update (insn) == UPDATE_YES)
27618 return true;
27619 else
27620 break;
27621 default:
27622 break;
27624 break;
27625 case PROCESSOR_POWER7:
27626 type = get_attr_type (insn);
27628 switch (type)
27630 case TYPE_CR_LOGICAL:
27631 case TYPE_MFCR:
27632 case TYPE_MFCRF:
27633 case TYPE_MTCR:
27634 case TYPE_DIV:
27635 case TYPE_COMPARE:
27636 case TYPE_ISYNC:
27637 case TYPE_LOAD_L:
27638 case TYPE_STORE_C:
27639 case TYPE_MFJMPR:
27640 case TYPE_MTJMPR:
27641 return true;
27642 case TYPE_MUL:
27643 case TYPE_SHIFT:
27644 if (get_attr_dot (insn) == DOT_YES)
27645 return true;
27646 else
27647 break;
27648 case TYPE_LOAD:
27649 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27650 || get_attr_update (insn) == UPDATE_YES)
27651 return true;
27652 else
27653 break;
27654 case TYPE_STORE:
27655 case TYPE_FPLOAD:
27656 case TYPE_FPSTORE:
27657 if (get_attr_update (insn) == UPDATE_YES)
27658 return true;
27659 else
27660 break;
27661 default:
27662 break;
27664 break;
27665 case PROCESSOR_POWER8:
27666 type = get_attr_type (insn);
27668 switch (type)
27670 case TYPE_CR_LOGICAL:
27671 case TYPE_DELAYED_CR:
27672 case TYPE_MFCR:
27673 case TYPE_MFCRF:
27674 case TYPE_MTCR:
27675 case TYPE_COMPARE:
27676 case TYPE_SYNC:
27677 case TYPE_ISYNC:
27678 case TYPE_LOAD_L:
27679 case TYPE_STORE_C:
27680 case TYPE_VECSTORE:
27681 case TYPE_MFJMPR:
27682 case TYPE_MTJMPR:
27683 return true;
27684 case TYPE_SHIFT:
27685 case TYPE_MUL:
27686 if (get_attr_dot (insn) == DOT_YES)
27687 return true;
27688 else
27689 break;
27690 case TYPE_LOAD:
27691 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27692 || get_attr_update (insn) == UPDATE_YES)
27693 return true;
27694 else
27695 break;
27696 case TYPE_STORE:
27697 if (get_attr_update (insn) == UPDATE_YES
27698 && get_attr_indexed (insn) == INDEXED_YES)
27699 return true;
27700 else
27701 break;
27702 default:
27703 break;
27705 break;
27706 default:
27707 break;
27710 return false;
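/* Return true if INSN must be the last insn in its dispatch group,
   according to the dispatch constraints of the CPU being scheduled
   for (see the per-processor cases below).  */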
27713 static bool
27714 insn_must_be_last_in_group (rtx insn)
27716 enum attr_type type;
27718 if (!insn
27719 || NOTE_P (insn)
27720 || DEBUG_INSN_P (insn)
27721 || GET_CODE (PATTERN (insn)) == USE
27722 || GET_CODE (PATTERN (insn)) == CLOBBER)
27723 return false;
27725 switch (rs6000_cpu) {
27726 case PROCESSOR_POWER4:
27727 case PROCESSOR_POWER5:
27728 if (is_microcoded_insn (insn))
27729 return true;
27731 if (is_branch_slot_insn (insn))
27732 return true;
27734 break;
27735 case PROCESSOR_POWER6:
27736 type = get_attr_type (insn);
27738 switch (type)
27740 case TYPE_EXTS:
27741 case TYPE_CNTLZ:
27742 case TYPE_TRAP:
27743 case TYPE_MUL:
27744 case TYPE_FPCOMPARE:
27745 case TYPE_MFCR:
27746 case TYPE_MTCR:
27747 case TYPE_MFJMPR:
27748 case TYPE_MTJMPR:
27749 case TYPE_ISYNC:
27750 case TYPE_SYNC:
27751 case TYPE_LOAD_L:
27752 case TYPE_STORE_C:
27753 return true;
27754 case TYPE_SHIFT:
27755 if (get_attr_dot (insn) == DOT_NO
27756 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27757 return true;
27758 else
27759 break;
27760 case TYPE_DIV:
27761 if (get_attr_size (insn) == SIZE_32)
27762 return true;
27763 else
27764 break;
27765 default:
27766 break;
27768 break;
27769 case PROCESSOR_POWER7:
27770 type = get_attr_type (insn);
27772 switch (type)
27774 case TYPE_ISYNC:
27775 case TYPE_SYNC:
27776 case TYPE_LOAD_L:
27777 case TYPE_STORE_C:
27778 return true;
27779 case TYPE_LOAD:
27780 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27781 && get_attr_update (insn) == UPDATE_YES)
27782 return true;
27783 else
27784 break;
27785 case TYPE_STORE:
27786 if (get_attr_update (insn) == UPDATE_YES
27787 && get_attr_indexed (insn) == INDEXED_YES)
27788 return true;
27789 else
27790 break;
27791 default:
27792 break;
27794 break;
27795 case PROCESSOR_POWER8:
27796 type = get_attr_type (insn);
27798 switch (type)
27800 case TYPE_MFCR:
27801 case TYPE_MTCR:
27802 case TYPE_ISYNC:
27803 case TYPE_SYNC:
27804 case TYPE_LOAD_L:
27805 case TYPE_STORE_C:
27806 return true;
27807 case TYPE_LOAD:
27808 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27809 && get_attr_update (insn) == UPDATE_YES)
27810 return true;
27811 else
27812 break;
27813 case TYPE_STORE:
27814 if (get_attr_update (insn) == UPDATE_YES
27815 && get_attr_indexed (insn) == INDEXED_YES)
27816 return true;
27817 else
27818 break;
27819 default:
27820 break;
27822 break;
27823 default:
27824 break;
27827 return false;
27830 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
27831 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
27833 static bool
27834 is_costly_group (rtx *group_insns, rtx next_insn)
27836 int i;
27837 int issue_rate = rs6000_issue_rate ();
27839 for (i = 0; i < issue_rate; i++)
27841 sd_iterator_def sd_it;
27842 dep_t dep;
27843 rtx insn = group_insns[i];
27845 if (!insn)
27846 continue;
27848 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
27850 rtx next = DEP_CON (dep);
27852 if (next == next_insn
27853 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
27854 return true;
27858 return false;
27861 /* Utility function for redefine_groups.
27862 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
27863 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
27864 to keep it "far" (in a separate group) from GROUP_INSNS, following
27865 one of the following schemes, depending on the value of the flag
27866 -minsert-sched-nops = X:
27867 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
27868 in order to force NEXT_INSN into a separate group.
27869 (2) X < sched_finish_regroup_exact: insert exactly X nops.
27870 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
27871 insertion (whether a group has just ended, how many vacant issue slots remain
27872 last group, and how many dispatch groups were encountered so far). */
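/* For example (illustrative, assuming the documented option
   spellings): -minsert-sched-nops=3 selects scheme (2) and emits
   exactly three nops before NEXT_INSN, while
   -minsert-sched-nops=regroup_exact selects scheme (1) and emits
   only as many nops as are needed to push NEXT_INSN into a new
   group.  */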
27874 static int
27875 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
27876 rtx next_insn, bool *group_end, int can_issue_more,
27877 int *group_count)
27879 rtx nop;
27880 bool force;
27881 int issue_rate = rs6000_issue_rate ();
27882 bool end = *group_end;
27883 int i;
27885 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
27886 return can_issue_more;
27888 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
27889 return can_issue_more;
27891 force = is_costly_group (group_insns, next_insn);
27892 if (!force)
27893 return can_issue_more;
27895 if (sched_verbose > 6)
27896 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
27897 *group_count ,can_issue_more);
27899 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
27901 if (*group_end)
27902 can_issue_more = 0;
27904 /* Since only a branch can be issued in the last issue_slot, it is
27905 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
27906 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
27907 in this case the last nop will start a new group and the branch
27908 will be forced to the new group. */
27909 if (can_issue_more && !is_branch_slot_insn (next_insn))
27910 can_issue_more--;
27912 /* Do we have a special group ending nop? */
27913 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
27914 || rs6000_cpu_attr == CPU_POWER8)
27916 nop = gen_group_ending_nop ();
27917 emit_insn_before (nop, next_insn);
27918 can_issue_more = 0;
27920 else
27921 while (can_issue_more > 0)
27923 nop = gen_nop ();
27924 emit_insn_before (nop, next_insn);
27925 can_issue_more--;
27928 *group_end = true;
27929 return 0;
27932 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
27934 int n_nops = rs6000_sched_insert_nops;
27936 /* Nops can't be issued from the branch slot, so the effective
27937 issue_rate for nops is 'issue_rate - 1'. */
27938 if (can_issue_more == 0)
27939 can_issue_more = issue_rate;
27940 can_issue_more--;
27941 if (can_issue_more == 0)
27943 can_issue_more = issue_rate - 1;
27944 (*group_count)++;
27945 end = true;
27946 for (i = 0; i < issue_rate; i++)
27948 group_insns[i] = 0;
27952 while (n_nops > 0)
27954 nop = gen_nop ();
27955 emit_insn_before (nop, next_insn);
27956 if (can_issue_more == issue_rate - 1) /* new group begins */
27957 end = false;
27958 can_issue_more--;
27959 if (can_issue_more == 0)
27961 can_issue_more = issue_rate - 1;
27962 (*group_count)++;
27963 end = true;
27964 for (i = 0; i < issue_rate; i++)
27966 group_insns[i] = 0;
27969 n_nops--;
27972 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
27973 can_issue_more++;
27975 /* Is next_insn going to start a new group? */
27976 *group_end
27977 = (end
27978 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27979 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27980 || (can_issue_more < issue_rate &&
27981 insn_terminates_group_p (next_insn, previous_group)));
27982 if (*group_end && end)
27983 (*group_count)--;
27985 if (sched_verbose > 6)
27986 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
27987 *group_count, can_issue_more);
27988 return can_issue_more;
27991 return can_issue_more;
27994 /* This function tries to synch the dispatch groups that the compiler "sees"
27995 with the dispatch groups that the processor dispatcher is expected to
27996 form in practice. It tries to achieve this synchronization by forcing the
27997 estimated processor grouping on the compiler (as opposed to the function
27998 'pad_groups', which tries to force the scheduler's grouping on the processor).
28000 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
28001 examines the (estimated) dispatch groups that will be formed by the processor
28002 dispatcher. It marks these group boundaries to reflect the estimated
28003 processor grouping, overriding the grouping that the scheduler had marked.
28004 Depending on the value of the flag '-minsert-sched-nops' this function can
28005 force certain insns into separate groups or force a certain distance between
28006 them by inserting nops, for example, if there exists a "costly dependence"
28007 between the insns.
28009 The function estimates the group boundaries that the processor will form as
28010 follows: It keeps track of how many vacant issue slots are available after
28011 each insn. A subsequent insn will start a new group if one of the following
28012 4 cases applies:
28013 - no more vacant issue slots remain in the current dispatch group.
28014 - only the last issue slot, which is the branch slot, is vacant, but the next
28015 insn is not a branch.
28016 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
28017 which means that a cracked insn (which occupies two issue slots) can't be
28018 issued in this group.
28019 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
28020 start a new group. */
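/* Worked example (illustrative, issue_rate == 4): after three insns
   have been placed in the current group, can_issue_more == 1, so
   only the branch slot is vacant; if the next insn is not a branch,
   the second case above applies and it starts a new group.  */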
28022 static int
28023 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28024 rtx_insn *tail)
28026 rtx_insn *insn, *next_insn;
28027 int issue_rate;
28028 int can_issue_more;
28029 int slot, i;
28030 bool group_end;
28031 int group_count = 0;
28032 rtx *group_insns;
28034 /* Initialize. */
28035 issue_rate = rs6000_issue_rate ();
28036 group_insns = XALLOCAVEC (rtx, issue_rate);
28037 for (i = 0; i < issue_rate; i++)
28039 group_insns[i] = 0;
28041 can_issue_more = issue_rate;
28042 slot = 0;
28043 insn = get_next_active_insn (prev_head_insn, tail);
28044 group_end = false;
28046 while (insn != NULL_RTX)
28048 slot = (issue_rate - can_issue_more);
28049 group_insns[slot] = insn;
28050 can_issue_more =
28051 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28052 if (insn_terminates_group_p (insn, current_group))
28053 can_issue_more = 0;
28055 next_insn = get_next_active_insn (insn, tail);
28056 if (next_insn == NULL_RTX)
28057 return group_count + 1;
28059 /* Is next_insn going to start a new group? */
28060 group_end
28061 = (can_issue_more == 0
28062 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
28063 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
28064 || (can_issue_more < issue_rate &&
28065 insn_terminates_group_p (next_insn, previous_group)));
28067 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
28068 next_insn, &group_end, can_issue_more,
28069 &group_count);
28071 if (group_end)
28073 group_count++;
28074 can_issue_more = 0;
28075 for (i = 0; i < issue_rate; i++)
28077 group_insns[i] = 0;
28081 if (GET_MODE (next_insn) == TImode && can_issue_more)
28082 PUT_MODE (next_insn, VOIDmode);
28083 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
28084 PUT_MODE (next_insn, TImode);
28086 insn = next_insn;
28087 if (can_issue_more == 0)
28088 can_issue_more = issue_rate;
28089 } /* while */
28091 return group_count;
28094 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
28095 dispatch group boundaries that the scheduler had marked. Pad with nops
28096 any dispatch groups which have vacant issue slots, in order to force the
28097 scheduler's grouping on the processor dispatcher. The function
28098 returns the number of dispatch groups found. */
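/* For example (illustrative): if the scheduler marked a group
   boundary while two issue slots are still vacant and the next insn
   is not a branch, one nop is emitted, leaving only the branch slot
   empty so that the group terminates where the scheduler intended.  */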
28100 static int
28101 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28102 rtx_insn *tail)
28104 rtx_insn *insn, *next_insn;
28105 rtx nop;
28106 int issue_rate;
28107 int can_issue_more;
28108 int group_end;
28109 int group_count = 0;
28111 /* Initialize issue_rate. */
28112 issue_rate = rs6000_issue_rate ();
28113 can_issue_more = issue_rate;
28115 insn = get_next_active_insn (prev_head_insn, tail);
28116 next_insn = get_next_active_insn (insn, tail);
28118 while (insn != NULL_RTX)
28120 can_issue_more =
28121 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28123 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
28125 if (next_insn == NULL_RTX)
28126 break;
28128 if (group_end)
28130 /* If the scheduler had marked group termination at this location
28131 (between insn and next_insn), and neither insn nor next_insn will
28132 force group termination, pad the group with nops to force group
28133 termination. */
28134 if (can_issue_more
28135 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
28136 && !insn_terminates_group_p (insn, current_group)
28137 && !insn_terminates_group_p (next_insn, previous_group))
28139 if (!is_branch_slot_insn (next_insn))
28140 can_issue_more--;
28142 while (can_issue_more)
28144 nop = gen_nop ();
28145 emit_insn_before (nop, next_insn);
28146 can_issue_more--;
28150 can_issue_more = issue_rate;
28151 group_count++;
28154 insn = next_insn;
28155 next_insn = get_next_active_insn (insn, tail);
28158 return group_count;
28161 /* We're beginning a new block. Initialize data structures as necessary. */
28163 static void
28164 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
28165 int sched_verbose ATTRIBUTE_UNUSED,
28166 int max_ready ATTRIBUTE_UNUSED)
28168 last_scheduled_insn = NULL_RTX;
28169 load_store_pendulum = 0;
28172 /* The following function is called at the end of scheduling BB.
28173 After reload, it inserts nops to enforce the insn group bundling. */
28175 static void
28176 rs6000_sched_finish (FILE *dump, int sched_verbose)
28178 int n_groups;
28180 if (sched_verbose)
28181 fprintf (dump, "=== Finishing schedule.\n");
28183 if (reload_completed && rs6000_sched_groups)
28185 /* Do not run the sched_finish hook when selective scheduling is enabled. */
28186 if (sel_sched_p ())
28187 return;
28189 if (rs6000_sched_insert_nops == sched_finish_none)
28190 return;
28192 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
28193 n_groups = pad_groups (dump, sched_verbose,
28194 current_sched_info->prev_head,
28195 current_sched_info->next_tail);
28196 else
28197 n_groups = redefine_groups (dump, sched_verbose,
28198 current_sched_info->prev_head,
28199 current_sched_info->next_tail);
28201 if (sched_verbose >= 6)
28203 fprintf (dump, "ngroups = %d\n", n_groups);
28204 print_rtl (dump, current_sched_info->prev_head);
28205 fprintf (dump, "Done finish_sched\n");
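/* Scheduling state saved and restored by the selective scheduler
   through the context hooks below (alloc/init/set/free).  */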
28210 struct _rs6000_sched_context
28212 short cached_can_issue_more;
28213 rtx last_scheduled_insn;
28214 int load_store_pendulum;
28217 typedef struct _rs6000_sched_context rs6000_sched_context_def;
28218 typedef rs6000_sched_context_def *rs6000_sched_context_t;
28220 /* Allocate storage for a new scheduling context. */
28221 static void *
28222 rs6000_alloc_sched_context (void)
28224 return xmalloc (sizeof (rs6000_sched_context_def));
28227 /* If CLEAN_P is true, initialize _SC with clean data;
28228 otherwise, initialize it from the global context. */
28229 static void
28230 rs6000_init_sched_context (void *_sc, bool clean_p)
28232 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28234 if (clean_p)
28236 sc->cached_can_issue_more = 0;
28237 sc->last_scheduled_insn = NULL_RTX;
28238 sc->load_store_pendulum = 0;
28240 else
28242 sc->cached_can_issue_more = cached_can_issue_more;
28243 sc->last_scheduled_insn = last_scheduled_insn;
28244 sc->load_store_pendulum = load_store_pendulum;
28248 /* Sets the global scheduling context to the one pointed to by _SC. */
28249 static void
28250 rs6000_set_sched_context (void *_sc)
28252 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28254 gcc_assert (sc != NULL);
28256 cached_can_issue_more = sc->cached_can_issue_more;
28257 last_scheduled_insn = sc->last_scheduled_insn;
28258 load_store_pendulum = sc->load_store_pendulum;
28261 /* Free _SC. */
28262 static void
28263 rs6000_free_sched_context (void *_sc)
28265 gcc_assert (_sc != NULL);
28267 free (_sc);
28271 /* Length in units of the trampoline for entering a nested function. */
28273 int
28274 rs6000_trampoline_size (void)
28276 int ret = 0;
28278 switch (DEFAULT_ABI)
28280 default:
28281 gcc_unreachable ();
28283 case ABI_AIX:
28284 ret = (TARGET_32BIT) ? 12 : 24;
28285 break;
28287 case ABI_ELFv2:
28288 gcc_assert (!TARGET_32BIT);
28289 ret = 32;
28290 break;
28292 case ABI_DARWIN:
28293 case ABI_V4:
28294 ret = (TARGET_32BIT) ? 40 : 48;
28295 break;
28298 return ret;
28301 /* Emit RTL insns to initialize the variable parts of a trampoline.
28302 FNADDR is an RTX for the address of the function's pure code.
28303 CXT is an RTX for the static chain value for the function. */
28305 static void
28306 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
28308 int regsize = (TARGET_32BIT) ? 4 : 8;
28309 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
28310 rtx ctx_reg = force_reg (Pmode, cxt);
28311 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
28313 switch (DEFAULT_ABI)
28315 default:
28316 gcc_unreachable ();
28318 /* Under AIX, just build the 3-word function descriptor. */
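/* Illustrative layout, matching the stores below: word 0 holds the
   function entry address, word 1 the TOC pointer, and word 2 the
   static chain.  */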
28319 case ABI_AIX:
28321 rtx fnmem, fn_reg, toc_reg;
28323 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28324 error ("You cannot take the address of a nested function if you use "
28325 "the -mno-pointers-to-nested-functions option.");
28327 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
28328 fn_reg = gen_reg_rtx (Pmode);
28329 toc_reg = gen_reg_rtx (Pmode);
28331 /* Macro to shorten the code expansions below. */
28332 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
28334 m_tramp = replace_equiv_address (m_tramp, addr);
28336 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
28337 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
28338 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
28339 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
28340 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
28342 # undef MEM_PLUS
28344 break;
28346 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
28347 case ABI_ELFv2:
28348 case ABI_DARWIN:
28349 case ABI_V4:
28350 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
28351 LCT_NORMAL, VOIDmode, 4,
28352 addr, Pmode,
28353 GEN_INT (rs6000_trampoline_size ()), SImode,
28354 fnaddr, Pmode,
28355 ctx_reg, Pmode);
28356 break;
28361 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
28362 identifier as an argument, so the front end shouldn't look it up. */
28364 static bool
28365 rs6000_attribute_takes_identifier_p (const_tree attr_id)
28367 return is_attribute_p ("altivec", attr_id);
28370 /* Handle the "altivec" attribute. The attribute may have
28371 arguments as follows:
28373 __attribute__((altivec(vector__)))
28374 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
28375 __attribute__((altivec(bool__))) (always followed by 'unsigned')
28377 and may appear more than once (e.g., 'vector bool char') in a
28378 given declaration. */
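/* For example (illustrative), an <altivec.h>-style declaration such as
     vector unsigned char vuc;
   reaches this handler as roughly
     __attribute__((altivec(vector__))) unsigned char vuc;
   after the front end expands the context-sensitive keywords.  */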
28380 static tree
28381 rs6000_handle_altivec_attribute (tree *node,
28382 tree name ATTRIBUTE_UNUSED,
28383 tree args,
28384 int flags ATTRIBUTE_UNUSED,
28385 bool *no_add_attrs)
28387 tree type = *node, result = NULL_TREE;
28388 enum machine_mode mode;
28389 int unsigned_p;
28390 char altivec_type
28391 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
28392 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
28393 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
28394 : '?');
28396 while (POINTER_TYPE_P (type)
28397 || TREE_CODE (type) == FUNCTION_TYPE
28398 || TREE_CODE (type) == METHOD_TYPE
28399 || TREE_CODE (type) == ARRAY_TYPE)
28400 type = TREE_TYPE (type);
28402 mode = TYPE_MODE (type);
28404 /* Check for invalid AltiVec type qualifiers. */
28405 if (type == long_double_type_node)
28406 error ("use of %<long double%> in AltiVec types is invalid");
28407 else if (type == boolean_type_node)
28408 error ("use of boolean types in AltiVec types is invalid");
28409 else if (TREE_CODE (type) == COMPLEX_TYPE)
28410 error ("use of %<complex%> in AltiVec types is invalid");
28411 else if (DECIMAL_FLOAT_MODE_P (mode))
28412 error ("use of decimal floating point types in AltiVec types is invalid");
28413 else if (!TARGET_VSX)
28415 if (type == long_unsigned_type_node || type == long_integer_type_node)
28417 if (TARGET_64BIT)
28418 error ("use of %<long%> in AltiVec types is invalid for "
28419 "64-bit code without -mvsx");
28420 else if (rs6000_warn_altivec_long)
28421 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28422 "use %<int%>");
28424 else if (type == long_long_unsigned_type_node
28425 || type == long_long_integer_type_node)
28426 error ("use of %<long long%> in AltiVec types is invalid without "
28427 "-mvsx");
28428 else if (type == double_type_node)
28429 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28432 switch (altivec_type)
28434 case 'v':
28435 unsigned_p = TYPE_UNSIGNED (type);
28436 switch (mode)
28438 case TImode:
28439 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
28440 break;
28441 case DImode:
28442 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
28443 break;
28444 case SImode:
28445 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
28446 break;
28447 case HImode:
28448 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
28449 break;
28450 case QImode:
28451 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
28452 break;
28453 case SFmode: result = V4SF_type_node; break;
28454 case DFmode: result = V2DF_type_node; break;
28455 /* If the user says 'vector int bool', we may be handed the 'bool'
28456 attribute _before_ the 'vector' attribute, and so select the
28457 proper type in the 'b' case below. */
28458 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
28459 case V2DImode: case V2DFmode:
28460 result = type;
28461 default: break;
28463 break;
28464 case 'b':
28465 switch (mode)
28467 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28468 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28469 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28470 case QImode: case V16QImode: result = bool_V16QI_type_node;
28471 default: break;
28473 break;
28474 case 'p':
28475 switch (mode)
28477 case V8HImode: result = pixel_V8HI_type_node;
28478 default: break;
28480 default: break;
28483 /* Propagate qualifiers attached to the element type
28484 onto the vector type. */
28485 if (result && result != type && TYPE_QUALS (type))
28486 result = build_qualified_type (result, TYPE_QUALS (type));
28488 *no_add_attrs = true; /* No need to hang on to the attribute. */
28490 if (result)
28491 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28493 return NULL_TREE;
28496 /* AltiVec defines four built-in scalar types that serve as vector
28497 elements; we must teach the compiler how to mangle them. */
28499 static const char *
28500 rs6000_mangle_type (const_tree type)
28502 type = TYPE_MAIN_VARIANT (type);
28504 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28505 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28506 return NULL;
28508 if (type == bool_char_type_node) return "U6__boolc";
28509 if (type == bool_short_type_node) return "U6__bools";
28510 if (type == pixel_type_node) return "u7__pixel";
28511 if (type == bool_int_type_node) return "U6__booli";
28512 if (type == bool_long_type_node) return "U6__booll";
28514 /* Mangle IBM extended float long double as `g' (__float128) on
28515 powerpc*-linux where long-double-64 previously was the default. */
28516 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28517 && TARGET_ELF
28518 && TARGET_LONG_DOUBLE_128
28519 && !TARGET_IEEEQUAD)
28520 return "g";
28522 /* For all other types, use normal C++ mangling. */
28523 return NULL;
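/* Note: in the Itanium C++ ABI encoding used above, "u<len><name>"
   denotes a vendor-extended builtin type (hence "u7__pixel"), while
   "U<len><name>" denotes a vendor-extended type qualifier (hence
   "U6__booli" for a '__bool'-qualified int).  */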
28526 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28527 struct attribute_spec.handler. */
28529 static tree
28530 rs6000_handle_longcall_attribute (tree *node, tree name,
28531 tree args ATTRIBUTE_UNUSED,
28532 int flags ATTRIBUTE_UNUSED,
28533 bool *no_add_attrs)
28535 if (TREE_CODE (*node) != FUNCTION_TYPE
28536 && TREE_CODE (*node) != FIELD_DECL
28537 && TREE_CODE (*node) != TYPE_DECL)
28539 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28540 name);
28541 *no_add_attrs = true;
28544 return NULL_TREE;
28547 /* Set longcall attributes on all functions declared when
28548 rs6000_default_long_calls is true. */
28549 static void
28550 rs6000_set_default_type_attributes (tree type)
28552 if (rs6000_default_long_calls
28553 && (TREE_CODE (type) == FUNCTION_TYPE
28554 || TREE_CODE (type) == METHOD_TYPE))
28555 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
28556 NULL_TREE,
28557 TYPE_ATTRIBUTES (type));
28559 #if TARGET_MACHO
28560 darwin_set_default_type_attributes (type);
28561 #endif
28564 /* Return a reference suitable for calling a function with the
28565 longcall attribute. */
28567 rtx
28568 rs6000_longcall_ref (rtx call_ref)
28570 const char *call_name;
28571 tree node;
28573 if (GET_CODE (call_ref) != SYMBOL_REF)
28574 return call_ref;
28576 /* System V adds '.' to the internal name, so skip any leading dots. */
28577 call_name = XSTR (call_ref, 0);
28578 if (*call_name == '.')
28580 while (*call_name == '.')
28581 call_name++;
28583 node = get_identifier (call_name);
28584 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
28587 return force_reg (Pmode, call_ref);
28590 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
28591 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
28592 #endif
28594 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
28595 struct attribute_spec.handler. */
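/* Example usage (illustrative):
     struct __attribute__((ms_struct)) S { int a : 3; char b; };
   requests Microsoft-compatible bit-field layout for S (see
   rs6000_ms_bitfield_layout_p below).  */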
28596 static tree
28597 rs6000_handle_struct_attribute (tree *node, tree name,
28598 tree args ATTRIBUTE_UNUSED,
28599 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
28601 tree *type = NULL;
28602 if (DECL_P (*node))
28604 if (TREE_CODE (*node) == TYPE_DECL)
28605 type = &TREE_TYPE (*node);
28607 else
28608 type = node;
28610 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
28611 || TREE_CODE (*type) == UNION_TYPE)))
28613 warning (OPT_Wattributes, "%qE attribute ignored", name);
28614 *no_add_attrs = true;
28617 else if ((is_attribute_p ("ms_struct", name)
28618 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
28619 || ((is_attribute_p ("gcc_struct", name)
28620 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
28622 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
28623 name);
28624 *no_add_attrs = true;
28627 return NULL_TREE;
28630 static bool
28631 rs6000_ms_bitfield_layout_p (const_tree record_type)
28633 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
28634 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
28635 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
28638 #ifdef USING_ELFOS_H
28640 /* A get_unnamed_section callback, used for switching to toc_section. */
28642 static void
28643 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28645 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28646 && TARGET_MINIMAL_TOC
28647 && !TARGET_RELOCATABLE)
28649 if (!toc_initialized)
28651 toc_initialized = 1;
28652 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28653 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
28654 fprintf (asm_out_file, "\t.tc ");
28655 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
28656 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28657 fprintf (asm_out_file, "\n");
28659 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28660 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28661 fprintf (asm_out_file, " = .+32768\n");
28663 else
28664 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28666 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28667 && !TARGET_RELOCATABLE)
28668 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28669 else
28671 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28672 if (!toc_initialized)
28674 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28675 fprintf (asm_out_file, " = .+32768\n");
28676 toc_initialized = 1;
28681 /* Implement TARGET_ASM_INIT_SECTIONS. */
28683 static void
28684 rs6000_elf_asm_init_sections (void)
28686 toc_section
28687 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
28689 sdata2_section
28690 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
28691 SDATA2_SECTION_ASM_OP);
28694 /* Implement TARGET_SELECT_RTX_SECTION. */
28696 static section *
28697 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
28698 unsigned HOST_WIDE_INT align)
28700 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28701 return toc_section;
28702 else
28703 return default_elf_select_rtx_section (mode, x, align);
28706 /* For a SYMBOL_REF, set generic flags and then perform some
28707 target-specific processing.
28709 When the AIX ABI is requested on a non-AIX system, replace the
28710 function name with the real name (with a leading .) rather than the
28711 function descriptor name. This saves a lot of overriding code to
28712 read the prefixes. */
28714 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
28715 static void
28716 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
28718 default_encode_section_info (decl, rtl, first);
28720 if (first
28721 && TREE_CODE (decl) == FUNCTION_DECL
28722 && !TARGET_AIX
28723 && DEFAULT_ABI == ABI_AIX)
28725 rtx sym_ref = XEXP (rtl, 0);
28726 size_t len = strlen (XSTR (sym_ref, 0));
28727 char *str = XALLOCAVEC (char, len + 2);
28728 str[0] = '.';
28729 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
28730 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
28734 static inline bool
28735 compare_section_name (const char *section, const char *templ)
28737 int len;
28739 len = strlen (templ);
28740 return (strncmp (section, templ, len) == 0
28741 && (section[len] == 0 || section[len] == '.'));
28744 bool
28745 rs6000_elf_in_small_data_p (const_tree decl)
28747 if (rs6000_sdata == SDATA_NONE)
28748 return false;
28750 /* We want to merge strings, so we never consider them small data. */
28751 if (TREE_CODE (decl) == STRING_CST)
28752 return false;
28754 /* Functions are never in the small data area. */
28755 if (TREE_CODE (decl) == FUNCTION_DECL)
28756 return false;
28758 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
28760 const char *section = DECL_SECTION_NAME (decl);
28761 if (compare_section_name (section, ".sdata")
28762 || compare_section_name (section, ".sdata2")
28763 || compare_section_name (section, ".gnu.linkonce.s")
28764 || compare_section_name (section, ".sbss")
28765 || compare_section_name (section, ".sbss2")
28766 || compare_section_name (section, ".gnu.linkonce.sb")
28767 || strcmp (section, ".PPC.EMB.sdata0") == 0
28768 || strcmp (section, ".PPC.EMB.sbss0") == 0)
28769 return true;
28771 else
28773 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
28775 if (size > 0
28776 && size <= g_switch_value
28777 /* If it's not public, and we're not going to reference it there,
28778 there's no need to put it in the small data section. */
28779 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
28780 return true;
28783 return false;
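/* For example (illustrative): with -G 8, a file-scope
   'int pair[2];' (8 bytes) qualifies for the small data area here,
   while any object larger than g_switch_value does not.  */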
28786 #endif /* USING_ELFOS_H */
28788 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
28790 static bool
28791 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
28793 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
28796 /* Do not place thread-local symbol refs in the object blocks. */
28798 static bool
28799 rs6000_use_blocks_for_decl_p (const_tree decl)
28801 return !DECL_THREAD_LOCAL_P (decl);
28804 /* Return a REG that occurs in ADDR with coefficient 1.
28805 ADDR can be effectively incremented by incrementing REG.
28807 r0 is special and we must not select it as an address
28808 register by this routine since our caller will try to
28809 increment the returned register via an "la" instruction. */
28811 rtx
28812 find_addr_reg (rtx addr)
28814 while (GET_CODE (addr) == PLUS)
28816 if (GET_CODE (XEXP (addr, 0)) == REG
28817 && REGNO (XEXP (addr, 0)) != 0)
28818 addr = XEXP (addr, 0);
28819 else if (GET_CODE (XEXP (addr, 1)) == REG
28820 && REGNO (XEXP (addr, 1)) != 0)
28821 addr = XEXP (addr, 1);
28822 else if (CONSTANT_P (XEXP (addr, 0)))
28823 addr = XEXP (addr, 1);
28824 else if (CONSTANT_P (XEXP (addr, 1)))
28825 addr = XEXP (addr, 0);
28826 else
28827 gcc_unreachable ();
28829 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
28830 return addr;
28833 void
28834 rs6000_fatal_bad_address (rtx op)
28836 fatal_insn ("bad address", op);
28839 #if TARGET_MACHO
28841 typedef struct branch_island_d {
28842 tree function_name;
28843 tree label_name;
28844 int line_number;
28845 } branch_island;
28848 static vec<branch_island, va_gc> *branch_islands;
28850 /* Remember to generate a branch island for far calls to the given
28851 function. */
28853 static void
28854 add_compiler_branch_island (tree label_name, tree function_name,
28855 int line_number)
28857 branch_island bi = {function_name, label_name, line_number};
28858 vec_safe_push (branch_islands, bi);
28861 /* Generate far-jump branch islands for everything recorded in
28862 branch_islands. Invoked immediately after the last instruction of
28863 the epilogue has been emitted; the branch islands must be appended
28864 to, and contiguous with, the function body. Mach-O stubs are
28865 generated in machopic_output_stub(). */
28867 static void
28868 macho_branch_islands (void)
28870 char tmp_buf[512];
28872 while (!vec_safe_is_empty (branch_islands))
28874 branch_island *bi = &branch_islands->last ();
28875 const char *label = IDENTIFIER_POINTER (bi->label_name);
28876 const char *name = IDENTIFIER_POINTER (bi->function_name);
28877 char name_buf[512];
28878 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28879 if (name[0] == '*' || name[0] == '&')
28880 strcpy (name_buf, name+1);
28881 else
28883 name_buf[0] = '_';
28884 strcpy (name_buf+1, name);
28886 strcpy (tmp_buf, "\n");
28887 strcat (tmp_buf, label);
28888 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28889 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28890 dbxout_stabd (N_SLINE, bi->line_number);
28891 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28892 if (flag_pic)
28894 if (TARGET_LINK_STACK)
28896 char name[32];
28897 get_ppc476_thunk_name (name);
28898 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
28899 strcat (tmp_buf, name);
28900 strcat (tmp_buf, "\n");
28901 strcat (tmp_buf, label);
28902 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28904 else
28906 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
28907 strcat (tmp_buf, label);
28908 strcat (tmp_buf, "_pic\n");
28909 strcat (tmp_buf, label);
28910 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28913 strcat (tmp_buf, "\taddis r11,r11,ha16(");
28914 strcat (tmp_buf, name_buf);
28915 strcat (tmp_buf, " - ");
28916 strcat (tmp_buf, label);
28917 strcat (tmp_buf, "_pic)\n");
28919 strcat (tmp_buf, "\tmtlr r0\n");
28921 strcat (tmp_buf, "\taddi r12,r11,lo16(");
28922 strcat (tmp_buf, name_buf);
28923 strcat (tmp_buf, " - ");
28924 strcat (tmp_buf, label);
28925 strcat (tmp_buf, "_pic)\n");
28927 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
28929 else
28931 strcat (tmp_buf, ":\nlis r12,hi16(");
28932 strcat (tmp_buf, name_buf);
28933 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
28934 strcat (tmp_buf, name_buf);
28935 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
28937 output_asm_insn (tmp_buf, 0);
28938 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28939 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28940 dbxout_stabd (N_SLINE, bi->line_number);
28941 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28942 branch_islands->pop ();
28946 /* NO_PREVIOUS_DEF checks in the linked list whether the function name
28947 is already there or not. */
28949 static int
28950 no_previous_def (tree function_name)
28952 branch_island *bi;
28953 unsigned ix;
28955 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28956 if (function_name == bi->function_name)
28957 return 0;
28958 return 1;
28961 /* GET_PREV_LABEL gets the label name from the previous definition of
28962 the function. */
28964 static tree
28965 get_prev_label (tree function_name)
28967 branch_island *bi;
28968 unsigned ix;
28970 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28971 if (function_name == bi->function_name)
28972 return bi->label_name;
28973 return NULL_TREE;
28976 /* INSN is either a function call or a millicode call. It may have an
28977 unconditional jump in its delay slot.
28979 CALL_DEST is the routine we are calling. */
28981 char *
28982 output_call (rtx insn, rtx *operands, int dest_operand_number,
28983 int cookie_operand_number)
28985 static char buf[256];
28986 if (darwin_emit_branch_islands
28987 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
28988 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
28990 tree labelname;
28991 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
28993 if (no_previous_def (funname))
28995 rtx label_rtx = gen_label_rtx ();
28996 char *label_buf, temp_buf[256];
28997 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
28998 CODE_LABEL_NUMBER (label_rtx));
28999 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
29000 labelname = get_identifier (label_buf);
29001 add_compiler_branch_island (labelname, funname, insn_line (insn));
29003 else
29004 labelname = get_prev_label (funname);
29006 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
29007 instruction will reach 'foo', otherwise link as 'bl L42'".
29008 "L42" should be a 'branch island', that will do a far jump to
29009 'foo'. Branch islands are generated in
29010 macho_branch_islands(). */
29011 sprintf (buf, "jbsr %%z%d,%.246s",
29012 dest_operand_number, IDENTIFIER_POINTER (labelname));
29014 else
29015 sprintf (buf, "bl %%z%d", dest_operand_number);
29016 return buf;
29019 /* Generate PIC and indirect symbol stubs. */
29021 void
29022 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29024 unsigned int length;
29025 char *symbol_name, *lazy_ptr_name;
29026 char *local_label_0;
29027 static int label = 0;
29029 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29030 symb = (*targetm.strip_name_encoding) (symb);
29033 length = strlen (symb);
29034 symbol_name = XALLOCAVEC (char, length + 32);
29035 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29037 lazy_ptr_name = XALLOCAVEC (char, length + 32);
29038 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
29040 if (flag_pic == 2)
29041 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
29042 else
29043 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
29045 if (flag_pic == 2)
29047 fprintf (file, "\t.align 5\n");
29049 fprintf (file, "%s:\n", stub);
29050 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29052 label++;
29053 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
29054 sprintf (local_label_0, "\"L%011d$spb\"", label);
29056 fprintf (file, "\tmflr r0\n");
29057 if (TARGET_LINK_STACK)
29059 char name[32];
29060 get_ppc476_thunk_name (name);
29061 fprintf (file, "\tbl %s\n", name);
29062 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29064 else
29066 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
29067 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29069 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
29070 lazy_ptr_name, local_label_0);
29071 fprintf (file, "\tmtlr r0\n");
29072 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
29073 (TARGET_64BIT ? "ldu" : "lwzu"),
29074 lazy_ptr_name, local_label_0);
29075 fprintf (file, "\tmtctr r12\n");
29076 fprintf (file, "\tbctr\n");
29078 else
29080 fprintf (file, "\t.align 4\n");
29082 fprintf (file, "%s:\n", stub);
29083 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29085 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
29086 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
29087 (TARGET_64BIT ? "ldu" : "lwzu"),
29088 lazy_ptr_name);
29089 fprintf (file, "\tmtctr r12\n");
29090 fprintf (file, "\tbctr\n");
29093 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29094 fprintf (file, "%s:\n", lazy_ptr_name);
29095 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29096 fprintf (file, "%sdyld_stub_binding_helper\n",
29097 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
29100 /* Legitimize PIC addresses. If the address is already
29101 position-independent, we return ORIG. Newly generated
29102 position-independent addresses go into a reg. This is REG if
29103 nonzero; otherwise we allocate register(s) as necessary. */
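/* SMALL_INT: true iff X fits in a signed 16-bit immediate,
   i.e. -0x8000 <= X <= 0x7fff.  */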
29105 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
29107 rtx
29108 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
29109 rtx reg)
29111 rtx base, offset;
29113 if (reg == NULL && ! reload_in_progress && ! reload_completed)
29114 reg = gen_reg_rtx (Pmode);
29116 if (GET_CODE (orig) == CONST)
29118 rtx reg_temp;
29120 if (GET_CODE (XEXP (orig, 0)) == PLUS
29121 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
29122 return orig;
29124 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
29126 /* Use a different reg for the intermediate value, as
29127 it will be marked UNCHANGING. */
29128 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
29129 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
29130 Pmode, reg_temp);
29131 offset =
29132 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
29133 Pmode, reg);
29135 if (GET_CODE (offset) == CONST_INT)
29137 if (SMALL_INT (offset))
29138 return plus_constant (Pmode, base, INTVAL (offset));
29139 else if (! reload_in_progress && ! reload_completed)
29140 offset = force_reg (Pmode, offset);
29141 else
29143 rtx mem = force_const_mem (Pmode, orig);
29144 return machopic_legitimize_pic_address (mem, Pmode, reg);
29147 return gen_rtx_PLUS (Pmode, base, offset);
29150 /* Fall back on generic machopic code. */
29151 return machopic_legitimize_pic_address (orig, mode, reg);
29154 /* Output a .machine directive for the Darwin assembler, and call
29155 the generic start_file routine. */
29157 static void
29158 rs6000_darwin_file_start (void)
29160 static const struct
29162 const char *arg;
29163 const char *name;
29164 HOST_WIDE_INT if_set;
29165 } mapping[] = {
29166 { "ppc64", "ppc64", MASK_64BIT },
29167 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
29168 { "power4", "ppc970", 0 },
29169 { "G5", "ppc970", 0 },
29170 { "7450", "ppc7450", 0 },
29171 { "7400", "ppc7400", MASK_ALTIVEC },
29172 { "G4", "ppc7400", 0 },
29173 { "750", "ppc750", 0 },
29174 { "740", "ppc750", 0 },
29175 { "G3", "ppc750", 0 },
29176 { "604e", "ppc604e", 0 },
29177 { "604", "ppc604", 0 },
29178 { "603e", "ppc603", 0 },
29179 { "603", "ppc603", 0 },
29180 { "601", "ppc601", 0 },
29181 { NULL, "ppc", 0 } };
29182 const char *cpu_id = "";
29183 size_t i;
29185 rs6000_file_start ();
29186 darwin_file_start ();
29188 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
29190 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
29191 cpu_id = rs6000_default_cpu;
29193 if (global_options_set.x_rs6000_cpu_index)
29194 cpu_id = processor_target_table[rs6000_cpu_index].name;
29196 /* Look through the mapping array. Pick the first name that either
29197 matches the argument, has a bit set in IF_SET that is also set
29198 in the target flags, or has a NULL name. */
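/* For example (illustrative): -mcpu=G5 matches the "ppc970" entry
   above, and a 64-bit default selects "ppc64" via MASK_64BIT even
   when no -mcpu argument matches.  */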
29200 i = 0;
29201 while (mapping[i].arg != NULL
29202 && strcmp (mapping[i].arg, cpu_id) != 0
29203 && (mapping[i].if_set & rs6000_isa_flags) == 0)
29204 i++;
29206 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
29209 #endif /* TARGET_MACHO */
29211 #if TARGET_ELF
29212 static int
29213 rs6000_elf_reloc_rw_mask (void)
29215 if (flag_pic)
29216 return 3;
29217 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29218 return 2;
29219 else
29220 return 0;
29223 /* Record an element in the table of global constructors. SYMBOL is
29224 a SYMBOL_REF of the function to be called; PRIORITY is a number
29225 between 0 and MAX_INIT_PRIORITY.
29227 This differs from default_named_section_asm_out_constructor in
29228 that we have special handling for -mrelocatable. */
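/* For example (illustrative): MAX_INIT_PRIORITY is 65535, so a
   constructor with priority 65534 is placed in section
   ".ctors.00001".  */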
29230 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
29231 static void
29232 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
29234 const char *section = ".ctors";
29235 char buf[16];
29237 if (priority != DEFAULT_INIT_PRIORITY)
29239 sprintf (buf, ".ctors.%.5u",
29240 /* Invert the numbering so the linker puts us in the proper
29241 order; constructors are run from right to left, and the
29242 linker sorts in increasing order. */
29243 MAX_INIT_PRIORITY - priority);
29244 section = buf;
29247 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29248 assemble_align (POINTER_SIZE);
29250 if (TARGET_RELOCATABLE)
29252 fputs ("\t.long (", asm_out_file);
29253 output_addr_const (asm_out_file, symbol);
29254 fputs (")@fixup\n", asm_out_file);
29256 else
29257 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
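/* Illustrative sketch, not part of the original rs6000.c: the priority
   inversion above with MAX_INIT_PRIORITY == 65535.  A priority-100
   constructor (a lower number runs earlier) lands in ".ctors.65435",
   which the linker sorts after priority 200's ".ctors.65335"; since
   .ctors is executed right to left, priority 100 still runs first.  */
static void
example_ctor_section_name (char buf[16], int priority)
{
  sprintf (buf, ".ctors.%.5u", 65535 - priority); /* 100 -> ".ctors.65435" */
}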
29260 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
29261 static void
29262 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
29264 const char *section = ".dtors";
29265 char buf[16];
29267 if (priority != DEFAULT_INIT_PRIORITY)
29269 sprintf (buf, ".dtors.%.5u",
29270 /* Invert the numbering so the linker puts us in the proper
29271 order; destructors are run from left to right, and the
29272 linker sorts in increasing order. */
29273 MAX_INIT_PRIORITY - priority);
29274 section = buf;
29277 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29278 assemble_align (POINTER_SIZE);
29280 if (TARGET_RELOCATABLE)
29282 fputs ("\t.long (", asm_out_file);
29283 output_addr_const (asm_out_file, symbol);
29284 fputs (")@fixup\n", asm_out_file);
29286 else
29287 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29290 void
29291 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
29293 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
29295 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
29296 ASM_OUTPUT_LABEL (file, name);
29297 fputs (DOUBLE_INT_ASM_OP, file);
29298 rs6000_output_function_entry (file, name);
29299 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
29300 if (DOT_SYMBOLS)
29302 fputs ("\t.size\t", file);
29303 assemble_name (file, name);
29304 fputs (",24\n\t.type\t.", file);
29305 assemble_name (file, name);
29306 fputs (",@function\n", file);
29307 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
29309 fputs ("\t.globl\t.", file);
29310 assemble_name (file, name);
29311 putc ('\n', file);
29314 else
29315 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29316 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29317 rs6000_output_function_entry (file, name);
29318 fputs (":\n", file);
29319 return;
29322 if (TARGET_RELOCATABLE
29323 && !TARGET_SECURE_PLT
29324 && (get_pool_size () != 0 || crtl->profile)
29325 && uses_TOC ())
29327 char buf[256];
29329 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
29331 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
29332 fprintf (file, "\t.long ");
29333 assemble_name (file, buf);
29334 putc ('-', file);
29335 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29336 assemble_name (file, buf);
29337 putc ('\n', file);
29340 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29341 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29343 if (DEFAULT_ABI == ABI_AIX)
29345 const char *desc_name, *orig_name;
29347 orig_name = (*targetm.strip_name_encoding) (name);
29348 desc_name = orig_name;
29349 while (*desc_name == '.')
29350 desc_name++;
29352 if (TREE_PUBLIC (decl))
29353 fprintf (file, "\t.globl %s\n", desc_name);
29355 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29356 fprintf (file, "%s:\n", desc_name);
29357 fprintf (file, "\t.long %s\n", orig_name);
29358 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
29359 fputs ("\t.long 0\n", file);
29360 fprintf (file, "\t.previous\n");
29362 ASM_OUTPUT_LABEL (file, name);
29365 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
29366 static void
29367 rs6000_elf_file_end (void)
29369 #ifdef HAVE_AS_GNU_ATTRIBUTE
29370 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
29372 if (rs6000_passes_float)
29373 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
29374 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
29375 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
29376 : 2));
29377 if (rs6000_passes_vector)
29378 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
29379 (TARGET_ALTIVEC_ABI ? 2
29380 : TARGET_SPE_ABI ? 3
29381 : 1));
29382 if (rs6000_returns_struct)
29383 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
29384 aix_struct_return ? 2 : 1);
29386 #endif
29387 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29388 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
29389 file_end_indicate_exec_stack ();
29390 #endif
29392 #endif
29394 #if TARGET_XCOFF
29395 static void
29396 rs6000_xcoff_asm_output_anchor (rtx symbol)
29398 char buffer[100];
29400 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
29401 SYMBOL_REF_BLOCK_OFFSET (symbol));
29402 fprintf (asm_out_file, "%s", SET_ASM_OP);
29403 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
29404 fprintf (asm_out_file, ",");
29405 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
29406 fprintf (asm_out_file, "\n");
29409 static void
29410 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
29412 fputs (GLOBAL_ASM_OP, stream);
29413 RS6000_OUTPUT_BASENAME (stream, name);
29414 putc ('\n', stream);
29417 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
29418 points to the section string variable. */
29420 static void
29421 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
29423 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
29424 *(const char *const *) directive,
29425 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29428 /* Likewise for read-write sections. */
29430 static void
29431 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
29433 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
29434 *(const char *const *) directive,
29435 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29438 static void
29439 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
29441 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
29442 *(const char *const *) directive,
29443 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29446 /* A get_unnamed_section callback, used for switching to toc_section. */
29448 static void
29449 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
29451 if (TARGET_MINIMAL_TOC)
29453 /* toc_section is always selected at least once from
29454 rs6000_xcoff_file_start, so the TOC label is guaranteed
29455 to be defined exactly once in each file. */
29456 if (!toc_initialized)
29458 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
29459 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
29460 toc_initialized = 1;
29462 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
29463 (TARGET_32BIT ? "" : ",3"));
29465 else
29466 fputs ("\t.toc\n", asm_out_file);
29469 /* Implement TARGET_ASM_INIT_SECTIONS. */
29471 static void
29472 rs6000_xcoff_asm_init_sections (void)
29474 read_only_data_section
29475 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29476 &xcoff_read_only_section_name);
29478 private_data_section
29479 = get_unnamed_section (SECTION_WRITE,
29480 rs6000_xcoff_output_readwrite_section_asm_op,
29481 &xcoff_private_data_section_name);
29483 tls_data_section
29484 = get_unnamed_section (SECTION_TLS,
29485 rs6000_xcoff_output_tls_section_asm_op,
29486 &xcoff_tls_data_section_name);
29488 tls_private_data_section
29489 = get_unnamed_section (SECTION_TLS,
29490 rs6000_xcoff_output_tls_section_asm_op,
29491 &xcoff_private_data_section_name);
29493 read_only_private_data_section
29494 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29495 &xcoff_private_data_section_name);
29497 toc_section
29498 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
29500 readonly_data_section = read_only_data_section;
29501 exception_section = data_section;
29504 static int
29505 rs6000_xcoff_reloc_rw_mask (void)
29507 return 3;
29510 static void
29511 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
29512 tree decl ATTRIBUTE_UNUSED)
29514 int smclass;
29515 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
29517 if (flags & SECTION_CODE)
29518 smclass = 0;
29519 else if (flags & SECTION_TLS)
29520 smclass = 3;
29521 else if (flags & SECTION_WRITE)
29522 smclass = 2;
29523 else
29524 smclass = 1;
29526 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
29527 (flags & SECTION_CODE) ? "." : "",
29528 name, suffix[smclass], flags & SECTION_ENTSIZE);
29531 #define IN_NAMED_SECTION(DECL) \
29532 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29533 && DECL_SECTION_NAME (DECL) != NULL)
29535 static section *
29536 rs6000_xcoff_select_section (tree decl, int reloc,
29537 unsigned HOST_WIDE_INT align)
29539 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29540 named section. */
29541 if (align > BIGGEST_ALIGNMENT)
29543 resolve_unique_section (decl, reloc, true);
29544 if (IN_NAMED_SECTION (decl))
29545 return get_named_section (decl, NULL, reloc);
29548 if (decl_readonly_section (decl, reloc))
29550 if (TREE_PUBLIC (decl))
29551 return read_only_data_section;
29552 else
29553 return read_only_private_data_section;
29555 else
29557 #if HAVE_AS_TLS
29558 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29560 if (TREE_PUBLIC (decl))
29561 return tls_data_section;
29562 else if (bss_initializer_p (decl))
29564 /* Convert to COMMON to emit in BSS. */
29565 DECL_COMMON (decl) = 1;
29566 return tls_comm_section;
29568 else
29569 return tls_private_data_section;
29571 else
29572 #endif
29573 if (TREE_PUBLIC (decl))
29574 return data_section;
29575 else
29576 return private_data_section;
29580 static void
29581 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
29583 const char *name;
29585 /* Use select_section for private data and uninitialized data with
29586 alignment <= BIGGEST_ALIGNMENT. */
29587 if (!TREE_PUBLIC (decl)
29588 || DECL_COMMON (decl)
29589 || (DECL_INITIAL (decl) == NULL_TREE
29590 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
29591 || DECL_INITIAL (decl) == error_mark_node
29592 || (flag_zero_initialized_in_bss
29593 && initializer_zerop (DECL_INITIAL (decl))))
29594 return;
29596 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
29597 name = (*targetm.strip_name_encoding) (name);
29598 set_decl_section_name (decl, name);
29601 /* Select section for constant in constant pool.
29603 On RS/6000, all constants are in the private read-only data area.
29604 However, if this is being placed in the TOC it must be output as a
29605 toc entry. */
29607 static section *
29608 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
29609 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
29611 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
29612 return toc_section;
29613 else
29614 return read_only_private_data_section;
29617 /* Remove any trailing [DS] or the like from the symbol name. */
29619 static const char *
29620 rs6000_xcoff_strip_name_encoding (const char *name)
29622 size_t len;
29623 if (*name == '*')
29624 name++;
29625 len = strlen (name);
29626 if (name[len - 1] == ']')
29627 return ggc_alloc_string (name, len - 4);
29628 else
29629 return name;
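/* Illustrative usage, not part of the original rs6000.c (the
   example_* name is hypothetical): the stripping above drops a leading
   '*' and, when the name ends in ']', the final four characters, i.e.
   a bracketed two-character XCOFF storage-mapping class such as
   "[DS]".  */
static void
example_strip_name_encoding (void)
{
  const char *a = rs6000_xcoff_strip_name_encoding ("*foo[DS]"); /* "foo" */
  const char *b = rs6000_xcoff_strip_name_encoding ("bar");	 /* "bar" */
  (void) a;
  (void) b;
}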
29632 /* Section attributes. AIX is always PIC. */
29634 static unsigned int
29635 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
29637 unsigned int align;
29638 unsigned int flags = default_section_type_flags (decl, name, reloc);
29640 /* Align to at least UNIT size. */
29641 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
29642 align = MIN_UNITS_PER_WORD;
29643 else
29644 /* Increase alignment of large objects if not already stricter. */
29645 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
29646 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
29647 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
29649 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
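/* Illustrative sketch, not part of the original rs6000.c: the encoding
   above packs log2 of the alignment into the SECTION_ENTSIZE bits of
   the flags, so an 8-byte-aligned decl contributes exact_log2 (8) == 3,
   which rs6000_xcoff_asm_named_section later prints back as the
   trailing ",3" csect alignment operand.  A plain C model, assuming
   align_bytes is a power of two: */
static unsigned int
example_pack_align (unsigned int flags, unsigned int align_bytes)
{
  unsigned int lg = 0;
  while ((2u << lg) <= align_bytes)	/* log2 of a power of two */
    lg++;
  return flags | lg;			/* lands in the SECTION_ENTSIZE bits */
}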
29652 /* Output at beginning of assembler file.
29654 Initialize the section names for the RS/6000 at this point.
29656 Specify filename, including full path, to assembler.
29658 We want to go into the TOC section so at least one .toc will be emitted.
29659 Also, in order to output proper .bs/.es pairs, we need at least one static
29660 [RW] section emitted.
29662 Finally, declare mcount when profiling to make the assembler happy. */
29664 static void
29665 rs6000_xcoff_file_start (void)
29667 rs6000_gen_section_name (&xcoff_bss_section_name,
29668 main_input_filename, ".bss_");
29669 rs6000_gen_section_name (&xcoff_private_data_section_name,
29670 main_input_filename, ".rw_");
29671 rs6000_gen_section_name (&xcoff_read_only_section_name,
29672 main_input_filename, ".ro_");
29673 rs6000_gen_section_name (&xcoff_tls_data_section_name,
29674 main_input_filename, ".tls_");
29675 rs6000_gen_section_name (&xcoff_tbss_section_name,
29676 main_input_filename, ".tbss_[UL]");
29678 fputs ("\t.file\t", asm_out_file);
29679 output_quoted_string (asm_out_file, main_input_filename);
29680 fputc ('\n', asm_out_file);
29681 if (write_symbols != NO_DEBUG)
29682 switch_to_section (private_data_section);
29683 switch_to_section (text_section);
29684 if (profile_flag)
29685 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
29686 rs6000_file_start ();
29689 /* Output at end of assembler file.
29690 On the RS/6000, referencing data should automatically pull in text. */
29692 static void
29693 rs6000_xcoff_file_end (void)
29695 switch_to_section (text_section);
29696 fputs ("_section_.text:\n", asm_out_file);
29697 switch_to_section (data_section);
29698 fputs (TARGET_32BIT
29699 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
29700 asm_out_file);
29703 struct declare_alias_data
29705 FILE *file;
29706 bool function_descriptor;
29709 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
29711 static bool
29712 rs6000_declare_alias (struct symtab_node *n, void *d)
29714 struct declare_alias_data *data = (struct declare_alias_data *)d;
29715 /* The main symbol is output specially, because the varasm machinery does
29716 part of the job for us; we do not need to declare .globl/.lglobl and such. */
29717 if (!n->alias || n->weakref)
29718 return false;
29720 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
29721 return false;
29723 /* Prevent assemble_alias from trying to use .set pseudo operation
29724 that does not behave as expected by the middle-end. */
29725 TREE_ASM_WRITTEN (n->decl) = true;
29727 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
29728 char *buffer = (char *) alloca (strlen (name) + 2);
29729 char *p;
29730 int dollar_inside = 0;
29732 strcpy (buffer, name);
29733 p = strchr (buffer, '$');
29734 while (p) {
29735 *p = '_';
29736 dollar_inside++;
29737 p = strchr (p + 1, '$');
29739 if (TREE_PUBLIC (n->decl))
29741 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
29743 if (dollar_inside) {
29744 if (data->function_descriptor)
29745 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
29746 else
29747 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
29749 if (data->function_descriptor)
29750 fputs ("\t.globl .", data->file);
29751 else
29752 fputs ("\t.globl ", data->file);
29753 RS6000_OUTPUT_BASENAME (data->file, buffer);
29754 putc ('\n', data->file);
29756 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
29757 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
29759 else
29761 if (dollar_inside)
29763 if (data->function_descriptor)
29764 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
29765 else
29766 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
29768 if (data->function_descriptor)
29769 fputs ("\t.lglobl .", data->file);
29770 else
29771 fputs ("\t.lglobl ", data->file);
29772 RS6000_OUTPUT_BASENAME (data->file, buffer);
29773 putc ('\n', data->file);
29775 if (data->function_descriptor)
29776 fputs (".", data->file);
29777 RS6000_OUTPUT_BASENAME (data->file, buffer);
29778 fputs (":\n", data->file);
29779 return false;
29782 /* This macro produces the initial definition of a function name.
29783 On the RS/6000, we need to place an extra '.' in the function name and
29784 output the function descriptor.
29785 Dollar signs are converted to underscores.
29787 The csect for the function will have already been created when
29788 text_section was selected. We do have to go back to that csect, however.
29790 The third and fourth parameters to the .function pseudo-op (16 and 044)
29791 are placeholders which no longer have any use.
29793 Because the AIX assembler's .set command has unexpected semantics, we output
29794 all aliases as alternative labels in front of the definition. */
29796 void
29797 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
29799 char *buffer = (char *) alloca (strlen (name) + 1);
29800 char *p;
29801 int dollar_inside = 0;
29802 struct declare_alias_data data = {file, false};
29804 strcpy (buffer, name);
29805 p = strchr (buffer, '$');
29806 while (p) {
29807 *p = '_';
29808 dollar_inside++;
29809 p = strchr (p + 1, '$');
29811 if (TREE_PUBLIC (decl))
29813 if (!RS6000_WEAK || !DECL_WEAK (decl))
29815 if (dollar_inside) {
29816 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
29817 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
29819 fputs ("\t.globl .", file);
29820 RS6000_OUTPUT_BASENAME (file, buffer);
29821 putc ('\n', file);
29824 else
29826 if (dollar_inside) {
29827 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
29828 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
29830 fputs ("\t.lglobl .", file);
29831 RS6000_OUTPUT_BASENAME (file, buffer);
29832 putc ('\n', file);
29834 fputs ("\t.csect ", file);
29835 RS6000_OUTPUT_BASENAME (file, buffer);
29836 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
29837 RS6000_OUTPUT_BASENAME (file, buffer);
29838 fputs (":\n", file);
29839 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29840 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
29841 RS6000_OUTPUT_BASENAME (file, buffer);
29842 fputs (", TOC[tc0], 0\n", file);
29843 in_section = NULL;
29844 switch_to_section (function_section (decl));
29845 putc ('.', file);
29846 RS6000_OUTPUT_BASENAME (file, buffer);
29847 fputs (":\n", file);
29848 data.function_descriptor = true;
29849 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29850 if (write_symbols != NO_DEBUG && !DECL_IGNORED_P (decl))
29851 xcoffout_declare_function (file, decl, buffer);
29852 return;
29855 /* This macro produces the initial definition of an object (variable) name.
29856 Because the AIX assembler's .set command has unexpected semantics, we output
29857 all aliases as alternative labels in front of the definition. */
29859 void
29860 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
29862 struct declare_alias_data data = {file, false};
29863 RS6000_OUTPUT_BASENAME (file, name);
29864 fputs (":\n", file);
29865 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29868 #ifdef HAVE_AS_TLS
29869 static void
29870 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
29872 rtx symbol;
29873 int flags;
29875 default_encode_section_info (decl, rtl, first);
29877 /* Careful not to prod global register variables. */
29878 if (!MEM_P (rtl))
29879 return;
29880 symbol = XEXP (rtl, 0);
29881 if (GET_CODE (symbol) != SYMBOL_REF)
29882 return;
29884 flags = SYMBOL_REF_FLAGS (symbol);
29886 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29887 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
29889 SYMBOL_REF_FLAGS (symbol) = flags;
29891 #endif /* HAVE_AS_TLS */
29892 #endif /* TARGET_XCOFF */
29894 /* Compute a (partial) cost for rtx X. Return true if the complete
29895 cost has been computed, and false if subexpressions should be
29896 scanned. In either case, *TOTAL contains the cost result. */
29898 static bool
29899 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
29900 int *total, bool speed)
29902 enum machine_mode mode = GET_MODE (x);
29904 switch (code)
29906 /* On the RS/6000, if it is valid in the insn, it is free. */
29907 case CONST_INT:
29908 if (((outer_code == SET
29909 || outer_code == PLUS
29910 || outer_code == MINUS)
29911 && (satisfies_constraint_I (x)
29912 || satisfies_constraint_L (x)))
29913 || (outer_code == AND
29914 && (satisfies_constraint_K (x)
29915 || (mode == SImode
29916 ? satisfies_constraint_L (x)
29917 : satisfies_constraint_J (x))
29918 || mask_operand (x, mode)
29919 || (mode == DImode
29920 && mask64_operand (x, DImode))))
29921 || ((outer_code == IOR || outer_code == XOR)
29922 && (satisfies_constraint_K (x)
29923 || (mode == SImode
29924 ? satisfies_constraint_L (x)
29925 : satisfies_constraint_J (x))))
29926 || outer_code == ASHIFT
29927 || outer_code == ASHIFTRT
29928 || outer_code == LSHIFTRT
29929 || outer_code == ROTATE
29930 || outer_code == ROTATERT
29931 || outer_code == ZERO_EXTRACT
29932 || (outer_code == MULT
29933 && satisfies_constraint_I (x))
29934 || ((outer_code == DIV || outer_code == UDIV
29935 || outer_code == MOD || outer_code == UMOD)
29936 && exact_log2 (INTVAL (x)) >= 0)
29937 || (outer_code == COMPARE
29938 && (satisfies_constraint_I (x)
29939 || satisfies_constraint_K (x)))
29940 || ((outer_code == EQ || outer_code == NE)
29941 && (satisfies_constraint_I (x)
29942 || satisfies_constraint_K (x)
29943 || (mode == SImode
29944 ? satisfies_constraint_L (x)
29945 : satisfies_constraint_J (x))))
29946 || (outer_code == GTU
29947 && satisfies_constraint_I (x))
29948 || (outer_code == LTU
29949 && satisfies_constraint_P (x)))
29951 *total = 0;
29952 return true;
29954 else if ((outer_code == PLUS
29955 && reg_or_add_cint_operand (x, VOIDmode))
29956 || (outer_code == MINUS
29957 && reg_or_sub_cint_operand (x, VOIDmode))
29958 || ((outer_code == SET
29959 || outer_code == IOR
29960 || outer_code == XOR)
29961 && (INTVAL (x)
29962 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
29964 *total = COSTS_N_INSNS (1);
29965 return true;
29967 /* FALLTHRU */
29969 case CONST_DOUBLE:
29970 case CONST_WIDE_INT:
29971 case CONST:
29972 case HIGH:
29973 case SYMBOL_REF:
29974 case MEM:
29975 /* When optimizing for size, MEM should be slightly more expensive
29976 than generating the address, e.g., (plus (reg) (const)).
29977 L1 cache latency is about two instructions. */
29978 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
29979 return true;
29981 case LABEL_REF:
29982 *total = 0;
29983 return true;
29985 case PLUS:
29986 case MINUS:
29987 if (FLOAT_MODE_P (mode))
29988 *total = rs6000_cost->fp;
29989 else
29990 *total = COSTS_N_INSNS (1);
29991 return false;
29993 case MULT:
29994 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29995 && satisfies_constraint_I (XEXP (x, 1)))
29997 if (INTVAL (XEXP (x, 1)) >= -256
29998 && INTVAL (XEXP (x, 1)) <= 255)
29999 *total = rs6000_cost->mulsi_const9;
30000 else
30001 *total = rs6000_cost->mulsi_const;
30003 else if (mode == SFmode)
30004 *total = rs6000_cost->fp;
30005 else if (FLOAT_MODE_P (mode))
30006 *total = rs6000_cost->dmul;
30007 else if (mode == DImode)
30008 *total = rs6000_cost->muldi;
30009 else
30010 *total = rs6000_cost->mulsi;
30011 return false;
30013 case FMA:
30014 if (mode == SFmode)
30015 *total = rs6000_cost->fp;
30016 else
30017 *total = rs6000_cost->dmul;
30018 break;
30020 case DIV:
30021 case MOD:
30022 if (FLOAT_MODE_P (mode))
30024 *total = mode == DFmode ? rs6000_cost->ddiv
30025 : rs6000_cost->sdiv;
30026 return false;
30028 /* FALLTHRU */
30030 case UDIV:
30031 case UMOD:
30032 if (GET_CODE (XEXP (x, 1)) == CONST_INT
30033 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
30035 if (code == DIV || code == MOD)
30036 /* Shift, addze */
30037 *total = COSTS_N_INSNS (2);
30038 else
30039 /* Shift */
30040 *total = COSTS_N_INSNS (1);
30042 else
30044 if (GET_MODE (XEXP (x, 1)) == DImode)
30045 *total = rs6000_cost->divdi;
30046 else
30047 *total = rs6000_cost->divsi;
30049 /* Add in shift and subtract for MOD. */
30050 if (code == MOD || code == UMOD)
30051 *total += COSTS_N_INSNS (2);
30052 return false;
30054 case CTZ:
30055 case FFS:
30056 *total = COSTS_N_INSNS (4);
30057 return false;
30059 case POPCOUNT:
30060 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
30061 return false;
30063 case PARITY:
30064 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
30065 return false;
30067 case NOT:
30068 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
30070 *total = 0;
30071 return false;
30073 /* FALLTHRU */
30075 case AND:
30076 case CLZ:
30077 case IOR:
30078 case XOR:
30079 case ZERO_EXTRACT:
30080 *total = COSTS_N_INSNS (1);
30081 return false;
30083 case ASHIFT:
30084 case ASHIFTRT:
30085 case LSHIFTRT:
30086 case ROTATE:
30087 case ROTATERT:
30088 /* Handle mul_highpart. */
30089 if (outer_code == TRUNCATE
30090 && GET_CODE (XEXP (x, 0)) == MULT)
30092 if (mode == DImode)
30093 *total = rs6000_cost->muldi;
30094 else
30095 *total = rs6000_cost->mulsi;
30096 return true;
30098 else if (outer_code == AND)
30099 *total = 0;
30100 else
30101 *total = COSTS_N_INSNS (1);
30102 return false;
30104 case SIGN_EXTEND:
30105 case ZERO_EXTEND:
30106 if (GET_CODE (XEXP (x, 0)) == MEM)
30107 *total = 0;
30108 else
30109 *total = COSTS_N_INSNS (1);
30110 return false;
30112 case COMPARE:
30113 case NEG:
30114 case ABS:
30115 if (!FLOAT_MODE_P (mode))
30117 *total = COSTS_N_INSNS (1);
30118 return false;
30120 /* FALLTHRU */
30122 case FLOAT:
30123 case UNSIGNED_FLOAT:
30124 case FIX:
30125 case UNSIGNED_FIX:
30126 case FLOAT_TRUNCATE:
30127 *total = rs6000_cost->fp;
30128 return false;
30130 case FLOAT_EXTEND:
30131 if (mode == DFmode)
30132 *total = 0;
30133 else
30134 *total = rs6000_cost->fp;
30135 return false;
30137 case UNSPEC:
30138 switch (XINT (x, 1))
30140 case UNSPEC_FRSP:
30141 *total = rs6000_cost->fp;
30142 return true;
30144 default:
30145 break;
30147 break;
30149 case CALL:
30150 case IF_THEN_ELSE:
30151 if (!speed)
30153 *total = COSTS_N_INSNS (1);
30154 return true;
30156 else if (FLOAT_MODE_P (mode)
30157 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
30159 *total = rs6000_cost->fp;
30160 return false;
30162 break;
30164 case EQ:
30165 case GTU:
30166 case LTU:
30167 /* Carry bit requires mode == Pmode.
30168 NEG or PLUS already counted so only add one. */
30169 if (mode == Pmode
30170 && (outer_code == NEG || outer_code == PLUS))
30172 *total = COSTS_N_INSNS (1);
30173 return true;
30175 if (outer_code == SET)
30177 if (XEXP (x, 1) == const0_rtx)
30179 if (TARGET_ISEL && !TARGET_MFCRF)
30180 *total = COSTS_N_INSNS (8);
30181 else
30182 *total = COSTS_N_INSNS (2);
30183 return true;
30185 else if (mode == Pmode)
30187 *total = COSTS_N_INSNS (3);
30188 return false;
30191 /* FALLTHRU */
30193 case GT:
30194 case LT:
30195 case UNORDERED:
30196 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
30198 if (TARGET_ISEL && !TARGET_MFCRF)
30199 *total = COSTS_N_INSNS (8);
30200 else
30201 *total = COSTS_N_INSNS (2);
30202 return true;
30204 /* CC COMPARE. */
30205 if (outer_code == COMPARE)
30207 *total = 0;
30208 return true;
30210 break;
30212 default:
30213 break;
30216 return false;
30219 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
30221 static bool
30222 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
30223 bool speed)
30225 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
30227 fprintf (stderr,
30228 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
30229 "opno = %d, total = %d, speed = %s, x:\n",
30230 ret ? "complete" : "scan inner",
30231 GET_RTX_NAME (code),
30232 GET_RTX_NAME (outer_code),
30233 opno,
30234 *total,
30235 speed ? "true" : "false");
30237 debug_rtx (x);
30239 return ret;
30242 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
30244 static int
30245 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
30246 addr_space_t as, bool speed)
30248 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
30250 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
30251 ret, speed ? "true" : "false");
30252 debug_rtx (x);
30254 return ret;
30258 /* A C expression returning the cost of moving data from a register of class
30259 CLASS1 to one of CLASS2. */
30261 static int
30262 rs6000_register_move_cost (enum machine_mode mode,
30263 reg_class_t from, reg_class_t to)
30265 int ret;
30267 if (TARGET_DEBUG_COST)
30268 dbg_cost_ctrl++;
30270 /* Moves from/to GENERAL_REGS. */
30271 if (reg_classes_intersect_p (to, GENERAL_REGS)
30272 || reg_classes_intersect_p (from, GENERAL_REGS))
30274 reg_class_t rclass = from;
30276 if (! reg_classes_intersect_p (to, GENERAL_REGS))
30277 rclass = to;
30279 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
30280 ret = (rs6000_memory_move_cost (mode, rclass, false)
30281 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
30283 /* It's more expensive to move CR_REGS than CR0_REGS because of the
30284 shift. */
30285 else if (rclass == CR_REGS)
30286 ret = 4;
30288 /* For those processors that have slow LR/CTR moves, make them more
30289 expensive than memory in order to bias spills to memory. */
30290 else if ((rs6000_cpu == PROCESSOR_POWER6
30291 || rs6000_cpu == PROCESSOR_POWER7
30292 || rs6000_cpu == PROCESSOR_POWER8)
30293 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
30294 ret = 6 * hard_regno_nregs[0][mode];
30296 else
30297 /* A move will cost one instruction per GPR moved. */
30298 ret = 2 * hard_regno_nregs[0][mode];
30301 /* If we have VSX, we can easily move between FPR or Altivec registers. */
30302 else if (VECTOR_MEM_VSX_P (mode)
30303 && reg_classes_intersect_p (to, VSX_REGS)
30304 && reg_classes_intersect_p (from, VSX_REGS))
30305 ret = 2 * hard_regno_nregs[32][mode];
30307 /* Moving between two similar registers is just one instruction. */
30308 else if (reg_classes_intersect_p (to, from))
30309 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
30311 /* Everything else has to go through GENERAL_REGS. */
30312 else
30313 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
30314 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
30316 if (TARGET_DEBUG_COST)
30318 if (dbg_cost_ctrl == 1)
30319 fprintf (stderr,
30320 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
30321 ret, GET_MODE_NAME (mode), reg_class_names[from],
30322 reg_class_names[to]);
30323 dbg_cost_ctrl--;
30326 return ret;
30329 /* A C expression returning the cost of moving data of mode MODE between a
30330 register and memory. */
30332 static int
30333 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
30334 bool in ATTRIBUTE_UNUSED)
30336 int ret;
30338 if (TARGET_DEBUG_COST)
30339 dbg_cost_ctrl++;
30341 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
30342 ret = 4 * hard_regno_nregs[0][mode];
30343 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
30344 || reg_classes_intersect_p (rclass, VSX_REGS)))
30345 ret = 4 * hard_regno_nregs[32][mode];
30346 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
30347 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
30348 else
30349 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
30351 if (TARGET_DEBUG_COST)
30353 if (dbg_cost_ctrl == 1)
30354 fprintf (stderr,
30355 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
30356 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
30357 dbg_cost_ctrl--;
30360 return ret;
30363 /* Returns a code for a target-specific builtin that implements
30364 reciprocal of the function, or NULL_TREE if not available. */
30366 static tree
30367 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
30368 bool sqrt ATTRIBUTE_UNUSED)
30370 if (optimize_insn_for_size_p ())
30371 return NULL_TREE;
30373 if (md_fn)
30374 switch (fn)
30376 case VSX_BUILTIN_XVSQRTDP:
30377 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
30378 return NULL_TREE;
30380 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
30382 case VSX_BUILTIN_XVSQRTSP:
30383 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
30384 return NULL_TREE;
30386 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
30388 default:
30389 return NULL_TREE;
30392 else
30393 switch (fn)
30395 case BUILT_IN_SQRT:
30396 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
30397 return NULL_TREE;
30399 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
30401 case BUILT_IN_SQRTF:
30402 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
30403 return NULL_TREE;
30405 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
30407 default:
30408 return NULL_TREE;
30412 /* Load up a constant. If the mode is a vector mode, splat the value across
30413 all of the vector elements. */
30415 static rtx
30416 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
30418 rtx reg;
30420 if (mode == SFmode || mode == DFmode)
30422 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
30423 reg = force_reg (mode, d);
30425 else if (mode == V4SFmode)
30427 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
30428 rtvec v = gen_rtvec (4, d, d, d, d);
30429 reg = gen_reg_rtx (mode);
30430 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30432 else if (mode == V2DFmode)
30434 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
30435 rtvec v = gen_rtvec (2, d, d);
30436 reg = gen_reg_rtx (mode);
30437 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30439 else
30440 gcc_unreachable ();
30442 return reg;
30445 /* Generate an FMA instruction. */
30447 static void
30448 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
30450 enum machine_mode mode = GET_MODE (target);
30451 rtx dst;
30453 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30454 gcc_assert (dst != NULL);
30456 if (dst != target)
30457 emit_move_insn (target, dst);
30460 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
30462 static void
30463 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
30465 enum machine_mode mode = GET_MODE (target);
30466 rtx dst;
30468 /* Altivec does not support fms directly;
30469 generate in terms of fma in that case. */
30470 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
30471 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
30472 else
30474 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
30475 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30477 gcc_assert (dst != NULL);
30479 if (dst != target)
30480 emit_move_insn (target, dst);
30483 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
30485 static void
30486 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
30488 enum machine_mode mode = GET_MODE (dst);
30489 rtx r;
30491 /* This is a tad more complicated, since the fnma_optab is for
30492 a different expression: fma(-m1, m2, a), which is the same
30493 thing except in the case of signed zeros.
30495 Fortunately we know that if FMA is supported that FNMSUB is
30496 also supported in the ISA. Just expand it directly. */
30498 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
30500 r = gen_rtx_NEG (mode, a);
30501 r = gen_rtx_FMA (mode, m1, m2, r);
30502 r = gen_rtx_NEG (mode, r);
30503 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
30506 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
30507 add a reg_note saying that this was a division. Support both scalar and
30508 vector divide. Assumes no trapping math and finite arguments. */
30510 void
30511 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
30513 enum machine_mode mode = GET_MODE (dst);
30514 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
30515 int i;
30517 /* Low precision estimates guarantee 5 bits of accuracy. High
30518 precision estimates guarantee 14 bits of accuracy. SFmode
30519 requires 23 bits of accuracy. DFmode requires 52 bits of
30520 accuracy. Each pass at least doubles the accuracy, leading
30521 to the following. */
30522 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30523 if (mode == DFmode || mode == V2DFmode)
30524 passes++;
30526 enum insn_code code = optab_handler (smul_optab, mode);
30527 insn_gen_fn gen_mul = GEN_FCN (code);
30529 gcc_assert (code != CODE_FOR_nothing);
30531 one = rs6000_load_constant_and_splat (mode, dconst1);
30533 /* x0 = 1./d estimate */
30534 x0 = gen_reg_rtx (mode);
30535 emit_insn (gen_rtx_SET (VOIDmode, x0,
30536 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
30537 UNSPEC_FRES)));
30539 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
30540 if (passes > 1) {
30542 /* e0 = 1. - d * x0 */
30543 e0 = gen_reg_rtx (mode);
30544 rs6000_emit_nmsub (e0, d, x0, one);
30546 /* x1 = x0 + e0 * x0 */
30547 x1 = gen_reg_rtx (mode);
30548 rs6000_emit_madd (x1, e0, x0, x0);
30550 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
30551 ++i, xprev = xnext, eprev = enext) {
30553 /* enext = eprev * eprev */
30554 enext = gen_reg_rtx (mode);
30555 emit_insn (gen_mul (enext, eprev, eprev));
30557 /* xnext = xprev + enext * xprev */
30558 xnext = gen_reg_rtx (mode);
30559 rs6000_emit_madd (xnext, enext, xprev, xprev);
30562 } else
30563 xprev = x0;
30565 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
30567 /* u = n * xprev */
30568 u = gen_reg_rtx (mode);
30569 emit_insn (gen_mul (u, n, xprev));
30571 /* v = n - (d * u) */
30572 v = gen_reg_rtx (mode);
30573 rs6000_emit_nmsub (v, d, u, n);
30575 /* dst = (v * xprev) + u */
30576 rs6000_emit_madd (dst, v, xprev, u);
30578 if (note_p)
30579 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
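/* Illustrative sketch, not part of the original rs6000.c: the
   refinement sequence emitted above, modeled in plain C for SFmode.
   SEED stands in for the fres estimate (assumed ~5 accurate bits);
   each pass roughly doubles the accurate bits, which is why three
   passes cover the 23-bit SFmode significand.  */
static float
example_swdiv (float n, float d, float seed)
{
  float x = seed;		/* x0 = 1/d estimate */
  float e = 1.0f - d * x;	/* e0 = 1 - d*x0 */
  float u, v;
  x = x + e * x;		/* x1 = x0 + e0*x0 */
  e = e * e;			/* middle pass: enext = eprev * eprev */
  x = x + e * x;		/*              xnext = xprev + enext*xprev */
  u = n * x;			/* last pass folds in the numerator */
  v = n - d * u;		/* v = n - d*u */
  return v * x + u;		/* dst = v*xprev + u */
}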
30582 /* Newton-Raphson approximation of single/double-precision floating point
30583 rsqrt. Assumes no trapping math and finite arguments. */
30585 void
30586 rs6000_emit_swrsqrt (rtx dst, rtx src)
30588 enum machine_mode mode = GET_MODE (src);
30589 rtx x0 = gen_reg_rtx (mode);
30590 rtx y = gen_reg_rtx (mode);
30592 /* Low precision estimates guarantee 5 bits of accuracy. High
30593 precision estimates guarantee 14 bits of accuracy. SFmode
30594 requires 23 bits of accuracy. DFmode requires 52 bits of
30595 accuracy. Each pass at least doubles the accuracy, leading
30596 to the following. */
30597 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30598 if (mode == DFmode || mode == V2DFmode)
30599 passes++;
30601 REAL_VALUE_TYPE dconst3_2;
30602 int i;
30603 rtx halfthree;
30604 enum insn_code code = optab_handler (smul_optab, mode);
30605 insn_gen_fn gen_mul = GEN_FCN (code);
30607 gcc_assert (code != CODE_FOR_nothing);
30609 /* Load up the constant 1.5 either as a scalar, or as a vector. */
30610 real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
30611 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
30613 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
30615 /* x0 = rsqrt estimate */
30616 emit_insn (gen_rtx_SET (VOIDmode, x0,
30617 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
30618 UNSPEC_RSQRT)));
30620 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
30621 rs6000_emit_msub (y, src, halfthree, src);
30623 for (i = 0; i < passes; i++)
30625 rtx x1 = gen_reg_rtx (mode);
30626 rtx u = gen_reg_rtx (mode);
30627 rtx v = gen_reg_rtx (mode);
30629 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
30630 emit_insn (gen_mul (u, x0, x0));
30631 rs6000_emit_nmsub (v, y, u, halfthree);
30632 emit_insn (gen_mul (x1, x0, v));
30633 x0 = x1;
30636 emit_move_insn (dst, x0);
30637 return;
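/* Illustrative sketch, not part of the original rs6000.c: one pass of
   the rsqrt refinement above, in plain C.  Y is 0.5*src, computed as
   1.5*src - src so that the 1.5 constant can be reused.  */
static float
example_rsqrt_pass (float src, float x0)
{
  float y = 1.5f * src - src;	/* y = 0.5 * src */
  float u = x0 * x0;		/* u = x0 * x0 */
  float v = 1.5f - y * u;	/* v = 1.5 - y*u, via the nmsub form */
  return x0 * v;		/* x1 = x0 * (1.5 - y * (x0*x0)) */
}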
30640 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
30641 (Power7) targets. DST is the target, and SRC is the argument operand. */
30643 void
30644 rs6000_emit_popcount (rtx dst, rtx src)
30646 enum machine_mode mode = GET_MODE (dst);
30647 rtx tmp1, tmp2;
30649 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
30650 if (TARGET_POPCNTD)
30652 if (mode == SImode)
30653 emit_insn (gen_popcntdsi2 (dst, src));
30654 else
30655 emit_insn (gen_popcntddi2 (dst, src));
30656 return;
30659 tmp1 = gen_reg_rtx (mode);
30661 if (mode == SImode)
30663 emit_insn (gen_popcntbsi2 (tmp1, src));
30664 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
30665 NULL_RTX, 0);
30666 tmp2 = force_reg (SImode, tmp2);
30667 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
30669 else
30671 emit_insn (gen_popcntbdi2 (tmp1, src));
30672 tmp2 = expand_mult (DImode, tmp1,
30673 GEN_INT ((HOST_WIDE_INT)
30674 0x01010101 << 32 | 0x01010101),
30675 NULL_RTX, 0);
30676 tmp2 = force_reg (DImode, tmp2);
30677 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
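/* Illustrative sketch, not part of the original rs6000.c: why the
   popcntb + multiply sequence above works.  popcntb leaves a per-byte
   population count in each byte; multiplying by 0x01010101 sums all
   four byte counts into the top byte (no carries, since each count is
   at most 8), and the shift by 24 extracts the total.  A plain C model
   of the SImode case, using a GCC builtin to stand in for popcntb: */
static unsigned int
example_popcount32 (unsigned int x)
{
  /* Per-byte counts, as popcntb would produce them.  */
  unsigned int b0 = __builtin_popcount (x & 0xff);
  unsigned int b1 = __builtin_popcount ((x >> 8) & 0xff);
  unsigned int b2 = __builtin_popcount ((x >> 16) & 0xff);
  unsigned int b3 = __builtin_popcount (x >> 24);
  unsigned int bytes = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
  /* The multiply accumulates b0+b1+b2+b3 into bits 24..31.  */
  return (bytes * 0x01010101u) >> 24;
}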
30682 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
30683 target, and SRC is the argument operand. */
30685 void
30686 rs6000_emit_parity (rtx dst, rtx src)
30688 enum machine_mode mode = GET_MODE (dst);
30689 rtx tmp;
30691 tmp = gen_reg_rtx (mode);
30693 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
30694 if (TARGET_CMPB)
30696 if (mode == SImode)
30698 emit_insn (gen_popcntbsi2 (tmp, src));
30699 emit_insn (gen_paritysi2_cmpb (dst, tmp));
30701 else
30703 emit_insn (gen_popcntbdi2 (tmp, src));
30704 emit_insn (gen_paritydi2_cmpb (dst, tmp));
30706 return;
30709 if (mode == SImode)
30711 /* Is mult+shift >= shift+xor+shift+xor? */
30712 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
30714 rtx tmp1, tmp2, tmp3, tmp4;
30716 tmp1 = gen_reg_rtx (SImode);
30717 emit_insn (gen_popcntbsi2 (tmp1, src));
30719 tmp2 = gen_reg_rtx (SImode);
30720 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
30721 tmp3 = gen_reg_rtx (SImode);
30722 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
30724 tmp4 = gen_reg_rtx (SImode);
30725 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
30726 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
30728 else
30729 rs6000_emit_popcount (tmp, src);
30730 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
30732 else
30734 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
30735 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
30737 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
30739 tmp1 = gen_reg_rtx (DImode);
30740 emit_insn (gen_popcntbdi2 (tmp1, src));
30742 tmp2 = gen_reg_rtx (DImode);
30743 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
30744 tmp3 = gen_reg_rtx (DImode);
30745 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
30747 tmp4 = gen_reg_rtx (DImode);
30748 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
30749 tmp5 = gen_reg_rtx (DImode);
30750 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
30752 tmp6 = gen_reg_rtx (DImode);
30753 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
30754 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
30756 else
30757 rs6000_emit_popcount (tmp, src);
30758 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
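/* Illustrative sketch, not part of the original rs6000.c: the
   shift/xor fallback above.  Starting from popcntb's per-byte counts,
   xor-folding the word onto its low byte preserves the low bit of the
   total count, which is exactly the parity.  SImode model: */
static unsigned int
example_parity32 (unsigned int byte_counts)
{
  unsigned int t = byte_counts ^ (byte_counts >> 16);
  t ^= t >> 8;
  return t & 1;		/* parity of the summed byte counts */
}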
30762 /* Expand an Altivec constant permutation for little endian mode.
30763 There are two issues: First, the two input operands must be
30764 swapped so that together they form a double-wide array in LE
30765 order. Second, the vperm instruction has surprising behavior
30766 in LE mode: it interprets the elements of the source vectors
30767 in BE mode ("left to right") and interprets the elements of
30768 the destination vector in LE mode ("right to left"). To
30769 correct for this, we must subtract each element of the permute
30770 control vector from 31.
30772 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
30773 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
30774 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
30775 serve as the permute control vector. Then, in BE mode,
30777 vperm 9,10,11,12
30779 places the desired result in vr9. However, in LE mode the
30780 vector contents will be
30782 vr10 = 00000003 00000002 00000001 00000000
30783 vr11 = 00000007 00000006 00000005 00000004
30785 The result of the vperm using the same permute control vector is
30787 vr9 = 05000000 07000000 01000000 03000000
30789 That is, the leftmost 4 bytes of vr10 are interpreted as the
30790 source for the rightmost 4 bytes of vr9, and so on.
30792 If we change the permute control vector to
30794 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
30796 and issue
30798 vperm 9,11,10,12
30800 we get the desired
30802 vr9 = 00000006 00000004 00000002 00000000. */
30804 void
30805 altivec_expand_vec_perm_const_le (rtx operands[4])
30807 unsigned int i;
30808 rtx perm[16];
30809 rtx constv, unspec;
30810 rtx target = operands[0];
30811 rtx op0 = operands[1];
30812 rtx op1 = operands[2];
30813 rtx sel = operands[3];
30815 /* Unpack and adjust the constant selector. */
30816 for (i = 0; i < 16; ++i)
30818 rtx e = XVECEXP (sel, 0, i);
30819 unsigned int elt = 31 - (INTVAL (e) & 31);
30820 perm[i] = GEN_INT (elt);
30823 /* Expand to a permute, swapping the inputs and using the
30824 adjusted selector. */
30825 if (!REG_P (op0))
30826 op0 = force_reg (V16QImode, op0);
30827 if (!REG_P (op1))
30828 op1 = force_reg (V16QImode, op1);
30830 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
30831 constv = force_reg (V16QImode, constv);
30832 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
30833 UNSPEC_VPERM);
30834 if (!REG_P (target))
30836 rtx tmp = gen_reg_rtx (V16QImode);
30837 emit_move_insn (tmp, unspec);
30838 unspec = tmp;
30841 emit_move_insn (target, unspec);
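/* Illustrative sketch, not part of the original rs6000.c: the selector
   adjustment above, applied to the worked example in the comment.
   Subtracting each entry of {0,1,2,3,8,9,10,11,...} from 31 yields
   {31,30,29,28,23,22,21,20,...}, the control vector quoted above.  */
static void
example_adjust_selector (unsigned char sel[16])
{
  unsigned int i;
  for (i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}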
30844 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
30845 permute control vector. But here it's not a constant, so we must
30846 generate a vector NAND or NOR to do the adjustment. */
30848 void
30849 altivec_expand_vec_perm_le (rtx operands[4])
30851 rtx notx, iorx, unspec;
30852 rtx target = operands[0];
30853 rtx op0 = operands[1];
30854 rtx op1 = operands[2];
30855 rtx sel = operands[3];
30856 rtx tmp = target;
30857 rtx norreg = gen_reg_rtx (V16QImode);
30858 enum machine_mode mode = GET_MODE (target);
30860 /* Get everything in regs so the pattern matches. */
30861 if (!REG_P (op0))
30862 op0 = force_reg (mode, op0);
30863 if (!REG_P (op1))
30864 op1 = force_reg (mode, op1);
30865 if (!REG_P (sel))
30866 sel = force_reg (V16QImode, sel);
30867 if (!REG_P (target))
30868 tmp = gen_reg_rtx (mode);
30870 /* Invert the selector with a VNAND if available, else a VNOR.
30871 The VNAND is preferred for future fusion opportunities. */
30872 notx = gen_rtx_NOT (V16QImode, sel);
30873 iorx = (TARGET_P8_VECTOR
30874 ? gen_rtx_IOR (V16QImode, notx, notx)
30875 : gen_rtx_AND (V16QImode, notx, notx));
30876 emit_insn (gen_rtx_SET (VOIDmode, norreg, iorx));
30878 /* Permute with operands reversed and adjusted selector. */
30879 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
30880 UNSPEC_VPERM);
30882 /* Copy into target, possibly by way of a register. */
30883 if (!REG_P (target))
30885 emit_move_insn (tmp, unspec);
30886 unspec = tmp;
30889 emit_move_insn (target, unspec);
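/* Illustrative note, not part of the original rs6000.c: with both
   operands equal, VNAND and VNOR both degenerate to plain NOT
   (~s & ~s == ~s | ~s == ~s), and on the low five selector bits ~s
   equals 31 - (s & 31), matching the constant-selector adjustment
   used above.  */
static unsigned char
example_invert_selector_bits (unsigned char s)
{
  return (unsigned char) ~s;	/* low 5 bits: 31 - (s & 31) */
}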
30892 /* Expand an Altivec constant permutation. Return true if we match
30893 an efficient implementation; false to fall back to VPERM. */
30895 bool
30896 altivec_expand_vec_perm_const (rtx operands[4])
30898 struct altivec_perm_insn {
30899 HOST_WIDE_INT mask;
30900 enum insn_code impl;
30901 unsigned char perm[16];
30903 static const struct altivec_perm_insn patterns[] = {
30904 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
30905 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
30906 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
30907 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
30908 { OPTION_MASK_ALTIVEC,
30909 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
30910 : CODE_FOR_altivec_vmrglb_direct),
30911 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
30912 { OPTION_MASK_ALTIVEC,
30913 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
30914 : CODE_FOR_altivec_vmrglh_direct),
30915 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
30916 { OPTION_MASK_ALTIVEC,
30917 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
30918 : CODE_FOR_altivec_vmrglw_direct),
30919 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
30920 { OPTION_MASK_ALTIVEC,
30921 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
30922 : CODE_FOR_altivec_vmrghb_direct),
30923 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
30924 { OPTION_MASK_ALTIVEC,
30925 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
30926 : CODE_FOR_altivec_vmrghh_direct),
30927 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
30928 { OPTION_MASK_ALTIVEC,
30929 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
30930 : CODE_FOR_altivec_vmrghw_direct),
30931 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
30932 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
30933 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
30934 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
30935 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
30938 unsigned int i, j, elt, which;
30939 unsigned char perm[16];
30940 rtx target, op0, op1, sel, x;
30941 bool one_vec;
30943 target = operands[0];
30944 op0 = operands[1];
30945 op1 = operands[2];
30946 sel = operands[3];
30948 /* Unpack the constant selector. */
30949 for (i = which = 0; i < 16; ++i)
30951 rtx e = XVECEXP (sel, 0, i);
30952 elt = INTVAL (e) & 31;
30953 which |= (elt < 16 ? 1 : 2);
30954 perm[i] = elt;
30957 /* Simplify the constant selector based on operands. */
30958 switch (which)
30960 default:
30961 gcc_unreachable ();
30963 case 3:
30964 one_vec = false;
30965 if (!rtx_equal_p (op0, op1))
30966 break;
30967 /* FALLTHRU */
30969 case 2:
30970 for (i = 0; i < 16; ++i)
30971 perm[i] &= 15;
30972 op0 = op1;
30973 one_vec = true;
30974 break;
30976 case 1:
30977 op1 = op0;
30978 one_vec = true;
30979 break;
30982 /* Look for splat patterns. */
30983 if (one_vec)
30985 elt = perm[0];
30987 for (i = 0; i < 16; ++i)
30988 if (perm[i] != elt)
30989 break;
30990 if (i == 16)
30992 if (!BYTES_BIG_ENDIAN)
30993 elt = 15 - elt;
30994 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
30995 return true;
30998 if (elt % 2 == 0)
31000 for (i = 0; i < 16; i += 2)
31001 if (perm[i] != elt || perm[i + 1] != elt + 1)
31002 break;
31003 if (i == 16)
31005 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
31006 x = gen_reg_rtx (V8HImode);
31007 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
31008 GEN_INT (field)));
31009 emit_move_insn (target, gen_lowpart (V16QImode, x));
31010 return true;
31014 if (elt % 4 == 0)
31016 for (i = 0; i < 16; i += 4)
31017 if (perm[i] != elt
31018 || perm[i + 1] != elt + 1
31019 || perm[i + 2] != elt + 2
31020 || perm[i + 3] != elt + 3)
31021 break;
31022 if (i == 16)
31024 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
31025 x = gen_reg_rtx (V4SImode);
31026 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
31027 GEN_INT (field)));
31028 emit_move_insn (target, gen_lowpart (V16QImode, x));
31029 return true;
31034 /* Look for merge and pack patterns. */
31035 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
31037 bool swapped;
31039 if ((patterns[j].mask & rs6000_isa_flags) == 0)
31040 continue;
31042 elt = patterns[j].perm[0];
31043 if (perm[0] == elt)
31044 swapped = false;
31045 else if (perm[0] == elt + 16)
31046 swapped = true;
31047 else
31048 continue;
31049 for (i = 1; i < 16; ++i)
31051 elt = patterns[j].perm[i];
31052 if (swapped)
31053 elt = (elt >= 16 ? elt - 16 : elt + 16);
31054 else if (one_vec && elt >= 16)
31055 elt -= 16;
31056 if (perm[i] != elt)
31057 break;
31059 if (i == 16)
31061 enum insn_code icode = patterns[j].impl;
31062 enum machine_mode omode = insn_data[icode].operand[0].mode;
31063 enum machine_mode imode = insn_data[icode].operand[1].mode;
31065 /* For little-endian, don't use vpkuwum and vpkuhum if the
31066 underlying vector type is not V4SI and V8HI, respectively.
31067 For example, using vpkuwum with a V8HI picks up the even
31068 halfwords (BE numbering) when the even halfwords (LE
31069 numbering) are what we need. */
31070 if (!BYTES_BIG_ENDIAN
31071 && icode == CODE_FOR_altivec_vpkuwum_direct
31072 && ((GET_CODE (op0) == REG
31073 && GET_MODE (op0) != V4SImode)
31074 || (GET_CODE (op0) == SUBREG
31075 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
31076 continue;
31077 if (!BYTES_BIG_ENDIAN
31078 && icode == CODE_FOR_altivec_vpkuhum_direct
31079 && ((GET_CODE (op0) == REG
31080 && GET_MODE (op0) != V8HImode)
31081 || (GET_CODE (op0) == SUBREG
31082 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
31083 continue;
31085 /* For little-endian, the two input operands must be swapped
31086 (or swapped back) to ensure proper right-to-left numbering
31087 from 0 to 2N-1. */
31088 if (swapped ^ !BYTES_BIG_ENDIAN)
31089 x = op0, op0 = op1, op1 = x;
31090 if (imode != V16QImode)
31092 op0 = gen_lowpart (imode, op0);
31093 op1 = gen_lowpart (imode, op1);
31095 if (omode == V16QImode)
31096 x = target;
31097 else
31098 x = gen_reg_rtx (omode);
31099 emit_insn (GEN_FCN (icode) (x, op0, op1));
31100 if (omode != V16QImode)
31101 emit_move_insn (target, gen_lowpart (V16QImode, x));
31102 return true;
31106 if (!BYTES_BIG_ENDIAN)
31108 altivec_expand_vec_perm_const_le (operands);
31109 return true;
31112 return false;
31115 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
31116 Return true if we match an efficient implementation. */
31118 static bool
31119 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
31120 unsigned char perm0, unsigned char perm1)
31122 rtx x;
31124 /* If both selectors come from the same operand, fold to single op. */
31125 if ((perm0 & 2) == (perm1 & 2))
31127 if (perm0 & 2)
31128 op0 = op1;
31129 else
31130 op1 = op0;
31132 /* If both operands are equal, fold to simpler permutation. */
31133 if (rtx_equal_p (op0, op1))
31135 perm0 = perm0 & 1;
31136 perm1 = (perm1 & 1) + 2;
31138 /* If the first selector comes from the second operand, swap. */
31139 else if (perm0 & 2)
31141 if (perm1 & 2)
31142 return false;
31143 perm0 -= 2;
31144 perm1 += 2;
31145 x = op0, op0 = op1, op1 = x;
31147 /* If the second selector does not come from the second operand, fail. */
31148 else if ((perm1 & 2) == 0)
31149 return false;
31151 /* Success! */
31152 if (target != NULL)
31154 enum machine_mode vmode, dmode;
31155 rtvec v;
31157 vmode = GET_MODE (target);
31158 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
31159 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
31160 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
31161 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
31162 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
31163 emit_insn (gen_rtx_SET (VOIDmode, target, x));
31165 return true;
31168 bool
31169 rs6000_expand_vec_perm_const (rtx operands[4])
31171 rtx target, op0, op1, sel;
31172 unsigned char perm0, perm1;
31174 target = operands[0];
31175 op0 = operands[1];
31176 op1 = operands[2];
31177 sel = operands[3];
31179 /* Unpack the constant selector. */
31180 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
31181 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
31183 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
31186 /* Test whether a constant permutation is supported. */
31188 static bool
31189 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
31190 const unsigned char *sel)
31192 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
31193 if (TARGET_ALTIVEC)
31194 return true;
31196 /* Check for ps_merge* or evmerge* insns. */
31197 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
31198 || (TARGET_SPE && vmode == V2SImode))
31200 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
31201 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
31202 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
31205 return false;
31208 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
31210 static void
31211 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
31212 enum machine_mode vmode, unsigned nelt, rtx perm[])
31214 enum machine_mode imode;
31215 rtx x;
31217 imode = vmode;
31218 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
31220 imode = GET_MODE_INNER (vmode);
31221 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
31222 imode = mode_for_vector (imode, nelt);
31225 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
31226 x = expand_vec_perm (vmode, op0, op1, x, target);
31227 if (x != target)
31228 emit_move_insn (target, x);
31231 /* Expand an extract even operation. */
31233 void
31234 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
31236 enum machine_mode vmode = GET_MODE (target);
31237 unsigned i, nelt = GET_MODE_NUNITS (vmode);
31238 rtx perm[16];
31240 for (i = 0; i < nelt; i++)
31241 perm[i] = GEN_INT (i * 2);
31243 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
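/* Worked example (hypothetical operands): for V4SImode the loop above
   builds the selector {0, 2, 4, 6}, i.e. the even elements of the
   concatenated op0:op1 pair.  */
#if 0
static void
example_extract_even_v4si (rtx target, rtx op0, rtx op1)
{
  /* target = { op0[0], op0[2], op1[0], op1[2] }  */
  rs6000_expand_extract_even (target, op0, op1);
}
#endif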
31246 /* Expand a vector interleave operation. */
31248 void
31249 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
31251 enum machine_mode vmode = GET_MODE (target);
31252 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
31253 rtx perm[16];
31255 high = (highp ? 0 : nelt / 2);
31256 for (i = 0; i < nelt / 2; i++)
31258 perm[i * 2] = GEN_INT (i + high);
31259 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
31262 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
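/* Worked example: with nelt = 4 and high = 0 the loop above builds the
   selector {0, 4, 1, 5}, interleaving the first halves of op0 and op1;
   with high = 2 it builds {2, 6, 3, 7} for the second halves.  Callers
   typically pass a HIGHP value already adjusted for endianness.  */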
31265 /* Return an RTX representing where to find the function value of a
31266 function returning MODE. */
31267 static rtx
31268 rs6000_complex_function_value (enum machine_mode mode)
31270 unsigned int regno;
31271 rtx r1, r2;
31272 enum machine_mode inner = GET_MODE_INNER (mode);
31273 unsigned int inner_bytes = GET_MODE_SIZE (inner);
31275 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31276 regno = FP_ARG_RETURN;
31277 else
31279 regno = GP_ARG_RETURN;
31281 /* 32-bit is OK since it'll go in r3/r4. */
31282 if (TARGET_32BIT && inner_bytes >= 4)
31283 return gen_rtx_REG (mode, regno);
31286 if (inner_bytes >= 8)
31287 return gen_rtx_REG (mode, regno);
31289 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
31290 const0_rtx);
31291 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
31292 GEN_INT (inner_bytes));
31293 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
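/* Worked example: for a _Complex float return on a 64-bit hard-float
   target, inner = SFmode and inner_bytes = 4, so execution reaches the
   PARALLEL case and produces
     (parallel:SC [(expr_list (reg:SF f1) (const_int 0))
                   (expr_list (reg:SF f2) (const_int 4))])
   -- the real part in fp1, the imaginary part in fp2.  A _Complex
   double (inner_bytes == 8) instead returns early as a single DCmode
   register starting at fp1.  */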
31296 /* Target hook for TARGET_FUNCTION_VALUE.
31298 On the SPE, both FPs and vectors are returned in r3.
31300 On RS/6000 an integer value is in r3 and a floating-point value is in
31301 fp1, unless -msoft-float. */
31303 static rtx
31304 rs6000_function_value (const_tree valtype,
31305 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
31306 bool outgoing ATTRIBUTE_UNUSED)
31308 enum machine_mode mode;
31309 unsigned int regno;
31310 enum machine_mode elt_mode;
31311 int n_elts;
31313 /* Special handling for structs in darwin64. */
31314 if (TARGET_MACHO
31315 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
31317 CUMULATIVE_ARGS valcum;
31318 rtx valret;
31320 valcum.words = 0;
31321 valcum.fregno = FP_ARG_MIN_REG;
31322 valcum.vregno = ALTIVEC_ARG_MIN_REG;
31323 /* Do a trial code generation as if this were going to be passed as
31324 an argument; if any part goes in memory, we return NULL. */
31325 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
31326 if (valret)
31327 return valret;
31328 /* Otherwise fall through to standard ABI rules. */
31331 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
31332 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
31333 &elt_mode, &n_elts))
31335 int first_reg, n_regs, i;
31336 rtx par;
31338 if (SCALAR_FLOAT_MODE_P (elt_mode))
31340 /* _Decimal128 must use even/odd register pairs. */
31341 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31342 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
31344 else
31346 first_reg = ALTIVEC_ARG_RETURN;
31347 n_regs = 1;
31350 par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
31351 for (i = 0; i < n_elts; i++)
31353 rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
31354 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
31355 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
31358 return par;
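/* Worked example (hypothetical aggregate): under ELFv2,
     struct v3 { double x, y, z; };
   is a homogeneous aggregate with elt_mode = DFmode and n_elts = 3, so
   n_regs = 1 and the loop above yields fp1/fp2/fp3 at byte offsets
   0/8/16:
     (parallel [(expr_list (reg:DF f1) (const_int 0))
                (expr_list (reg:DF f2) (const_int 8))
                (expr_list (reg:DF f3) (const_int 16))])  */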
31361 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
31363 /* Long long return values need to be split under the 32-bit ABI with -mpowerpc64. */
31364 return gen_rtx_PARALLEL (DImode,
31365 gen_rtvec (2,
31366 gen_rtx_EXPR_LIST (VOIDmode,
31367 gen_rtx_REG (SImode, GP_ARG_RETURN),
31368 const0_rtx),
31369 gen_rtx_EXPR_LIST (VOIDmode,
31370 gen_rtx_REG (SImode,
31371 GP_ARG_RETURN + 1),
31372 GEN_INT (4))));
31374 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
31376 return gen_rtx_PARALLEL (DCmode,
31377 gen_rtvec (4,
31378 gen_rtx_EXPR_LIST (VOIDmode,
31379 gen_rtx_REG (SImode, GP_ARG_RETURN),
31380 const0_rtx),
31381 gen_rtx_EXPR_LIST (VOIDmode,
31382 gen_rtx_REG (SImode,
31383 GP_ARG_RETURN + 1),
31384 GEN_INT (4)),
31385 gen_rtx_EXPR_LIST (VOIDmode,
31386 gen_rtx_REG (SImode,
31387 GP_ARG_RETURN + 2),
31388 GEN_INT (8)),
31389 gen_rtx_EXPR_LIST (VOIDmode,
31390 gen_rtx_REG (SImode,
31391 GP_ARG_RETURN + 3),
31392 GEN_INT (12))));
31395 mode = TYPE_MODE (valtype);
31396 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
31397 || (POINTER_TYPE_P (valtype) && !upc_shared_type_p (TREE_TYPE (valtype))))
31398 mode = TARGET_32BIT ? SImode : DImode;
31400 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31401 /* _Decimal128 must use an even/odd register pair. */
31402 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31403 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
31404 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
31405 regno = FP_ARG_RETURN;
31406 else if (TREE_CODE (valtype) == COMPLEX_TYPE
31407 && targetm.calls.split_complex_arg)
31408 return rs6000_complex_function_value (mode);
31409 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31410 return register is used in both cases, and we won't see V2DImode/V2DFmode
31411 for pure altivec, combine the two cases. */
31412 else if (TREE_CODE (valtype) == VECTOR_TYPE
31413 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
31414 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
31415 regno = ALTIVEC_ARG_RETURN;
31416 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31417 && (mode == DFmode || mode == DCmode
31418 || mode == TFmode || mode == TCmode))
31419 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31420 else
31421 regno = GP_ARG_RETURN;
31423 return gen_rtx_REG (mode, regno);
31426 /* Define how to find the value returned by a library function
31427 assuming the value has mode MODE. */
31428 static rtx
31429 rs6000_libcall_value (enum machine_mode mode)
31431 unsigned int regno;
31433 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
31435 /* Long long return values need to be split under the 32-bit ABI with -mpowerpc64. */
31436 return gen_rtx_PARALLEL (DImode,
31437 gen_rtvec (2,
31438 gen_rtx_EXPR_LIST (VOIDmode,
31439 gen_rtx_REG (SImode, GP_ARG_RETURN),
31440 const0_rtx),
31441 gen_rtx_EXPR_LIST (VOIDmode,
31442 gen_rtx_REG (SImode,
31443 GP_ARG_RETURN + 1),
31444 GEN_INT (4))));
31447 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31448 /* _Decimal128 must use an even/odd register pair. */
31449 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31450 else if (SCALAR_FLOAT_MODE_P (mode)
31451 && TARGET_HARD_FLOAT && TARGET_FPRS
31452 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
31453 regno = FP_ARG_RETURN;
31454 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31455 return register is used in both cases, and we won't see V2DImode/V2DFmode
31456 for pure altivec, combine the two cases. */
31457 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
31458 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
31459 regno = ALTIVEC_ARG_RETURN;
31460 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
31461 return rs6000_complex_function_value (mode);
31462 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31463 && (mode == DFmode || mode == DCmode
31464 || mode == TFmode || mode == TCmode))
31465 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31466 else
31467 regno = GP_ARG_RETURN;
31469 return gen_rtx_REG (mode, regno);
31473 /* Return true if we use LRA instead of the reload pass. */
31474 static bool
31475 rs6000_lra_p (void)
31477 return rs6000_lra_flag;
31480 /* Given FROM and TO register numbers, say whether this elimination is allowed.
31481 Frame pointer elimination is automatically handled.
31483 For the RS/6000, if frame pointer elimination is being done, we would like
31484 to convert ap into fp, not sp.
31486 We need r30 if -mminimal-toc was specified, and there are constant pool
31487 references. */
31489 static bool
31490 rs6000_can_eliminate (const int from, const int to)
31492 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
31493 ? ! frame_pointer_needed
31494 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
31495 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
31496 : true);
31499 /* Define the offset between two registers, FROM to be eliminated and its
31500 replacement TO, at the start of a routine. */
31501 HOST_WIDE_INT
31502 rs6000_initial_elimination_offset (int from, int to)
31504 rs6000_stack_t *info = rs6000_stack_info ();
31505 HOST_WIDE_INT offset;
31507 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31508 offset = info->push_p ? 0 : -info->total_size;
31509 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31511 offset = info->push_p ? 0 : -info->total_size;
31512 if (FRAME_GROWS_DOWNWARD)
31513 offset += info->fixed_size + info->vars_size + info->parm_size;
31515 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31516 offset = FRAME_GROWS_DOWNWARD
31517 ? info->fixed_size + info->vars_size + info->parm_size
31518 : 0;
31519 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31520 offset = info->total_size;
31521 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31522 offset = info->push_p ? info->total_size : 0;
31523 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
31524 offset = 0;
31525 else
31526 gcc_unreachable ();
31528 return offset;
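/* Worked example (hypothetical frame): if the prologue pushes the
   stack (push_p) and total_size is 112, then arg-pointer ->
   stack-pointer elimination yields 112, arg-pointer ->
   hard-frame-pointer also yields 112, and hard-frame-pointer ->
   stack-pointer yields 0, since the hard frame pointer tracks the
   post-push stack pointer.  */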
31531 static rtx
31532 rs6000_dwarf_register_span (rtx reg)
31534 rtx parts[8];
31535 int i, words;
31536 unsigned regno = REGNO (reg);
31537 enum machine_mode mode = GET_MODE (reg);
31539 if (TARGET_SPE
31540 && regno < 32
31541 && (SPE_VECTOR_MODE (GET_MODE (reg))
31542 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
31543 && mode != SFmode && mode != SDmode && mode != SCmode)))
31544 ;
31545 else
31546 return NULL_RTX;
31548 regno = REGNO (reg);
31550 /* The duality of the SPE register size wreaks all kinds of havoc.
31551 This is a way of distinguishing r0 in 32-bits from r0 in
31552 64-bits. */
31553 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
31554 gcc_assert (words <= 4);
31555 for (i = 0; i < words; i++, regno++)
31557 if (BYTES_BIG_ENDIAN)
31559 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31560 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
31562 else
31564 parts[2 * i] = gen_rtx_REG (SImode, regno);
31565 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31569 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
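/* Illustrative sketch: for a DFmode value in r5 on a big-endian E500
   target, words = 1 and the span built above is
     (parallel [(reg:SI <r5 SPE high half>) (reg:SI r5)])
   telling the DWARF unwinder that the 64-bit value is split between a
   separately tracked SPE high column and the ordinary 32-bit GPR.  */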
31572 /* Fill in sizes for SPE register high parts in table used by unwinder. */
31574 static void
31575 rs6000_init_dwarf_reg_sizes_extra (tree address)
31577 if (TARGET_SPE)
31579 int i;
31580 enum machine_mode mode = TYPE_MODE (char_type_node);
31581 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31582 rtx mem = gen_rtx_MEM (BLKmode, addr);
31583 rtx value = gen_int_mode (4, mode);
31585 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
31587 int column = DWARF_REG_TO_UNWIND_COLUMN
31588 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31589 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31591 emit_move_insn (adjust_address (mem, mode, offset), value);
31595 if (TARGET_MACHO && ! TARGET_ALTIVEC)
31597 int i;
31598 enum machine_mode mode = TYPE_MODE (char_type_node);
31599 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31600 rtx mem = gen_rtx_MEM (BLKmode, addr);
31601 rtx value = gen_int_mode (16, mode);
31603 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
31604 The unwinder still needs to know the size of Altivec registers. */
31606 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
31608 int column = DWARF_REG_TO_UNWIND_COLUMN
31609 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31610 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31612 emit_move_insn (adjust_address (mem, mode, offset), value);
31617 /* Map internal gcc register numbers to DWARF2 register numbers. */
31619 unsigned int
31620 rs6000_dbx_register_number (unsigned int regno)
31622 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
31623 return regno;
31624 if (regno == LR_REGNO)
31625 return 108;
31626 if (regno == CTR_REGNO)
31627 return 109;
31628 if (CR_REGNO_P (regno))
31629 return regno - CR0_REGNO + 86;
31630 if (regno == CA_REGNO)
31631 return 101; /* XER */
31632 if (ALTIVEC_REGNO_P (regno))
31633 return regno - FIRST_ALTIVEC_REGNO + 1124;
31634 if (regno == VRSAVE_REGNO)
31635 return 356;
31636 if (regno == VSCR_REGNO)
31637 return 67;
31638 if (regno == SPE_ACC_REGNO)
31639 return 99;
31640 if (regno == SPEFSCR_REGNO)
31641 return 612;
31642 if (SPE_HIGH_REGNO_P (regno))
31643 return regno - FIRST_SPE_HIGH_REGNO + 1200;
31644 return regno;
31647 /* Target hook for eh_return_filter_mode. */
31648 static enum machine_mode
31649 rs6000_eh_return_filter_mode (void)
31651 return TARGET_32BIT ? SImode : word_mode;
31654 /* Target hook for scalar_mode_supported_p. */
31655 static bool
31656 rs6000_scalar_mode_supported_p (enum machine_mode mode)
31658 if (DECIMAL_FLOAT_MODE_P (mode))
31659 return default_decimal_float_supported_p ();
31660 else
31661 return default_scalar_mode_supported_p (mode);
31664 /* Target hook for vector_mode_supported_p. */
31665 static bool
31666 rs6000_vector_mode_supported_p (enum machine_mode mode)
31669 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
31670 return true;
31672 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
31673 return true;
31675 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
31676 return true;
31678 else
31679 return false;
31682 /* Target hook for invalid_arg_for_unprototyped_fn. */
31683 static const char *
31684 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
31686 return (!rs6000_darwin64_abi
31687 && typelist == 0
31688 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
31689 && (funcdecl == NULL_TREE
31690 || (TREE_CODE (funcdecl) == FUNCTION_DECL
31691 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
31692 ? N_("AltiVec argument passed to unprototyped function")
31693 : NULL;
31696 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
31697 setup by using __stack_chk_fail_local hidden function instead of
31698 calling __stack_chk_fail directly. Otherwise it is better to call
31699 __stack_chk_fail directly. */
31701 static tree ATTRIBUTE_UNUSED
31702 rs6000_stack_protect_fail (void)
31704 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
31705 ? default_hidden_stack_protect_fail ()
31706 : default_external_stack_protect_fail ();
31709 void
31710 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
31711 int num_operands ATTRIBUTE_UNUSED)
31713 if (rs6000_warn_cell_microcode)
31715 const char *temp;
31716 int insn_code_number = recog_memoized (insn);
31717 location_t location = INSN_LOCATION (insn);
31719 /* Punt on insns we cannot recognize. */
31720 if (insn_code_number < 0)
31721 return;
31723 temp = get_insn_template (insn_code_number, insn);
31725 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
31726 warning_at (location, OPT_mwarn_cell_microcode,
31727 "emitting microcode insn %s\t[%s] #%d",
31728 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31729 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
31730 warning_at (location, OPT_mwarn_cell_microcode,
31731 "emitting conditional microcode insn %s\t[%s] #%d",
31732 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31736 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
31738 #if TARGET_ELF
31739 static unsigned HOST_WIDE_INT
31740 rs6000_asan_shadow_offset (void)
31742 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
31744 #endif
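/* Editor's note: AddressSanitizer maps an application address to its
   shadow byte as shadow = (addr >> 3) + offset, so the values above
   place the PowerPC shadow region at 1<<41 (64-bit) or 1<<29 (32-bit).
   A hypothetical scalar mirror of that mapping:  */
#if 0
static unsigned long long
example_asan_shadow (unsigned long long addr, int is64)
{
  /* Shadow granularity is 8 application bytes per shadow byte.  */
  return (addr >> 3) + (1ULL << (is64 ? 41 : 29));
}
#endif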
31746 /* Mask options that we want to support inside of attribute((target)) and
31747 #pragma GCC target operations. Note that we do not include things like
31748 64/32-bit, endianness, hard/soft floating point, etc. that would have
31749 different calling sequences. */
31751 struct rs6000_opt_mask {
31752 const char *name; /* option name */
31753 HOST_WIDE_INT mask; /* mask to set */
31754 bool invert; /* invert sense of mask */
31755 bool valid_target; /* option is a target option */
31758 static struct rs6000_opt_mask const rs6000_opt_masks[] =
31760 { "altivec", OPTION_MASK_ALTIVEC, false, true },
31761 { "cmpb", OPTION_MASK_CMPB, false, true },
31762 { "crypto", OPTION_MASK_CRYPTO, false, true },
31763 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
31764 { "dlmzb", OPTION_MASK_DLMZB, false, true },
31765 { "fprnd", OPTION_MASK_FPRND, false, true },
31766 { "hard-dfp", OPTION_MASK_DFP, false, true },
31767 { "htm", OPTION_MASK_HTM, false, true },
31768 { "isel", OPTION_MASK_ISEL, false, true },
31769 { "mfcrf", OPTION_MASK_MFCRF, false, true },
31770 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
31771 { "mulhw", OPTION_MASK_MULHW, false, true },
31772 { "multiple", OPTION_MASK_MULTIPLE, false, true },
31773 { "popcntb", OPTION_MASK_POPCNTB, false, true },
31774 { "popcntd", OPTION_MASK_POPCNTD, false, true },
31775 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
31776 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
31777 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
31778 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
31779 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
31780 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
31781 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
31782 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
31783 { "string", OPTION_MASK_STRING, false, true },
31784 { "update", OPTION_MASK_NO_UPDATE, true , true },
31785 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
31786 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
31787 { "vsx", OPTION_MASK_VSX, false, true },
31788 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
31789 #ifdef OPTION_MASK_64BIT
31790 #if TARGET_AIX_OS
31791 { "aix64", OPTION_MASK_64BIT, false, false },
31792 { "aix32", OPTION_MASK_64BIT, true, false },
31793 #else
31794 { "64", OPTION_MASK_64BIT, false, false },
31795 { "32", OPTION_MASK_64BIT, true, false },
31796 #endif
31797 #endif
31798 #ifdef OPTION_MASK_EABI
31799 { "eabi", OPTION_MASK_EABI, false, false },
31800 #endif
31801 #ifdef OPTION_MASK_LITTLE_ENDIAN
31802 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
31803 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
31804 #endif
31805 #ifdef OPTION_MASK_RELOCATABLE
31806 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
31807 #endif
31808 #ifdef OPTION_MASK_STRICT_ALIGN
31809 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
31810 #endif
31811 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
31812 { "string", OPTION_MASK_STRING, false, false },
31815 /* Builtin mask mapping for printing the flags. */
31816 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
31818 { "altivec", RS6000_BTM_ALTIVEC, false, false },
31819 { "vsx", RS6000_BTM_VSX, false, false },
31820 { "spe", RS6000_BTM_SPE, false, false },
31821 { "paired", RS6000_BTM_PAIRED, false, false },
31822 { "fre", RS6000_BTM_FRE, false, false },
31823 { "fres", RS6000_BTM_FRES, false, false },
31824 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
31825 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
31826 { "popcntd", RS6000_BTM_POPCNTD, false, false },
31827 { "cell", RS6000_BTM_CELL, false, false },
31828 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
31829 { "crypto", RS6000_BTM_CRYPTO, false, false },
31830 { "htm", RS6000_BTM_HTM, false, false },
31831 { "hard-dfp", RS6000_BTM_DFP, false, false },
31832 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
31833 { "long-double-128", RS6000_BTM_LDBL128, false, false },
31836 /* Option variables that we want to support inside attribute((target)) and
31837 #pragma GCC target operations. */
31839 struct rs6000_opt_var {
31840 const char *name; /* option name */
31841 size_t global_offset; /* offset of the option in global_options. */
31842 size_t target_offset; /* offset of the option in target options. */
31845 static struct rs6000_opt_var const rs6000_opt_vars[] =
31847 { "friz",
31848 offsetof (struct gcc_options, x_TARGET_FRIZ),
31849 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
31850 { "avoid-indexed-addresses",
31851 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
31852 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
31853 { "paired",
31854 offsetof (struct gcc_options, x_rs6000_paired_float),
31855 offsetof (struct cl_target_option, x_rs6000_paired_float), },
31856 { "longcall",
31857 offsetof (struct gcc_options, x_rs6000_default_long_calls),
31858 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
31861 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
31862 parsing. Return true if there were no errors. */
31864 static bool
31865 rs6000_inner_target_options (tree args, bool attr_p)
31867 bool ret = true;
31869 if (args == NULL_TREE)
31870 ;
31872 else if (TREE_CODE (args) == STRING_CST)
31874 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31875 char *q;
31877 while ((q = strtok (p, ",")) != NULL)
31879 bool error_p = false;
31880 bool not_valid_p = false;
31881 const char *cpu_opt = NULL;
31883 p = NULL;
31884 if (strncmp (q, "cpu=", 4) == 0)
31886 int cpu_index = rs6000_cpu_name_lookup (q+4);
31887 if (cpu_index >= 0)
31888 rs6000_cpu_index = cpu_index;
31889 else
31891 error_p = true;
31892 cpu_opt = q+4;
31895 else if (strncmp (q, "tune=", 5) == 0)
31897 int tune_index = rs6000_cpu_name_lookup (q+5);
31898 if (tune_index >= 0)
31899 rs6000_tune_index = tune_index;
31900 else
31902 error_p = true;
31903 cpu_opt = q+5;
31906 else
31908 size_t i;
31909 bool invert = false;
31910 char *r = q;
31912 error_p = true;
31913 if (strncmp (r, "no-", 3) == 0)
31915 invert = true;
31916 r += 3;
31919 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31920 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31922 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31924 if (!rs6000_opt_masks[i].valid_target)
31925 not_valid_p = true;
31926 else
31928 error_p = false;
31929 rs6000_isa_flags_explicit |= mask;
31931 /* VSX needs altivec, so -mvsx automagically sets
31932 altivec. */
31933 if (mask == OPTION_MASK_VSX && !invert)
31934 mask |= OPTION_MASK_ALTIVEC;
31936 if (rs6000_opt_masks[i].invert)
31937 invert = !invert;
31939 if (invert)
31940 rs6000_isa_flags &= ~mask;
31941 else
31942 rs6000_isa_flags |= mask;
31944 break;
31947 if (error_p && !not_valid_p)
31949 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31950 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31952 size_t j = rs6000_opt_vars[i].global_offset;
31953 *((int *) ((char *)&global_options + j)) = !invert;
31954 error_p = false;
31955 break;
31960 if (error_p)
31962 const char *eprefix, *esuffix;
31964 ret = false;
31965 if (attr_p)
31967 eprefix = "__attribute__((__target__(";
31968 esuffix = ")))";
31970 else
31972 eprefix = "#pragma GCC target ";
31973 esuffix = "";
31976 if (cpu_opt)
31977 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
31978 q, esuffix);
31979 else if (not_valid_p)
31980 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
31981 else
31982 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
31987 else if (TREE_CODE (args) == TREE_LIST)
31991 tree value = TREE_VALUE (args);
31992 if (value)
31994 bool ret2 = rs6000_inner_target_options (value, attr_p);
31995 if (!ret2)
31996 ret = false;
31998 args = TREE_CHAIN (args);
32000 while (args != NULL_TREE);
32003 else
32004 gcc_unreachable ();
32006 return ret;
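/* Usage sketch (hypothetical code, not part of this file): the strings
   parsed above come from constructs such as the following, where each
   comma-separated token is a cpu=/tune= selector, a name from
   rs6000_opt_masks (optionally prefixed with "no-"), or a variable
   from rs6000_opt_vars such as "friz".  */
#if 0
__attribute__((__target__("vsx,no-isel")))
int fast_path (int x) { return x + 1; }

#pragma GCC target ("cpu=power8,htm")
int hot_loop (int x) { return x - 1; }
#endif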
32009 /* Print out the target options as a list for -mdebug=target. */
32011 static void
32012 rs6000_debug_target_options (tree args, const char *prefix)
32014 if (args == NULL_TREE)
32015 fprintf (stderr, "%s<NULL>", prefix);
32017 else if (TREE_CODE (args) == STRING_CST)
32019 char *p = ASTRDUP (TREE_STRING_POINTER (args));
32020 char *q;
32022 while ((q = strtok (p, ",")) != NULL)
32024 p = NULL;
32025 fprintf (stderr, "%s\"%s\"", prefix, q);
32026 prefix = ", ";
32030 else if (TREE_CODE (args) == TREE_LIST)
32034 tree value = TREE_VALUE (args);
32035 if (value)
32037 rs6000_debug_target_options (value, prefix);
32038 prefix = ", ";
32040 args = TREE_CHAIN (args);
32042 while (args != NULL_TREE);
32045 else
32046 gcc_unreachable ();
32048 return;
32052 /* Hook to validate attribute((target("..."))). */
32054 static bool
32055 rs6000_valid_attribute_p (tree fndecl,
32056 tree ARG_UNUSED (name),
32057 tree args,
32058 int flags)
32060 struct cl_target_option cur_target;
32061 bool ret;
32062 tree old_optimize = build_optimization_node (&global_options);
32063 tree new_target, new_optimize;
32064 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
32066 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
32068 if (TARGET_DEBUG_TARGET)
32070 tree tname = DECL_NAME (fndecl);
32071 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
32072 if (tname)
32073 fprintf (stderr, "function: %.*s\n",
32074 (int) IDENTIFIER_LENGTH (tname),
32075 IDENTIFIER_POINTER (tname));
32076 else
32077 fprintf (stderr, "function: unknown\n");
32079 fprintf (stderr, "args:");
32080 rs6000_debug_target_options (args, " ");
32081 fprintf (stderr, "\n");
32083 if (flags)
32084 fprintf (stderr, "flags: 0x%x\n", flags);
32086 fprintf (stderr, "--------------------\n");
32089 old_optimize = build_optimization_node (&global_options);
32090 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
32092 /* If the function changed the optimization levels as well as setting target
32093 options, start with the optimizations specified. */
32094 if (func_optimize && func_optimize != old_optimize)
32095 cl_optimization_restore (&global_options,
32096 TREE_OPTIMIZATION (func_optimize));
32098 /* The target attributes may also change some optimization flags, so update
32099 the optimization options if necessary. */
32100 cl_target_option_save (&cur_target, &global_options);
32101 rs6000_cpu_index = rs6000_tune_index = -1;
32102 ret = rs6000_inner_target_options (args, true);
32104 /* Set up any additional state. */
32105 if (ret)
32107 ret = rs6000_option_override_internal (false);
32108 new_target = build_target_option_node (&global_options);
32110 else
32111 new_target = NULL;
32113 new_optimize = build_optimization_node (&global_options);
32115 if (!new_target)
32116 ret = false;
32118 else if (fndecl)
32120 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
32122 if (old_optimize != new_optimize)
32123 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
32126 cl_target_option_restore (&global_options, &cur_target);
32128 if (old_optimize != new_optimize)
32129 cl_optimization_restore (&global_options,
32130 TREE_OPTIMIZATION (old_optimize));
32132 return ret;
32136 /* Hook to validate the current #pragma GCC target and set the state, and
32137 update the macros based on what was changed. If ARGS is NULL, then
32138 POP_TARGET is used to reset the options. */
32140 bool
32141 rs6000_pragma_target_parse (tree args, tree pop_target)
32143 tree prev_tree = build_target_option_node (&global_options);
32144 tree cur_tree;
32145 struct cl_target_option *prev_opt, *cur_opt;
32146 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
32147 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
32149 if (TARGET_DEBUG_TARGET)
32151 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
32152 fprintf (stderr, "args:");
32153 rs6000_debug_target_options (args, " ");
32154 fprintf (stderr, "\n");
32156 if (pop_target)
32158 fprintf (stderr, "pop_target:\n");
32159 debug_tree (pop_target);
32161 else
32162 fprintf (stderr, "pop_target: <NULL>\n");
32164 fprintf (stderr, "--------------------\n");
32167 if (! args)
32169 cur_tree = ((pop_target)
32170 ? pop_target
32171 : target_option_default_node);
32172 cl_target_option_restore (&global_options,
32173 TREE_TARGET_OPTION (cur_tree));
32175 else
32177 rs6000_cpu_index = rs6000_tune_index = -1;
32178 if (!rs6000_inner_target_options (args, false)
32179 || !rs6000_option_override_internal (false)
32180 || (cur_tree = build_target_option_node (&global_options))
32181 == NULL_TREE)
32183 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
32184 fprintf (stderr, "invalid pragma\n");
32186 return false;
32190 target_option_current_node = cur_tree;
32192 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
32193 change the macros that are defined. */
32194 if (rs6000_target_modify_macros_ptr)
32196 prev_opt = TREE_TARGET_OPTION (prev_tree);
32197 prev_bumask = prev_opt->x_rs6000_builtin_mask;
32198 prev_flags = prev_opt->x_rs6000_isa_flags;
32200 cur_opt = TREE_TARGET_OPTION (cur_tree);
32201 cur_flags = cur_opt->x_rs6000_isa_flags;
32202 cur_bumask = cur_opt->x_rs6000_builtin_mask;
32204 diff_bumask = (prev_bumask ^ cur_bumask);
32205 diff_flags = (prev_flags ^ cur_flags);
32207 if ((diff_flags != 0) || (diff_bumask != 0))
32209 /* Delete old macros. */
32210 rs6000_target_modify_macros_ptr (false,
32211 prev_flags & diff_flags,
32212 prev_bumask & diff_bumask);
32214 /* Define new macros. */
32215 rs6000_target_modify_macros_ptr (true,
32216 cur_flags & diff_flags,
32217 cur_bumask & diff_bumask);
32221 return true;
32225 /* Remember the last target of rs6000_set_current_function. */
32226 static GTY(()) tree rs6000_previous_fndecl;
32228 /* Establish appropriate back-end context for processing the function
32229 FNDECL. The argument might be NULL to indicate processing at top
32230 level, outside of any function scope. */
32231 static void
32232 rs6000_set_current_function (tree fndecl)
32234 tree old_tree = (rs6000_previous_fndecl
32235 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
32236 : NULL_TREE);
32238 tree new_tree = (fndecl
32239 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
32240 : NULL_TREE);
32242 if (TARGET_DEBUG_TARGET)
32244 bool print_final = false;
32245 fprintf (stderr, "\n==================== rs6000_set_current_function");
32247 if (fndecl)
32248 fprintf (stderr, ", fndecl %s (%p)",
32249 (DECL_NAME (fndecl)
32250 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
32251 : "<unknown>"), (void *)fndecl);
32253 if (rs6000_previous_fndecl)
32254 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
32256 fprintf (stderr, "\n");
32257 if (new_tree)
32259 fprintf (stderr, "\nnew fndecl target specific options:\n");
32260 debug_tree (new_tree);
32261 print_final = true;
32264 if (old_tree)
32266 fprintf (stderr, "\nold fndecl target specific options:\n");
32267 debug_tree (old_tree);
32268 print_final = true;
32271 if (print_final)
32272 fprintf (stderr, "--------------------\n");
32275 /* Only change the context if the function changes. This hook is called
32276 several times in the course of compiling a function, and we don't want to
32277 slow things down too much or call target_reinit when it isn't safe. */
32278 if (fndecl && fndecl != rs6000_previous_fndecl)
32280 rs6000_previous_fndecl = fndecl;
32281 if (old_tree == new_tree)
32282 ;
32284 else if (new_tree)
32286 cl_target_option_restore (&global_options,
32287 TREE_TARGET_OPTION (new_tree));
32288 if (TREE_TARGET_GLOBALS (new_tree))
32289 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32290 else
32291 TREE_TARGET_GLOBALS (new_tree)
32292 = save_target_globals_default_opts ();
32295 else if (old_tree)
32297 new_tree = target_option_current_node;
32298 cl_target_option_restore (&global_options,
32299 TREE_TARGET_OPTION (new_tree));
32300 if (TREE_TARGET_GLOBALS (new_tree))
32301 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32302 else if (new_tree == target_option_default_node)
32303 restore_target_globals (&default_target_globals);
32304 else
32305 TREE_TARGET_GLOBALS (new_tree)
32306 = save_target_globals_default_opts ();
32312 /* Save the current options */
32314 static void
32315 rs6000_function_specific_save (struct cl_target_option *ptr,
32316 struct gcc_options *opts)
32318 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
32319 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
32322 /* Restore the current options */
32324 static void
32325 rs6000_function_specific_restore (struct gcc_options *opts,
32326 struct cl_target_option *ptr)
32329 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
32330 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
32331 (void) rs6000_option_override_internal (false);
32334 /* Print the current options */
32336 static void
32337 rs6000_function_specific_print (FILE *file, int indent,
32338 struct cl_target_option *ptr)
32340 rs6000_print_isa_options (file, indent, "Isa options set",
32341 ptr->x_rs6000_isa_flags);
32343 rs6000_print_isa_options (file, indent, "Isa options explicit",
32344 ptr->x_rs6000_isa_flags_explicit);
32347 /* Helper function to print the current isa or misc options on a line. */
32349 static void
32350 rs6000_print_options_internal (FILE *file,
32351 int indent,
32352 const char *string,
32353 HOST_WIDE_INT flags,
32354 const char *prefix,
32355 const struct rs6000_opt_mask *opts,
32356 size_t num_elements)
32358 size_t i;
32359 size_t start_column = 0;
32360 size_t cur_column;
32361 size_t max_column = 76;
32362 const char *comma = "";
32364 if (indent)
32365 start_column += fprintf (file, "%*s", indent, "");
32367 if (!flags)
32369 fprintf (file, DEBUG_FMT_S, string, "<none>");
32370 return;
32373 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
32375 /* Print the various mask options. */
32376 cur_column = start_column;
32377 for (i = 0; i < num_elements; i++)
32379 if ((flags & opts[i].mask) != 0)
32381 const char *no_str = opts[i].invert ? "no-" : "";
32382 size_t len = (strlen (comma)
32383 + strlen (prefix)
32384 + strlen (no_str)
32385 + strlen (opts[i].name));
32387 cur_column += len;
32388 if (cur_column > max_column)
32390 fprintf (file, ", \\\n%*s", (int)start_column, "");
32391 cur_column = start_column + len;
32392 comma = "";
32395 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
32396 opts[i].name);
32397 flags &= ~ opts[i].mask;
32398 comma = ", ";
32402 fputs ("\n", file);
32405 /* Helper function to print the current isa options on a line. */
32407 static void
32408 rs6000_print_isa_options (FILE *file, int indent, const char *string,
32409 HOST_WIDE_INT flags)
32411 rs6000_print_options_internal (file, indent, string, flags, "-m",
32412 &rs6000_opt_masks[0],
32413 ARRAY_SIZE (rs6000_opt_masks));
32416 static void
32417 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
32418 HOST_WIDE_INT flags)
32420 rs6000_print_options_internal (file, indent, string, flags, "",
32421 &rs6000_builtin_mask_names[0],
32422 ARRAY_SIZE (rs6000_builtin_mask_names));
32426 /* Hook to determine if one function can safely inline another. */
32428 static bool
32429 rs6000_can_inline_p (tree caller, tree callee)
32431 bool ret = false;
32432 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32433 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32435 /* If callee has no option attributes, then it is ok to inline. */
32436 if (!callee_tree)
32437 ret = true;
32439 /* If caller has no option attributes, but callee does then it is not ok to
32440 inline. */
32441 else if (!caller_tree)
32442 ret = false;
32444 else
32446 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32447 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32449 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32450 can inline an altivec function but a non-vsx function can't inline a
32451 vsx function. */
32452 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32453 == callee_opts->x_rs6000_isa_flags)
32454 ret = true;
32457 if (TARGET_DEBUG_TARGET)
32458 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32459 (DECL_NAME (caller)
32460 ? IDENTIFIER_POINTER (DECL_NAME (caller))
32461 : "<unknown>"),
32462 (DECL_NAME (callee)
32463 ? IDENTIFIER_POINTER (DECL_NAME (callee))
32464 : "<unknown>"),
32465 (ret ? "can" : "cannot"));
32467 return ret;
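/* Editor's example: a caller whose ISA flags include VSX (which also
   implies AltiVec) passes the subset test for a callee marked
   __attribute__((target("altivec"))), so inlining is allowed; the
   reverse pairing fails, since the callee's VSX bit is not in the
   caller's flags.  */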
32470 /* Allocate a stack temp and fix up the address so it meets the particular
32471 memory requirements (either offsettable or REG+REG addressing). */
32473 rtx
32474 rs6000_allocate_stack_temp (enum machine_mode mode,
32475 bool offsettable_p,
32476 bool reg_reg_p)
32478 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32479 rtx addr = XEXP (stack, 0);
32480 int strict_p = (reload_in_progress || reload_completed);
32482 if (!legitimate_indirect_address_p (addr, strict_p))
32484 if (offsettable_p
32485 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32486 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32488 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32489 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32492 return stack;
32495 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
32496 to such a form to deal with memory reference instructions like STFIWX that
32497 only take reg+reg addressing. */
32499 rtx
32500 rs6000_address_for_fpconvert (rtx x)
32502 int strict_p = (reload_in_progress || reload_completed);
32503 rtx addr;
32505 gcc_assert (MEM_P (x));
32506 addr = XEXP (x, 0);
32507 if (! legitimate_indirect_address_p (addr, strict_p)
32508 && ! legitimate_indexed_address_p (addr, strict_p))
32510 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32512 rtx reg = XEXP (addr, 0);
32513 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32514 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32515 gcc_assert (REG_P (reg));
32516 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32517 addr = reg;
32519 else if (GET_CODE (addr) == PRE_MODIFY)
32521 rtx reg = XEXP (addr, 0);
32522 rtx expr = XEXP (addr, 1);
32523 gcc_assert (REG_P (reg));
32524 gcc_assert (GET_CODE (expr) == PLUS);
32525 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32526 addr = reg;
32529 x = replace_equiv_address (x, copy_addr_to_reg (addr));
32532 return x;
32535 /* Given a memory reference, if it is not in the form for altivec memory
32536 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
32537 convert to the altivec format. */
32539 rtx
32540 rs6000_address_for_altivec (rtx x)
32542 gcc_assert (MEM_P (x));
32543 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
32545 rtx addr = XEXP (x, 0);
32546 int strict_p = (reload_in_progress || reload_completed);
32548 if (!legitimate_indexed_address_p (addr, strict_p)
32549 && !legitimate_indirect_address_p (addr, strict_p))
32550 addr = copy_to_mode_reg (Pmode, addr);
32552 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
32553 x = change_address (x, GET_MODE (x), addr);
32556 return x;
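/* Illustrative sketch: the AND with -16 mirrors what the lvx/stvx
   hardware does to an effective address.  A hypothetical scalar
   analogue:  */
#if 0
static unsigned long
example_altivec_ea (unsigned long addr)
{
  /* E.g. 0x1008 -> 0x1000, the containing 16-byte-aligned quadword.  */
  return addr & ~(unsigned long) 15;
}
#endif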
32559 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32561 On the RS/6000, all integer constants are acceptable, most won't be valid
32562 for particular insns, though. Only easy FP constants are acceptable. */
32564 static bool
32565 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
32567 if (TARGET_ELF && tls_referenced_p (x))
32568 return false;
32570 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
32571 || GET_MODE (x) == VOIDmode
32572 || (TARGET_POWERPC64 && mode == DImode)
32573 || easy_fp_constant (x, mode)
32574 || easy_vector_constant (x, mode));
32579 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32581 void
32582 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32584 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32585 rtx toc_load = NULL_RTX;
32586 rtx toc_restore = NULL_RTX;
32587 rtx func_addr;
32588 rtx abi_reg = NULL_RTX;
32589 rtx call[4];
32590 int n_call;
32591 rtx insn;
32593 /* Handle longcall attributes. */
32594 if (INTVAL (cookie) & CALL_LONG)
32595 func_desc = rs6000_longcall_ref (func_desc);
32597 /* Handle indirect calls. */
32598 if (GET_CODE (func_desc) != SYMBOL_REF
32599 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
32601 /* Save the TOC into its reserved slot before the call,
32602 and prepare to restore it after the call. */
32603 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32604 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32605 rtx stack_toc_mem = gen_frame_mem (Pmode,
32606 gen_rtx_PLUS (Pmode, stack_ptr,
32607 stack_toc_offset));
32608 toc_restore = gen_rtx_SET (VOIDmode, toc_reg, stack_toc_mem);
32610 /* Can we optimize saving the TOC in the prologue or
32611 do we need to do it at every call? */
32612 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32613 cfun->machine->save_toc_in_prologue = true;
32614 else
32616 MEM_VOLATILE_P (stack_toc_mem) = 1;
32617 emit_move_insn (stack_toc_mem, toc_reg);
32620 if (DEFAULT_ABI == ABI_ELFv2)
32622 /* A function pointer in the ELFv2 ABI is just a plain address, but
32623 the ABI requires it to be loaded into r12 before the call. */
32624 func_addr = gen_rtx_REG (Pmode, 12);
32625 emit_move_insn (func_addr, func_desc);
32626 abi_reg = func_addr;
32628 else
32630 /* A function pointer under AIX is a pointer to a data area whose
32631 first word contains the actual address of the function, whose
32632 second word contains a pointer to its TOC, and whose third word
32633 contains a value to place in the static chain register (r11).
32634 Note that if we load the static chain, our "trampoline" need
32635 not have any executable code. */
32637 /* Load up address of the actual function. */
32638 func_desc = force_reg (Pmode, func_desc);
32639 func_addr = gen_reg_rtx (Pmode);
32640 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
32642 /* Prepare to load the TOC of the called function. Note that the
32643 TOC load must happen immediately before the actual call so
32644 that unwinding the TOC registers works correctly. See the
32645 comment in frob_update_context. */
32646 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32647 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32648 gen_rtx_PLUS (Pmode, func_desc,
32649 func_toc_offset));
32650 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32652 /* If we have a static chain, load it up. */
32653 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32655 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32656 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32657 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32658 gen_rtx_PLUS (Pmode, func_desc,
32659 func_sc_offset));
32660 emit_move_insn (sc_reg, func_sc_mem);
32661 abi_reg = sc_reg;
32665 else
32667 /* Direct calls use the TOC: for local calls, the callee will
32668 assume the TOC register is set; for non-local calls, the
32669 PLT stub needs the TOC register. */
32670 abi_reg = toc_reg;
32671 func_addr = func_desc;
32674 /* Create the call. */
32675 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
32676 if (value != NULL_RTX)
32677 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32678 n_call = 1;
32680 if (toc_load)
32681 call[n_call++] = toc_load;
32682 if (toc_restore)
32683 call[n_call++] = toc_restore;
32685 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
32687 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32688 insn = emit_call_insn (insn);
32690 /* Mention all registers defined by the ABI to hold information
32691 as uses in CALL_INSN_FUNCTION_USAGE. */
32692 if (abi_reg)
32693 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
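/* Sketch of the AIX function descriptor dereferenced above, as a
   hypothetical C view (each word is GET_MODE_SIZE (Pmode) bytes):  */
#if 0
struct aix_func_desc
{
  void *entry;        /* word 0: code address, loaded into func_addr */
  void *toc;          /* word 1: callee TOC, loaded just before the call */
  void *static_chain; /* word 2: environment, loaded into r11 if enabled */
};
#endif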
32696 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32698 void
32699 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32701 rtx call[2];
32702 rtx insn;
32704 gcc_assert (INTVAL (cookie) == 0);
32706 /* Create the call. */
32707 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
32708 if (value != NULL_RTX)
32709 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32711 call[1] = simple_return_rtx;
32713 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32714 insn = emit_call_insn (insn);
32716 /* Note use of the TOC register. */
32717 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
32718 /* We also need to mark a use of the link register, since the function we
32719 sibling-call to will use it to return to our caller. */
32720 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
32723 /* Return whether we need to always update the saved TOC pointer when we update
32724 the stack pointer. */
32726 static bool
32727 rs6000_save_toc_in_prologue_p (void)
32729 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
32732 #ifdef HAVE_GAS_HIDDEN
32733 # define USE_HIDDEN_LINKONCE 1
32734 #else
32735 # define USE_HIDDEN_LINKONCE 0
32736 #endif
32738 /* Fills in the label name that should be used for a 476 link stack thunk. */
32740 void
32741 get_ppc476_thunk_name (char name[32])
32743 gcc_assert (TARGET_LINK_STACK);
32745 if (USE_HIDDEN_LINKONCE)
32746 sprintf (name, "__ppc476.get_thunk");
32747 else
32748 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32751 /* This function emits the simple thunk routine that is used to preserve
32752 the link stack on the 476 cpu. */
32754 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32755 static void
32756 rs6000_code_end (void)
32758 char name[32];
32759 tree decl;
32761 if (!TARGET_LINK_STACK)
32762 return;
32764 get_ppc476_thunk_name (name);
32766 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32767 build_function_type_list (void_type_node, NULL_TREE));
32768 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32769 NULL_TREE, void_type_node);
32770 TREE_PUBLIC (decl) = 1;
32771 TREE_STATIC (decl) = 1;
32773 #if RS6000_WEAK
32774 if (USE_HIDDEN_LINKONCE)
32776 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32777 targetm.asm_out.unique_section (decl, 0);
32778 switch_to_section (get_named_section (decl, NULL, 0));
32779 DECL_WEAK (decl) = 1;
32780 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32781 targetm.asm_out.globalize_label (asm_out_file, name);
32782 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32783 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32785 else
32786 #endif
32788 switch_to_section (text_section);
32789 ASM_OUTPUT_LABEL (asm_out_file, name);
32792 DECL_INITIAL (decl) = make_node (BLOCK);
32793 current_function_decl = decl;
32794 init_function_start (decl);
32795 first_function_block_is_cold = false;
32796 /* Make sure unwind info is emitted for the thunk if needed. */
32797 final_start_function (emit_barrier (), asm_out_file, 1);
32799 fputs ("\tblr\n", asm_out_file);
32801 final_end_function ();
32802 init_insn_lengths ();
32803 free_after_compilation (cfun);
32804 set_cfun (NULL);
32805 current_function_decl = NULL;
32808 /* Add r30 to hard reg set if the prologue sets it up and it is not
32809 pic_offset_table_rtx. */
32811 static void
32812 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32814 if (!TARGET_SINGLE_PIC_BASE
32815 && TARGET_TOC
32816 && TARGET_MINIMAL_TOC
32817 && get_pool_size () != 0)
32818 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32822 /* Helper function for rs6000_split_logical to emit a logical instruction after
32823 splitting the operation into individual GPR registers.
32825 DEST is the destination register.
32826 OP1 and OP2 are the input source registers.
32827 CODE is the base operation (AND, IOR, XOR, NOT).
32828 MODE is the machine mode.
32829 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32830 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32831 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32833 static void
32834 rs6000_split_logical_inner (rtx dest,
32835 rtx op1,
32836 rtx op2,
32837 enum rtx_code code,
32838 enum machine_mode mode,
32839 bool complement_final_p,
32840 bool complement_op1_p,
32841 bool complement_op2_p)
32843 rtx bool_rtx;
32845 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32846 if (op2 && GET_CODE (op2) == CONST_INT
32847 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32848 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32850 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32851 HOST_WIDE_INT value = INTVAL (op2) & mask;
32853 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32854 if (code == AND)
32856 if (value == 0)
32858 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
32859 return;
32862 else if (value == mask)
32864 if (!rtx_equal_p (dest, op1))
32865 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32866 return;
32870 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32871 into separate ORI/ORIS or XORI/XORIS instructions. */
32872 else if (code == IOR || code == XOR)
32874 if (value == 0)
32876 if (!rtx_equal_p (dest, op1))
32877 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32878 return;
32883 if (code == AND && mode == SImode
32884 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32886 emit_insn (gen_andsi3 (dest, op1, op2));
32887 return;
32890 if (complement_op1_p)
32891 op1 = gen_rtx_NOT (mode, op1);
32893 if (complement_op2_p)
32894 op2 = gen_rtx_NOT (mode, op2);
32896 bool_rtx = ((code == NOT)
32897 ? gen_rtx_NOT (mode, op1)
32898 : gen_rtx_fmt_ee (code, mode, op1, op2));
32900 if (complement_final_p)
32901 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32903 emit_insn (gen_rtx_SET (VOIDmode, dest, bool_rtx));
32906 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32907 operations are split immediately during RTL generation to allow for more
32908 optimizations of the AND/IOR/XOR.
32910 OPERANDS is an array containing the destination and two input operands.
32911 CODE is the base operation (AND, IOR, XOR, NOT).
32912 MODE is the machine mode.
32913 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32914 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32915 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32919 static void
32920 rs6000_split_logical_di (rtx operands[3],
32921 enum rtx_code code,
32922 bool complement_final_p,
32923 bool complement_op1_p,
32924 bool complement_op2_p)
32926 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32927 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32928 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32929 enum hi_lo { hi = 0, lo = 1 };
32930 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32931 size_t i;
32933 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32934 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32935 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32936 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32938 if (code == NOT)
32939 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32940 else
32942 if (GET_CODE (operands[2]) != CONST_INT)
32944 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32945 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32947 else
32949 HOST_WIDE_INT value = INTVAL (operands[2]);
32950 HOST_WIDE_INT value_hi_lo[2];
32952 gcc_assert (!complement_final_p);
32953 gcc_assert (!complement_op1_p);
32954 gcc_assert (!complement_op2_p);
32956 value_hi_lo[hi] = value >> 32;
32957 value_hi_lo[lo] = value & lower_32bits;
32959 for (i = 0; i < 2; i++)
32961 HOST_WIDE_INT sub_value = value_hi_lo[i];
32963 if (sub_value & sign_bit)
32964 sub_value |= upper_32bits;
32966 op2_hi_lo[i] = GEN_INT (sub_value);
32968 /* If this is an AND instruction, check to see if we need to load
32969 the value in a register. */
32970 if (code == AND && sub_value != -1 && sub_value != 0
32971 && !and_operand (op2_hi_lo[i], SImode))
32972 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32977 for (i = 0; i < 2; i++)
32979 /* Split large IOR/XOR operations. */
32980 if ((code == IOR || code == XOR)
32981 && GET_CODE (op2_hi_lo[i]) == CONST_INT
32982 && !complement_final_p
32983 && !complement_op1_p
32984 && !complement_op2_p
32985 && !logical_const_operand (op2_hi_lo[i], SImode))
32987 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32988 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32989 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32990 rtx tmp = gen_reg_rtx (SImode);
32992 /* Make sure the constant is sign extended. */
32993 if ((hi_16bits & sign_bit) != 0)
32994 hi_16bits |= upper_32bits;
32996 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32997 code, SImode, false, false, false);
32999 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
33000 code, SImode, false, false, false);
33002 else
33003 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
33004 code, SImode, complement_final_p,
33005 complement_op1_p, complement_op2_p);
33008 return;
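/* Worked example: splitting a DImode XOR with constant 0x12345678 on a
   32-bit target gives value_hi_lo = { 0, 0x12345678 }.  The high word
   XORs with 0 and collapses to a plain move; the low word is not a
   logical_const_operand, so it is split into two insns, roughly
     xoris tmp, src_lo, 0x1234    ; hi_16bits = 0x12340000
     xori  dst_lo, tmp, 0x5678    ; lo_16bits = 0x00005678  */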
33011 /* Split the insns that make up boolean operations operating on multiple GPR
33012 registers. The boolean MD patterns ensure that the inputs either are
33013 exactly the same as the output registers, or there is no overlap.
33015 OPERANDS is an array containing the destination and two input operands.
33016 CODE is the base operation (AND, IOR, XOR, NOT).
33017 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33018 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33019 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33021 void
33022 rs6000_split_logical (rtx operands[3],
33023 enum rtx_code code,
33024 bool complement_final_p,
33025 bool complement_op1_p,
33026 bool complement_op2_p)
33028 enum machine_mode mode = GET_MODE (operands[0]);
33029 enum machine_mode sub_mode;
33030 rtx op0, op1, op2;
33031 int sub_size, regno0, regno1, nregs, i;
33033 /* If this is DImode, use the specialized version that can run before
33034 register allocation. */
33035 if (mode == DImode && !TARGET_POWERPC64)
33037 rs6000_split_logical_di (operands, code, complement_final_p,
33038 complement_op1_p, complement_op2_p);
33039 return;
33042 op0 = operands[0];
33043 op1 = operands[1];
33044 op2 = (code == NOT) ? NULL_RTX : operands[2];
33045 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33046 sub_size = GET_MODE_SIZE (sub_mode);
33047 regno0 = REGNO (op0);
33048 regno1 = REGNO (op1);
33050 gcc_assert (reload_completed);
33051 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33052 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33054 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33055 gcc_assert (nregs > 1);
33057 if (op2 && REG_P (op2))
33058 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33060 for (i = 0; i < nregs; i++)
33062 int offset = i * sub_size;
33063 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33064 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33065 rtx sub_op2 = ((code == NOT)
33066 ? NULL_RTX
33067 : simplify_subreg (sub_mode, op2, mode, offset));
33069 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33070 complement_final_p, complement_op1_p,
33071 complement_op2_p);
33074 return;
33078 /* Return true if the peephole2 can combine a load involving a combination of
33079 an addis instruction and a load with an offset that can be fused together on
33080 a power8.
33082 The operands are:
33083 operands[0] register set with addis
33084 operands[1] value set via addis
33085 operands[2] target register being loaded
33086 operands[3] D-form memory reference using operands[0].
33088 In addition, we are passed a boolean that is true if this is a peephole2,
33089 and we can check whether the addis_reg is dead after the insn and can be
33090 replaced by the target register. */
33092 bool
33093 fusion_gpr_load_p (rtx *operands, bool peep2_p)
33095 rtx addis_reg = operands[0];
33096 rtx addis_value = operands[1];
33097 rtx target = operands[2];
33098 rtx mem = operands[3];
33099 rtx addr;
33100 rtx base_reg;
33102 /* Validate arguments. */
33103 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33104 return false;
33106 if (!base_reg_operand (target, GET_MODE (target)))
33107 return false;
33109 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33110 return false;
33112 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33113 return false;
33115 /* Allow sign/zero extension. */
33116 if (GET_CODE (mem) == ZERO_EXTEND
33117 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33118 mem = XEXP (mem, 0);
33120 if (!MEM_P (mem))
33121 return false;
33123 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33124 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33125 return false;
33127 /* Validate that the register used to load the high value is either the
33128 register being loaded, or one whose use we can safely replace in a
33130 peephole2. If this is a peephole2, we assume it spans exactly two
33131 instructions (addis and load), so we check that the target register is
33132 not used in the memory address and that the register holding the addis
33133 result is dead after the peephole. */
33134 if (REGNO (addis_reg) != REGNO (target))
33136 if (!peep2_p)
33137 return false;
33139 if (reg_mentioned_p (target, mem))
33140 return false;
33142 if (!peep2_reg_dead_p (2, addis_reg))
33143 return false;
33145 /* If the target register being loaded is the stack pointer, we must
33146 avoid loading any other value into it, even temporarily. */
33147 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33148 return false;
33151 base_reg = XEXP (addr, 0);
33152 return REGNO (addis_reg) == REGNO (base_reg);
33155 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33156 sequence. We adjust the addis register to use the target register. If the
33157 load sign-extends, we rewrite it as a zero-extending load followed by an
33158 explicit sign extension, since the fusion only covers zero-extending
33159 loads.
33161 The operands are:
33162 operands[0] register set with addis (to be replaced with target)
33163 operands[1] value set via addis
33164 operands[2] target register being loaded
33165 operands[3] D-form memory reference using operands[0]. */
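/* A schematic sketch of the expansion for a sign-extending HImode load
   (register numbers are illustrative):

	(set (reg:DI 9) (high:DI ...))				; addis
	(set (reg:DI 3) (sign_extend:DI (mem:HI (plus ...))))	; load

   becomes an addis into the target register, a plain HImode load
   through that same register, and an explicit sign extension emitted
   as a separate insn afterwards.  */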
33167 void
33168 expand_fusion_gpr_load (rtx *operands)
33170 rtx addis_value = operands[1];
33171 rtx target = operands[2];
33172 rtx orig_mem = operands[3];
33173 rtx new_addr, new_mem, orig_addr, offset;
33174 enum rtx_code plus_or_lo_sum;
33175 enum machine_mode target_mode = GET_MODE (target);
33176 enum machine_mode extend_mode = target_mode;
33177 enum machine_mode ptr_mode = Pmode;
33178 enum rtx_code extend = UNKNOWN;
33179 rtx addis_reg = ((ptr_mode == target_mode)
33180 ? target
33181 : simplify_subreg (ptr_mode, target, target_mode, 0));
33183 if (GET_CODE (orig_mem) == ZERO_EXTEND
33184 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33186 extend = GET_CODE (orig_mem);
33187 orig_mem = XEXP (orig_mem, 0);
33188 target_mode = GET_MODE (orig_mem);
33191 gcc_assert (MEM_P (orig_mem));
33193 orig_addr = XEXP (orig_mem, 0);
33194 plus_or_lo_sum = GET_CODE (orig_addr);
33195 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33197 offset = XEXP (orig_addr, 1);
33198 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
33199 new_mem = change_address (orig_mem, target_mode, new_addr);
33201 if (extend != UNKNOWN)
33202 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33204 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
33205 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
33207 if (extend == SIGN_EXTEND)
33209 int sub_off = ((BYTES_BIG_ENDIAN)
33210 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33211 : 0);
33212 rtx sign_reg
33213 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33215 emit_insn (gen_rtx_SET (VOIDmode, target,
33216 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33219 return;
33222 /* Return a string to fuse an addis instruction with a GPR load into the
33223 same register that the addis instruction set. The code is complicated,
33224 so we call output_asm_insn directly, and just return "".
33226 The operands are:
33227 operands[0] register set with addis (must be same reg as target).
33228 operands[1] value set via addis
33229 operands[2] target register being loaded
33230 operands[3] D-form memory reference using operands[0]. */
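/* For instance (illustrative output), for an SImode load from a
   TOC-relative address on an ELF target this routine emits

	addis 3,2,sym@toc@ha		# gpr load fusion, type int
	lwz 3,sym@toc@l(3)  */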
33232 const char *
33233 emit_fusion_gpr_load (rtx *operands)
33235 rtx addis_reg = operands[0];
33236 rtx addis_value = operands[1];
33237 rtx target = operands[2];
33238 rtx mem = operands[3];
33239 rtx fuse_ops[10];
33240 rtx addr;
33241 rtx load_offset;
33242 const char *addis_str = NULL;
33243 const char *load_str = NULL;
33244 const char *extend_insn = NULL;
33245 const char *mode_name = NULL;
33246 char insn_template[80];
33247 enum machine_mode mode;
33248 const char *comment_str = ASM_COMMENT_START;
33249 bool sign_p = false;
33251 gcc_assert (REG_P (addis_reg) && REG_P (target));
33252 gcc_assert (REGNO (addis_reg) == REGNO (target));
33254 if (*comment_str == ' ')
33255 comment_str++;
33257 /* Allow sign/zero extension. */
33258 if (GET_CODE (mem) == ZERO_EXTEND)
33259 mem = XEXP (mem, 0);
33261 else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
33263 sign_p = true;
33264 mem = XEXP (mem, 0);
33267 gcc_assert (MEM_P (mem));
33268 addr = XEXP (mem, 0);
33269 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33270 gcc_unreachable ();
33272 load_offset = XEXP (addr, 1);
33274 /* Now emit the load instruction to the same register. */
33275 mode = GET_MODE (mem);
33276 switch (mode)
33278 case QImode:
33279 mode_name = "char";
33280 load_str = "lbz";
33281 extend_insn = "extsb %0,%0";
33282 break;
33284 case HImode:
33285 mode_name = "short";
33286 load_str = "lhz";
33287 extend_insn = "extsh %0,%0";
33288 break;
33290 case SImode:
33291 mode_name = "int";
33292 load_str = "lwz";
33293 extend_insn = "extsw %0,%0";
33294 break;
33296 case DImode:
33297 if (TARGET_POWERPC64)
33299 mode_name = "long";
33300 load_str = "ld";
33302 else
33303 gcc_unreachable ();
33304 break;
33306 default:
33307 gcc_unreachable ();
33310 /* Emit the addis instruction. */
33311 fuse_ops[0] = target;
33312 if (satisfies_constraint_L (addis_value))
33314 fuse_ops[1] = addis_value;
33315 addis_str = "lis %0,%v1";
33318 else if (GET_CODE (addis_value) == PLUS)
33320 rtx op0 = XEXP (addis_value, 0);
33321 rtx op1 = XEXP (addis_value, 1);
33323 if (REG_P (op0) && CONST_INT_P (op1)
33324 && satisfies_constraint_L (op1))
33326 fuse_ops[1] = op0;
33327 fuse_ops[2] = op1;
33328 addis_str = "addis %0,%1,%v2";
33332 else if (GET_CODE (addis_value) == HIGH)
33334 rtx value = XEXP (addis_value, 0);
33335 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
33337 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
33338 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
33339 if (TARGET_ELF)
33340 addis_str = "addis %0,%2,%1@toc@ha";
33342 else if (TARGET_XCOFF)
33343 addis_str = "addis %0,%1@u(%2)";
33345 else
33346 gcc_unreachable ();
33349 else if (GET_CODE (value) == PLUS)
33351 rtx op0 = XEXP (value, 0);
33352 rtx op1 = XEXP (value, 1);
33354 if (GET_CODE (op0) == UNSPEC
33355 && XINT (op0, 1) == UNSPEC_TOCREL
33356 && CONST_INT_P (op1))
33358 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
33359 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
33360 fuse_ops[3] = op1;
33361 if (TARGET_ELF)
33362 addis_str = "addis %0,%2,%1+%3@toc@ha";
33364 else if (TARGET_XCOFF)
33365 addis_str = "addis %0,%1+%3@u(%2)";
33367 else
33368 gcc_unreachable ();
33372 else if (satisfies_constraint_L (value))
33374 fuse_ops[1] = value;
33375 addis_str = "lis %0,%v1";
33378 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
33380 fuse_ops[1] = value;
33381 addis_str = "lis %0,%1@ha";
33385 if (!addis_str)
33386 fatal_insn ("Could not generate addis value for fusion", addis_value);
33388 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
33389 comment_str, mode_name);
33390 output_asm_insn (insn_template, fuse_ops);
33392 /* Emit the D-form load instruction. */
33393 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
33395 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
33396 fuse_ops[1] = load_offset;
33397 output_asm_insn (insn_template, fuse_ops);
33400 else if (GET_CODE (load_offset) == UNSPEC
33401 && XINT (load_offset, 1) == UNSPEC_TOCREL)
33403 if (TARGET_ELF)
33404 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
33406 else if (TARGET_XCOFF)
33407 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33409 else
33410 gcc_unreachable ();
33412 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
33413 output_asm_insn (insn_template, fuse_ops);
33416 else if (GET_CODE (load_offset) == PLUS
33417 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
33418 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
33419 && CONST_INT_P (XEXP (load_offset, 1)))
33421 rtx tocrel_unspec = XEXP (load_offset, 0);
33422 if (TARGET_ELF)
33423 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
33425 else if (TARGET_XCOFF)
33426 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
33428 else
33429 gcc_unreachable ();
33431 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
33432 fuse_ops[2] = XEXP (load_offset, 1);
33433 output_asm_insn (insn_template, fuse_ops);
33436 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
33438 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33440 fuse_ops[1] = load_offset;
33441 output_asm_insn (insn_template, fuse_ops);
33444 else
33445 fatal_insn ("Unable to generate load offset for fusion", load_offset);
33447 /* Handle sign extension. The peephole2 pass generates this as a separate
33448 insn, but we handle it just in case it got reattached. */
33449 if (sign_p)
33451 gcc_assert (extend_insn != NULL);
33452 output_asm_insn (extend_insn, fuse_ops);
33455 return "";
33458 /* Analyze vector computations and remove unnecessary doubleword
33459 swaps (xxswapdi instructions). This pass is performed only
33460 for little-endian VSX code generation.
33462 For this specific case, loads and stores of 4x32 and 2x64 vectors
33463 are inefficient. These are implemented using the lxvd2x and
33464 stxvd2x instructions, which invert the order of doublewords in
33465 a vector register. Thus the code generation inserts an xxswapdi
33466 after each such load, and prior to each such store. (For spill
33467 code after register assignment, an additional xxswapdi is inserted
33468 following each store in order to return a hard register to its
33469 unpermuted value.)
33471 The extra xxswapdi instructions reduce performance. This can be
33472 particularly bad for vectorized code. The purpose of this pass
33473 is to reduce the number of xxswapdi instructions required for
33474 correctness.
33476 The primary insight is that much code that operates on vectors
33477 does not care about the relative order of elements in a register,
33478 so long as the correct memory order is preserved. If we have
33479 a computation where all input values are provided by lxvd2x/xxswapdi
33480 sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
33481 and all intermediate computations are pure SIMD (independent of
33482 element order), then all the xxswapdi's associated with the loads
33483 and stores may be removed.
33485 This pass uses some of the infrastructure and logical ideas from
33486 the "web" pass in web.c. We create maximal webs of computations
33487 fitting the description above using union-find. Each such web is
33488 then optimized by removing its unnecessary xxswapdi instructions.
33490 The pass is placed prior to global optimization so that we can
33491 perform the optimization in the safest and simplest way possible;
33492 that is, by replacing each xxswapdi insn with a register copy insn.
33493 Subsequent forward propagation will remove copies where possible.
33495 There are some operations sensitive to element order for which we
33496 can still allow the operation, provided we modify those operations.
33497 These include CONST_VECTORs, for which we must swap the first and
33498 second halves of the constant vector; and SUBREGs, for which we
33499 must adjust the byte offset to account for the swapped doublewords.
33500 A remaining opportunity would be non-immediate-form splats, for
33501 which we should adjust the selected lane of the input. We should
33502 also make code generation adjustments for sum-across operations,
33503 since this is a common vectorizer reduction.
33505 Because we run prior to the first split, we can see loads and stores
33506 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
33507 vector loads and stores that have not yet been split into a permuting
33508 load/store and a swap. (One way this can happen is with a builtin
33509 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
33510 than deleting a swap, we convert the load/store into a permuting
33511 load/store (which effectively removes the swap). */
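/* A small, schematic example of the effect of this pass on
   little-endian VSX code (assembly is illustrative):

	lxvd2x 0,0,9		; load with doublewords reversed
	xxswapd 0,0		; restore element order
	xvadddp 0,0,0		; element-order-independent computation
	xxswapd 0,0		; reverse again for the store
	stxvd2x 0,0,10		; store with doublewords reversed

   Because the computation is insensitive to element order, both
   xxswapd (xxswapdi) instructions can be removed: each is replaced
   with a register copy, which later forward propagation cleans up.  */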
33513 /* This is based on the union-find logic in web.c. web_entry_base is
33514 defined in df.h. */
33515 class swap_web_entry : public web_entry_base
33517 public:
33518 /* Pointer to the insn. */
33519 rtx_insn *insn;
33520 /* Set if insn contains a mention of a vector register. All other
33521 fields are undefined if this field is unset. */
33522 unsigned int is_relevant : 1;
33523 /* Set if insn is a load. */
33524 unsigned int is_load : 1;
33525 /* Set if insn is a store. */
33526 unsigned int is_store : 1;
33527 /* Set if insn is a doubleword swap. This can either be a register swap
33528 or a permuting load or store (test is_load and is_store for this). */
33529 unsigned int is_swap : 1;
33530 /* Set if the insn has a live-in use of a parameter register. */
33531 unsigned int is_live_in : 1;
33532 /* Set if the insn has a live-out def of a return register. */
33533 unsigned int is_live_out : 1;
33534 /* Set if the insn contains a subreg reference of a vector register. */
33535 unsigned int contains_subreg : 1;
33536 /* Set if the insn contains a 128-bit integer operand. */
33537 unsigned int is_128_int : 1;
33538 /* Set if this is a call-insn. */
33539 unsigned int is_call : 1;
33540 /* Set if this insn does not perform a vector operation for which
33541 element order matters, or if we know how to fix it up if it does.
33542 Undefined if is_swap is set. */
33543 unsigned int is_swappable : 1;
33544 /* A nonzero value indicates what kind of special handling for this
33545 insn is required if doublewords are swapped. Undefined if
33546 is_swappable is not set. */
33547 unsigned int special_handling : 3;
33548 /* Set if the web represented by this entry cannot be optimized. */
33549 unsigned int web_not_optimizable : 1;
33550 /* Set if this insn should be deleted. */
33551 unsigned int will_delete : 1;
33554 enum special_handling_values {
33555 SH_NONE = 0,
33556 SH_CONST_VECTOR,
33557 SH_SUBREG,
33558 SH_NOSWAP_LD,
33559 SH_NOSWAP_ST
33562 /* Union INSN with all insns containing definitions that reach USE.
33563 Detect whether USE is live-in to the current function. */
33564 static void
33565 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
33567 struct df_link *link = DF_REF_CHAIN (use);
33569 if (!link)
33570 insn_entry[INSN_UID (insn)].is_live_in = 1;
33572 while (link)
33574 if (DF_REF_IS_ARTIFICIAL (link->ref))
33575 insn_entry[INSN_UID (insn)].is_live_in = 1;
33577 if (DF_REF_INSN_INFO (link->ref))
33579 rtx def_insn = DF_REF_INSN (link->ref);
33580 (void)unionfind_union (insn_entry + INSN_UID (insn),
33581 insn_entry + INSN_UID (def_insn));
33584 link = link->next;
33588 /* Union INSN with all insns containing uses reached from DEF.
33589 Detect whether DEF is live-out from the current function. */
33590 static void
33591 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
33593 struct df_link *link = DF_REF_CHAIN (def);
33595 if (!link)
33596 insn_entry[INSN_UID (insn)].is_live_out = 1;
33598 while (link)
33600 /* This could be an eh use or some other artificial use;
33601 we treat these all the same (killing the optimization). */
33602 if (DF_REF_IS_ARTIFICIAL (link->ref))
33603 insn_entry[INSN_UID (insn)].is_live_out = 1;
33605 if (DF_REF_INSN_INFO (link->ref))
33607 rtx use_insn = DF_REF_INSN (link->ref);
33608 (void)unionfind_union (insn_entry + INSN_UID (insn),
33609 insn_entry + INSN_UID (use_insn));
33612 link = link->next;
33616 /* Return 1 iff INSN is a load insn, including permuting loads that
33617 represent an lxvd2x instruction; else return 0. */
33618 static unsigned int
33619 insn_is_load_p (rtx insn)
33621 rtx body = PATTERN (insn);
33623 if (GET_CODE (body) == SET)
33625 if (GET_CODE (SET_SRC (body)) == MEM)
33626 return 1;
33628 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
33629 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
33630 return 1;
33632 return 0;
33635 if (GET_CODE (body) != PARALLEL)
33636 return 0;
33638 rtx set = XVECEXP (body, 0, 0);
33640 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
33641 return 1;
33643 return 0;
33646 /* Return 1 iff INSN is a store insn, including permuting stores that
33647 represent an stxvd2x instruction; else return 0. */
33648 static unsigned int
33649 insn_is_store_p (rtx insn)
33651 rtx body = PATTERN (insn);
33652 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
33653 return 1;
33654 if (GET_CODE (body) != PARALLEL)
33655 return 0;
33656 rtx set = XVECEXP (body, 0, 0);
33657 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
33658 return 1;
33659 return 0;
33662 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
33663 a permuting load, or a permuting store. */
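/* For example, for V4SI the doubleword swap recognized here has the
   form

	(set (reg:V4SI 32)
	     (vec_select:V4SI (reg:V4SI 33)
			      (parallel [(const_int 2) (const_int 3)
					 (const_int 0) (const_int 1)])))

   i.e., the selection lanes of the second half come first.  */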
33664 static unsigned int
33665 insn_is_swap_p (rtx insn)
33667 rtx body = PATTERN (insn);
33668 if (GET_CODE (body) != SET)
33669 return 0;
33670 rtx rhs = SET_SRC (body);
33671 if (GET_CODE (rhs) != VEC_SELECT)
33672 return 0;
33673 rtx parallel = XEXP (rhs, 1);
33674 if (GET_CODE (parallel) != PARALLEL)
33675 return 0;
33676 unsigned int len = XVECLEN (parallel, 0);
33677 if (len != 2 && len != 4 && len != 8 && len != 16)
33678 return 0;
33679 for (unsigned int i = 0; i < len / 2; ++i)
33681 rtx op = XVECEXP (parallel, 0, i);
33682 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
33683 return 0;
33685 for (unsigned int i = len / 2; i < len; ++i)
33687 rtx op = XVECEXP (parallel, 0, i);
33688 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
33689 return 0;
33691 return 1;
33694 /* Return 1 iff OP is an operand that will not be affected by having
33695 vector doublewords swapped in memory. */
33696 static unsigned int
33697 rtx_is_swappable_p (rtx op, unsigned int *special)
33699 enum rtx_code code = GET_CODE (op);
33700 int i, j;
33702 switch (code)
33704 case LABEL_REF:
33705 case SYMBOL_REF:
33706 case CLOBBER:
33707 case REG:
33708 return 1;
33710 case VEC_CONCAT:
33711 case VEC_SELECT:
33712 case ASM_INPUT:
33713 case ASM_OPERANDS:
33714 return 0;
33716 case CONST_VECTOR:
33718 *special = SH_CONST_VECTOR;
33719 return 1;
33722 case VEC_DUPLICATE:
33723 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
33724 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
33725 it represents a vector splat for which we can do special
33726 handling. */
33727 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
33728 return 1;
33729 else
33730 return 0;
33732 case UNSPEC:
33734 /* Various operations are unsafe for this optimization, at least
33735 without significant additional work. Permutes are obviously
33736 problematic, as both the permute control vector and the ordering
33737 of the target values are invalidated by doubleword swapping.
33738 Vector pack and unpack modify the number of vector lanes.
33739 Merge-high/low will not operate correctly on swapped operands.
33740 Vector shifts across element boundaries are clearly uncool,
33741 as are vector select and concatenate operations. Vector
33742 sum-across instructions define one operand with a specific
33743 order-dependent element, so additional fixup code would be
33744 needed to make those work. Vector set and non-immediate-form
33745 vector splat are element-order sensitive. A few of these
33746 cases might be workable with special handling if required. */
33747 int val = XINT (op, 1);
33748 if (val == UNSPEC_VMRGH_DIRECT
33749 || val == UNSPEC_VMRGL_DIRECT
33750 || val == UNSPEC_VPACK_SIGN_SIGN_SAT
33751 || val == UNSPEC_VPACK_SIGN_UNS_SAT
33752 || val == UNSPEC_VPACK_UNS_UNS_MOD
33753 || val == UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
33754 || val == UNSPEC_VPACK_UNS_UNS_SAT
33755 || val == UNSPEC_VPERM
33756 || val == UNSPEC_VPERM_UNS
33757 || val == UNSPEC_VPERMHI
33758 || val == UNSPEC_VPERMSI
33759 || val == UNSPEC_VPKPX
33760 || val == UNSPEC_VSLDOI
33761 || val == UNSPEC_VSLO
33762 || val == UNSPEC_VSPLT_DIRECT
33763 || val == UNSPEC_VSRO
33764 || val == UNSPEC_VSUM2SWS
33765 || val == UNSPEC_VSUM4S
33766 || val == UNSPEC_VSUM4UBS
33767 || val == UNSPEC_VSUMSWS
33768 || val == UNSPEC_VSUMSWS_DIRECT
33769 || val == UNSPEC_VSX_CONCAT
33770 || val == UNSPEC_VSX_CVSPDP
33771 || val == UNSPEC_VSX_CVSPDPN
33772 || val == UNSPEC_VSX_SET
33773 || val == UNSPEC_VSX_SLDWI
33774 || val == UNSPEC_VSX_XXSPLTW
33775 || val == UNSPEC_VUNPACK_HI_SIGN
33776 || val == UNSPEC_VUNPACK_HI_SIGN_DIRECT
33777 || val == UNSPEC_VUNPACK_LO_SIGN
33778 || val == UNSPEC_VUNPACK_LO_SIGN_DIRECT
33779 || val == UNSPEC_VUPKHPX
33780 || val == UNSPEC_VUPKHS_V4SF
33781 || val == UNSPEC_VUPKHU_V4SF
33782 || val == UNSPEC_VUPKLPX
33783 || val == UNSPEC_VUPKLS_V4SF
33784 || val == UNSPEC_VUPKLU_V4SF)
33785 return 0;
33788 default:
33789 break;
33792 const char *fmt = GET_RTX_FORMAT (code);
33793 int ok = 1;
33795 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
33796 if (fmt[i] == 'e' || fmt[i] == 'u')
33798 unsigned int special_op = SH_NONE;
33799 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
33800 /* Ensure we never have two kinds of special handling
33801 for the same insn. */
33802 if (*special != SH_NONE && special_op != SH_NONE
33803 && *special != special_op)
33804 return 0;
33805 *special = special_op;
33807 else if (fmt[i] == 'E')
33808 for (j = 0; j < XVECLEN (op, i); ++j)
33810 unsigned int special_op = SH_NONE;
33811 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
33812 /* Ensure we never have two kinds of special handling
33813 for the same insn. */
33814 if (*special != SH_NONE && special_op != SH_NONE
33815 && *special != special_op)
33816 return 0;
33817 *special = special_op;
33820 return ok;
33823 /* Return 1 iff INSN is an insn that will not be affected by
33824 having vector doublewords swapped in memory (in which case
33825 *SPECIAL is unchanged), or that can be modified to be correct
33826 if vector doublewords are swapped in memory (in which case
33827 *SPECIAL is changed to a value indicating how). */
33828 static unsigned int
33829 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
33830 unsigned int *special)
33832 /* Calls are always bad. */
33833 if (GET_CODE (insn) == CALL_INSN)
33834 return 0;
33836 /* Loads and stores seen here are not permuting, but we can still
33837 fix them up by converting them to permuting ones. Exception:
33838 UNSPEC_LVX and UNSPEC_STVX, which have a PARALLEL body instead
33839 of a SET. */
33840 rtx body = PATTERN (insn);
33841 int i = INSN_UID (insn);
33843 if (insn_entry[i].is_load)
33845 if (GET_CODE (body) == SET)
33847 *special = SH_NOSWAP_LD;
33848 return 1;
33850 else
33851 return 0;
33854 if (insn_entry[i].is_store)
33856 if (GET_CODE (body) == SET)
33858 *special = SH_NOSWAP_ST;
33859 return 1;
33861 else
33862 return 0;
33865 /* Otherwise check the operands for vector lane violations. */
33866 return rtx_is_swappable_p (body, special);
33869 enum chain_purpose { FOR_LOADS, FOR_STORES };
33871 /* Return true if the UD or DU chain headed by LINK is non-empty,
33872 and every entry on the chain references an insn that is a
33873 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
33874 register swap must have only permuting loads as reaching defs.
33875 If PURPOSE is FOR_STORES, each such register swap must have only
33876 register swaps or permuting stores as reached uses. */
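/* Schematically, for PURPOSE == FOR_LOADS the qualifying shape is

	permuting load  -->  register swap  -->  reached insn

   where every definition feeding each register swap must itself be a
   permuting load.  (Illustrative; the walk below operates on df
   use-def/def-use chains rather than on the insn stream.)  */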
33877 static bool
33878 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
33879 enum chain_purpose purpose)
33881 if (!link)
33882 return false;
33884 for (; link; link = link->next)
33886 if (!VECTOR_MODE_P (GET_MODE (DF_REF_REG (link->ref))))
33887 continue;
33889 if (DF_REF_IS_ARTIFICIAL (link->ref))
33890 return false;
33892 rtx reached_insn = DF_REF_INSN (link->ref);
33893 unsigned uid = INSN_UID (reached_insn);
33894 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
33896 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
33897 || insn_entry[uid].is_store)
33898 return false;
33900 if (purpose == FOR_LOADS)
33902 df_ref use;
33903 FOR_EACH_INSN_INFO_USE (use, insn_info)
33905 struct df_link *swap_link = DF_REF_CHAIN (use);
33907 while (swap_link)
33909 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
33910 return false;
33912 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
33913 unsigned uid2 = INSN_UID (swap_def_insn);
33915 /* Only permuting loads are allowed. */
33916 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
33917 return false;
33919 swap_link = swap_link->next;
33923 else if (purpose == FOR_STORES)
33925 df_ref def;
33926 FOR_EACH_INSN_INFO_DEF (def, insn_info)
33928 struct df_link *swap_link = DF_REF_CHAIN (def);
33930 while (swap_link)
33932 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
33933 return false;
33935 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
33936 unsigned uid2 = INSN_UID (swap_use_insn);
33938 /* Permuting stores or register swaps are allowed. */
33939 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
33940 return false;
33942 swap_link = swap_link->next;
33948 return true;
33951 /* Mark the xxswapdi instructions associated with permuting loads and
33952 stores for removal. Note that we only flag them for deletion here,
33953 as there is a possibility of a swap being reached from multiple
33954 loads, etc. */
33955 static void
33956 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
33958 rtx insn = insn_entry[i].insn;
33959 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
33961 if (insn_entry[i].is_load)
33963 df_ref def;
33964 FOR_EACH_INSN_INFO_DEF (def, insn_info)
33966 struct df_link *link = DF_REF_CHAIN (def);
33968 /* We know by now that these are swaps, so we can delete
33969 them confidently. */
33970 while (link)
33972 rtx use_insn = DF_REF_INSN (link->ref);
33973 insn_entry[INSN_UID (use_insn)].will_delete = 1;
33974 link = link->next;
33978 else if (insn_entry[i].is_store)
33980 df_ref use;
33981 FOR_EACH_INSN_INFO_USE (use, insn_info)
33983 /* Ignore uses for addressability. */
33984 enum machine_mode mode = GET_MODE (DF_REF_REG (use));
33985 if (!VECTOR_MODE_P (mode))
33986 continue;
33988 struct df_link *link = DF_REF_CHAIN (use);
33990 /* We know by now that these are swaps, so we can delete
33991 them confidently. */
33992 while (link)
33994 rtx def_insn = DF_REF_INSN (link->ref);
33995 insn_entry[INSN_UID (def_insn)].will_delete = 1;
33996 link = link->next;
34002 /* OP is either a CONST_VECTOR or an expression containing one.
34003 Swap the first half of the vector with the second in the first
34004 case. Recurse to find it in the second. */
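/* For example, the V4SI constant { 0, 1, 2, 3 } becomes
   { 2, 3, 0, 1 }; once the doublewords are swapped in the register,
   the modified constant produces the originally intended value.  */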
34005 static void
34006 swap_const_vector_halves (rtx op)
34008 int i;
34009 enum rtx_code code = GET_CODE (op);
34010 if (GET_CODE (op) == CONST_VECTOR)
34012 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
34013 for (i = 0; i < half_units; ++i)
34015 rtx temp = CONST_VECTOR_ELT (op, i);
34016 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
34017 CONST_VECTOR_ELT (op, i + half_units) = temp;
34020 else
34022 int j;
34023 const char *fmt = GET_RTX_FORMAT (code);
34024 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34025 if (fmt[i] == 'e' || fmt[i] == 'u')
34026 swap_const_vector_halves (XEXP (op, i));
34027 else if (fmt[i] == 'E')
34028 for (j = 0; j < XVECLEN (op, i); ++j)
34029 swap_const_vector_halves (XVECEXP (op, i, j));
34033 /* Find all subregs of a vector expression that perform a narrowing,
34034 and adjust the subreg index to account for doubleword swapping. */
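/* For example, (subreg:DI (reg:V2DI 32) 0) names the doubleword that
   lives at byte offset 8 once the doublewords are swapped, so it is
   rewritten as (subreg:DI (reg:V2DI 32) 8), and vice versa.  */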
34035 static void
34036 adjust_subreg_index (rtx op)
34038 enum rtx_code code = GET_CODE (op);
34039 if (code == SUBREG
34040 && (GET_MODE_SIZE (GET_MODE (op))
34041 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
34043 unsigned int index = SUBREG_BYTE (op);
34044 if (index < 8)
34045 index += 8;
34046 else
34047 index -= 8;
34048 SUBREG_BYTE (op) = index;
34051 const char *fmt = GET_RTX_FORMAT (code);
34052 int i,j;
34053 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34054 if (fmt[i] == 'e' || fmt[i] == 'u')
34055 adjust_subreg_index (XEXP (op, i));
34056 else if (fmt[i] == 'E')
34057 for (j = 0; j < XVECLEN (op, i); ++j)
34058 adjust_subreg_index (XVECEXP (op, i, j));
34061 /* Convert the non-permuting load INSN to a permuting one. */
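/* Schematically, (set (reg:V4SI 32) (mem:V4SI ...)) becomes

	(set (reg:V4SI 32)
	     (vec_select:V4SI (mem:V4SI ...)
			      (parallel [(const_int 2) (const_int 3)
					 (const_int 0) (const_int 1)])))

   which matches the permuting-load pattern, so no separate swap insn
   is needed.  */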
34062 static void
34063 permute_load (rtx_insn *insn)
34065 rtx body = PATTERN (insn);
34066 rtx mem_op = SET_SRC (body);
34067 rtx tgt_reg = SET_DEST (body);
34068 enum machine_mode mode = GET_MODE (tgt_reg);
34069 int n_elts = GET_MODE_NUNITS (mode);
34070 int half_elts = n_elts / 2;
34071 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34072 int i, j;
34073 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34074 XVECEXP (par, 0, i) = GEN_INT (j);
34075 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34076 XVECEXP (par, 0, i) = GEN_INT (j);
34077 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
34078 SET_SRC (body) = sel;
34079 INSN_CODE (insn) = -1; /* Force re-recognition. */
34080 df_insn_rescan (insn);
34082 if (dump_file)
34083 fprintf (dump_file, "Replacing load %d with permuted load\n",
34084 INSN_UID (insn));
34087 /* Convert the non-permuting store INSN to a permuting one. */
34088 static void
34089 permute_store (rtx_insn *insn)
34091 rtx body = PATTERN (insn);
34092 rtx src_reg = SET_SRC (body);
34093 enum machine_mode mode = GET_MODE (src_reg);
34094 int n_elts = GET_MODE_NUNITS (mode);
34095 int half_elts = n_elts / 2;
34096 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34097 int i, j;
34098 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34099 XVECEXP (par, 0, i) = GEN_INT (j);
34100 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34101 XVECEXP (par, 0, i) = GEN_INT (j);
34102 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
34103 SET_SRC (body) = sel;
34104 INSN_CODE (insn) = -1; /* Force re-recognition. */
34105 df_insn_rescan (insn);
34107 if (dump_file)
34108 fprintf (dump_file, "Replacing store %d with permuted store\n",
34109 INSN_UID (insn));
34112 /* The insn described by INSN_ENTRY[I] can be swapped, but only
34113 with special handling. Take care of that here. */
34114 static void
34115 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
34117 rtx_insn *insn = insn_entry[i].insn;
34118 rtx body = PATTERN (insn);
34120 switch (insn_entry[i].special_handling)
34122 case SH_CONST_VECTOR:
34124 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
34125 gcc_assert (GET_CODE (body) == SET);
34126 rtx rhs = SET_SRC (body);
34127 swap_const_vector_halves (rhs);
34128 if (dump_file)
34129 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
34130 break;
34132 case SH_SUBREG:
34133 /* A subreg of the same size is already safe. For subregs that
34134 select a smaller portion of a reg, adjust the index for
34135 swapped doublewords. */
34136 adjust_subreg_index (body);
34137 if (dump_file)
34138 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
34139 break;
34140 case SH_NOSWAP_LD:
34141 /* Convert a non-permuting load to a permuting one. */
34142 permute_load (insn);
34143 break;
34144 case SH_NOSWAP_ST:
34145 /* Convert a non-permuting store to a permuting one. */
34146 permute_store (insn);
34147 break;
34151 /* Find the insn from the Ith table entry, which is known to be a
34152 register swap Y = SWAP(X). Replace it with a copy Y = X. */
34153 static void
34154 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
34156 rtx_insn *insn = insn_entry[i].insn;
34157 rtx body = PATTERN (insn);
34158 rtx src_reg = XEXP (SET_SRC (body), 0);
34159 rtx copy = gen_rtx_SET (VOIDmode, SET_DEST (body), src_reg);
34160 rtx_insn *new_insn = emit_insn_before (copy, insn);
34161 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
34162 df_insn_rescan (new_insn);
34164 if (dump_file)
34166 unsigned int new_uid = INSN_UID (new_insn);
34167 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
34170 df_insn_delete (insn);
34171 remove_insn (insn);
34172 INSN_DELETED_P (insn) = 1;
34175 /* Dump the swap table to DUMP_FILE. */
34176 static void
34177 dump_swap_insn_table (swap_web_entry *insn_entry)
34179 int e = get_max_uid ();
34180 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
34182 for (int i = 0; i < e; ++i)
34183 if (insn_entry[i].is_relevant)
34185 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
34186 fprintf (dump_file, "%6d %6d ", i,
34187 pred_entry && pred_entry->insn
34188 ? INSN_UID (pred_entry->insn) : 0);
34189 if (insn_entry[i].is_load)
34190 fputs ("load ", dump_file);
34191 if (insn_entry[i].is_store)
34192 fputs ("store ", dump_file);
34193 if (insn_entry[i].is_swap)
34194 fputs ("swap ", dump_file);
34195 if (insn_entry[i].is_live_in)
34196 fputs ("live-in ", dump_file);
34197 if (insn_entry[i].is_live_out)
34198 fputs ("live-out ", dump_file);
34199 if (insn_entry[i].contains_subreg)
34200 fputs ("subreg ", dump_file);
34201 if (insn_entry[i].is_128_int)
34202 fputs ("int128 ", dump_file);
34203 if (insn_entry[i].is_call)
34204 fputs ("call ", dump_file);
34205 if (insn_entry[i].is_swappable)
34207 fputs ("swappable ", dump_file);
34208 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
34209 fputs ("special:constvec ", dump_file);
34210 else if (insn_entry[i].special_handling == SH_SUBREG)
34211 fputs ("special:subreg ", dump_file);
34212 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
34213 fputs ("special:load ", dump_file);
34214 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
34215 fputs ("special:store ", dump_file);
34217 if (insn_entry[i].web_not_optimizable)
34218 fputs ("unoptimizable ", dump_file);
34219 if (insn_entry[i].will_delete)
34220 fputs ("delete ", dump_file);
34221 fputs ("\n", dump_file);
34223 fputs ("\n", dump_file);
34226 /* Main entry point for this pass. */
34227 unsigned int
34228 rs6000_analyze_swaps (function *fun)
34230 swap_web_entry *insn_entry;
34231 basic_block bb;
34232 rtx_insn *insn;
34234 /* Dataflow analysis for use-def chains. */
34235 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
34236 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
34237 df_analyze ();
34238 df_set_flags (DF_DEFER_INSN_RESCAN);
34240 /* Allocate structure to represent webs of insns. */
34241 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
34243 /* Walk the insns to gather basic data. */
34244 FOR_ALL_BB_FN (bb, fun)
34245 FOR_BB_INSNS (bb, insn)
34247 unsigned int uid = INSN_UID (insn);
34248 if (NONDEBUG_INSN_P (insn))
34250 insn_entry[uid].insn = insn;
34252 if (GET_CODE (insn) == CALL_INSN)
34253 insn_entry[uid].is_call = 1;
34255 /* Walk the uses and defs to see if we mention vector regs.
34256 Record any constraints on optimization of such mentions. */
34257 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34258 df_ref mention;
34259 FOR_EACH_INSN_INFO_USE (mention, insn_info)
34261 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34262 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34264 /* If a use gets its value from a call insn, it will be
34265 a hard register and will look like (reg:V4SI 3 3).
34266 The df analysis creates two mentions for GPR3 and GPR4,
34267 both DImode. We must recognize this and treat it as a
34268 vector mention to ensure the call is unioned with this
34269 use. */
34270 if (mode == DImode && DF_REF_INSN_INFO (mention))
34272 rtx feeder = DF_REF_INSN (mention);
34273 /* FIXME: It is pretty hard to get from the df mention
34274 to the mode of the use in the insn. We arbitrarily
34275 pick a vector mode here, even though the use might
34276 be a real DImode. We can be too conservative
34277 (create a web larger than necessary) because of
34278 this, so consider eventually fixing this. */
34279 if (GET_CODE (feeder) == CALL_INSN)
34280 mode = V4SImode;
34283 if (VECTOR_MODE_P (mode))
34285 insn_entry[uid].is_relevant = 1;
34286 if (mode == TImode || mode == V1TImode)
34287 insn_entry[uid].is_128_int = 1;
34288 if (DF_REF_INSN_INFO (mention))
34289 insn_entry[uid].contains_subreg
34290 = !rtx_equal_p (DF_REF_REG (mention),
34291 DF_REF_REAL_REG (mention));
34292 union_defs (insn_entry, insn, mention);
34295 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
34297 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34298 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34300 /* If we're loading up a hard vector register for a call,
34301 it looks like (set (reg:V4SI 9 9) (...)). The df
34302 analysis creates two mentions for GPR9 and GPR10, both
34303 DImode. So relying on the mode from the mentions
34304 isn't sufficient to ensure we union the call into the
34305 web with the parameter setup code. */
34306 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
34307 && VECTOR_MODE_P (GET_MODE (SET_DEST (PATTERN (insn)))))
34308 mode = GET_MODE (SET_DEST (PATTERN (insn)));
34310 if (VECTOR_MODE_P (mode))
34312 insn_entry[uid].is_relevant = 1;
34313 if (mode == TImode || mode == V1TImode)
34314 insn_entry[uid].is_128_int = 1;
34315 if (DF_REF_INSN_INFO (mention))
34316 insn_entry[uid].contains_subreg
34317 = !rtx_equal_p (DF_REF_REG (mention),
34318 DF_REF_REAL_REG (mention));
34319 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
34320 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
34321 insn_entry[uid].is_live_out = 1;
34322 union_uses (insn_entry, insn, mention);
34326 if (insn_entry[uid].is_relevant)
34328 /* Determine if this is a load or store. */
34329 insn_entry[uid].is_load = insn_is_load_p (insn);
34330 insn_entry[uid].is_store = insn_is_store_p (insn);
34332 /* Determine if this is a doubleword swap. If not,
34333 determine whether it can legally be swapped. */
34334 if (insn_is_swap_p (insn))
34335 insn_entry[uid].is_swap = 1;
34336 else
34338 unsigned int special = SH_NONE;
34339 insn_entry[uid].is_swappable
34340 = insn_is_swappable_p (insn_entry, insn, &special);
34341 if (special != SH_NONE && insn_entry[uid].contains_subreg)
34342 insn_entry[uid].is_swappable = 0;
34343 else if (special != SH_NONE)
34344 insn_entry[uid].special_handling = special;
34345 else if (insn_entry[uid].contains_subreg)
34346 insn_entry[uid].special_handling = SH_SUBREG;
34352 if (dump_file)
34354 fprintf (dump_file, "\nSwap insn entry table when first built\n");
34355 dump_swap_insn_table (insn_entry);
34358 /* Record unoptimizable webs. */
34359 unsigned e = get_max_uid (), i;
34360 for (i = 0; i < e; ++i)
34362 if (!insn_entry[i].is_relevant)
34363 continue;
34365 swap_web_entry *root
34366 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
34368 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
34369 || (insn_entry[i].contains_subreg
34370 && insn_entry[i].special_handling != SH_SUBREG)
34371 || insn_entry[i].is_128_int || insn_entry[i].is_call
34372 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
34373 root->web_not_optimizable = 1;
34375 /* If we have loads or stores that aren't permuting then the
34376 optimization isn't appropriate. */
34377 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
34378 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
34379 root->web_not_optimizable = 1;
34381 /* If we have permuting loads or stores that are not accompanied
34382 by a register swap, the optimization isn't appropriate. */
34383 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
34385 rtx insn = insn_entry[i].insn;
34386 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34387 df_ref def;
34389 FOR_EACH_INSN_INFO_DEF (def, insn_info)
34391 struct df_link *link = DF_REF_CHAIN (def);
34393 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
34395 root->web_not_optimizable = 1;
34396 break;
34400 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
34402 rtx insn = insn_entry[i].insn;
34403 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34404 df_ref use;
34406 FOR_EACH_INSN_INFO_USE (use, insn_info)
34408 struct df_link *link = DF_REF_CHAIN (use);
34410 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
34412 root->web_not_optimizable = 1;
34413 break;
34419 if (dump_file)
34421 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
34422 dump_swap_insn_table (insn_entry);
34425 /* For each load and store in an optimizable web (which implies
34426 the loads and stores are permuting), find the associated
34427 register swaps and mark them for removal. Due to various
34428 optimizations we may mark the same swap more than once. Also
34429 perform special handling for swappable insns that require it. */
34430 for (i = 0; i < e; ++i)
34431 if ((insn_entry[i].is_load || insn_entry[i].is_store)
34432 && insn_entry[i].is_swap)
34434 swap_web_entry* root_entry
34435 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
34436 if (!root_entry->web_not_optimizable)
34437 mark_swaps_for_removal (insn_entry, i);
34439 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
34441 swap_web_entry* root_entry
34442 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
34443 if (!root_entry->web_not_optimizable)
34444 handle_special_swappables (insn_entry, i);
34447 /* Now delete the swaps marked for removal. */
34448 for (i = 0; i < e; ++i)
34449 if (insn_entry[i].will_delete)
34450 replace_swap_with_copy (insn_entry, i);
34452 /* Clean up. */
34453 free (insn_entry);
34454 return 0;
34457 const pass_data pass_data_analyze_swaps =
34459 RTL_PASS, /* type */
34460 "swaps", /* name */
34461 OPTGROUP_NONE, /* optinfo_flags */
34462 TV_NONE, /* tv_id */
34463 0, /* properties_required */
34464 0, /* properties_provided */
34465 0, /* properties_destroyed */
34466 0, /* todo_flags_start */
34467 TODO_df_finish, /* todo_flags_finish */
34470 class pass_analyze_swaps : public rtl_opt_pass
34472 public:
34473 pass_analyze_swaps(gcc::context *ctxt)
34474 : rtl_opt_pass(pass_data_analyze_swaps, ctxt)
34477 /* opt_pass methods: */
34478 virtual bool gate (function *)
34480 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
34481 && rs6000_optimize_swaps);
34484 virtual unsigned int execute (function *fun)
34486 return rs6000_analyze_swaps (fun);
34489 }; // class pass_analyze_swaps
34491 rtl_opt_pass *
34492 make_pass_analyze_swaps (gcc::context *ctxt)
34494 return new pass_analyze_swaps (ctxt);
34497 struct gcc_target targetm = TARGET_INITIALIZER;
34499 #include "gt-rs6000.h"