gcc/config/rs6000/rs6000.c
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "pointer-set.h"
#include "hash-table.h"
#include "vec.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "ira.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "cgraph.h"
#include "target-globals.h"
#include "builtins.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;		/* size of SPE alignment padding if
				   not in save_size */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;	/* nonzero if SPE 64-bit registers are used */
  int savres_strategy;		/* mask of save/restore strategy flags */
} rs6000_stack_t;
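
/* Note on usage: these fields are filled in once per function by
   rs6000_stack_info (declared further down in this file), and all of the
   *_offset fields are measured from the initial stack pointer, as the
   field comments above note.  */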
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Control for the -mdebug=cost debug routines.  */
static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
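
/* For instance, IS_STD_REG_TYPE (ALTIVEC_REG_TYPE) is true because
   ALTIVEC_REG_TYPE sits between GPR_REG_TYPE and FPR_REG_TYPE in the enum
   above; both range checks rely entirely on that declaration order.  */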
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,			/* General purpose registers.  */
  RELOAD_REG_FPR,			/* Traditional floating point regs.  */
  RELOAD_REG_VMX,			/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,			/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;			/* Register class name.  */
  int reg;				/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
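
/* As an illustration, a mode that is valid in a register class with both
   reg+offset addressing and PRE_INC/PRE_DEC updates would carry the mask
   (RELOAD_REG_VALID | RELOAD_REG_OFFSET | RELOAD_REG_PRE_INCDEC) = 0x19.  */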
/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
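
/* Both helpers test the RELOAD_REG_ANY entry, which is the OR of the GPR,
   FPR, and Altivec masks, so each answers "does any register class support
   this update form for the mode" with a single table lookup.  */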
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};
const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)  \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);


static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0", "%rh1", "%rh2", "%rh3", "%rh4", "%rh5", "%rh6", "%rh7",
  "%rh8", "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
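
/* So ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0 bit),
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 1) is 0x40000000, and so on down
   to bit 0 for %v31, matching the VRSAVE layout described above.  */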
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT		\
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1575 #undef TARGET_BUILTIN_RECIPROCAL
1576 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1578 #undef TARGET_EXPAND_TO_RTL_HOOK
1579 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1581 #undef TARGET_INSTANTIATE_DECLS
1582 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1584 #undef TARGET_SECONDARY_RELOAD
1585 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1587 #undef TARGET_LEGITIMATE_ADDRESS_P
1588 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1590 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1591 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1593 #undef TARGET_LRA_P
1594 #define TARGET_LRA_P rs6000_lra_p
1596 #undef TARGET_CAN_ELIMINATE
1597 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1599 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1600 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1602 #undef TARGET_TRAMPOLINE_INIT
1603 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1605 #undef TARGET_FUNCTION_VALUE
1606 #define TARGET_FUNCTION_VALUE rs6000_function_value
1608 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1609 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1611 #undef TARGET_OPTION_SAVE
1612 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1614 #undef TARGET_OPTION_RESTORE
1615 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1617 #undef TARGET_OPTION_PRINT
1618 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1620 #undef TARGET_CAN_INLINE_P
1621 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1623 #undef TARGET_SET_CURRENT_FUNCTION
1624 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1626 #undef TARGET_LEGITIMATE_CONSTANT_P
1627 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1629 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1630 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1632 #undef TARGET_CAN_USE_DOLOOP_P
1633 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
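/* Note: the TARGET_* overrides above only take effect because this file
   also defines the target vector.  A minimal sketch of the standard
   pattern (the real definition appears near the end of this file):

       struct gcc_target targetm = TARGET_INITIALIZER;

   TARGET_INITIALIZER, from target-def.h, expands to an initializer that
   substitutes each #define above for the corresponding hook default.  */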
1636 /* Processor table. */
1637 struct rs6000_ptt
1639 const char *const name; /* Canonical processor name. */
1640 const enum processor_type processor; /* Processor type enum value. */
1641 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1644 static struct rs6000_ptt const processor_target_table[] =
1646 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1647 #include "rs6000-cpus.def"
1648 #undef RS6000_CPU
1651 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1652 name is invalid. */
1654 static int
1655 rs6000_cpu_name_lookup (const char *name)
1657 size_t i;
1659 if (name != NULL)
1661 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1662 if (! strcmp (name, processor_target_table[i].name))
1663 return (int)i;
1666 return -1;
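/* Usage sketch: rs6000_cpu_name_lookup ("power7") returns the index of
   the "power7" row in processor_target_table (assuming that cpu appears
   in rs6000-cpus.def), while rs6000_cpu_name_lookup ("no-such-cpu") and
   rs6000_cpu_name_lookup (NULL) both return -1.  */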
1670 /* Return number of consecutive hard regs needed starting at reg REGNO
1671 to hold something of mode MODE.
1672 This is ordinarily the length in words of a value of mode MODE
1673 but can be less for certain modes in special long registers.
1675 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1676 scalar instructions. The upper 32 bits are only available to the
1677 SIMD instructions.
1679 POWER and PowerPC GPRs hold 32 bits worth;
1680 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1682 static int
1683 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1685 unsigned HOST_WIDE_INT reg_size;
1687 /* TF/TD modes are special in that they always take 2 registers. */
1688 if (FP_REGNO_P (regno))
1689 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1690 ? UNITS_PER_VSX_WORD
1691 : UNITS_PER_FP_WORD);
1693 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1694 reg_size = UNITS_PER_SPE_WORD;
1696 else if (ALTIVEC_REGNO_P (regno))
1697 reg_size = UNITS_PER_ALTIVEC_WORD;
1699 /* The value returned for SCmode in the E500 double case is 2 for
1700 ABI compatibility; storing an SCmode value in a single register
1701 would require function_arg and rs6000_spe_function_arg to handle
1702 SCmode so as to pass the value correctly in a pair of
1703 registers. */
1704 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1705 && !DECIMAL_FLOAT_MODE_P (mode))
1706 reg_size = UNITS_PER_FP_WORD;
1708 else
1709 reg_size = UNITS_PER_WORD;
1711 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
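/* Worked examples of the rounding division above, assuming a typical
   64-bit configuration: DFmode (8 bytes) in an FPR with reg_size 8 needs
   (8 + 8 - 1) / 8 = 1 register, while TFmode (16 bytes) needs
   (16 + 8 - 1) / 8 = 2, matching the comment that TF/TD modes always
   take a register pair.  */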
1714 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1715 MODE. */
1716 static int
1717 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1719 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1721 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1722 register combinations, and we use PTImode where we need to deal with quad
1723 word memory operations. Don't allow quad words in the argument or frame
1724 pointer registers, just registers 0..31. */
1725 if (mode == PTImode)
1726 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1727 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1728 && ((regno & 1) == 0));
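/* For example, a PTImode value may start in r4 (r4/r5 form the even/odd
   pair quad word memory operations require) but not in r5, and not in
   r31, where the pair would run past the last GPR.  */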
1730 /* VSX registers that overlap the FPR registers are wider than on non-VSX
1731 implementations. Don't allow an item to be split between a FP register
1732 and an Altivec register. Allow TImode in all VSX registers if the user
1733 asked for it. */
1734 if (TARGET_VSX && VSX_REGNO_P (regno)
1735 && (VECTOR_MEM_VSX_P (mode)
1736 || (TARGET_VSX_SCALAR_FLOAT && mode == SFmode)
1737 || (TARGET_VSX_SCALAR_DOUBLE && (mode == DFmode || mode == DImode))
1738 || (TARGET_VSX_TIMODE && mode == TImode)
1739 || (TARGET_VADDUQM && mode == V1TImode)))
1741 if (FP_REGNO_P (regno))
1742 return FP_REGNO_P (last_regno);
1744 if (ALTIVEC_REGNO_P (regno))
1746 if (mode == SFmode && !TARGET_UPPER_REGS_SF)
1747 return 0;
1749 if ((mode == DFmode || mode == DImode) && !TARGET_UPPER_REGS_DF)
1750 return 0;
1752 return ALTIVEC_REGNO_P (last_regno);
1756 /* The GPRs can hold any mode, but values bigger than one register
1757 cannot go past R31. */
1758 if (INT_REGNO_P (regno))
1759 return INT_REGNO_P (last_regno);
1761 /* The float registers (except for VSX vector modes) can only hold floating
1762 modes and DImode. */
1763 if (FP_REGNO_P (regno))
1765 if (SCALAR_FLOAT_MODE_P (mode)
1766 && (mode != TDmode || (regno % 2) == 0)
1767 && FP_REGNO_P (last_regno))
1768 return 1;
1770 if (GET_MODE_CLASS (mode) == MODE_INT
1771 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1772 return 1;
1774 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1775 && PAIRED_VECTOR_MODE (mode))
1776 return 1;
1778 return 0;
1781 /* The CR register can only hold CC modes. */
1782 if (CR_REGNO_P (regno))
1783 return GET_MODE_CLASS (mode) == MODE_CC;
1785 if (CA_REGNO_P (regno))
1786 return mode == BImode;
1788 /* AltiVec modes only in AltiVec registers. */
1789 if (ALTIVEC_REGNO_P (regno))
1790 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1791 || mode == V1TImode);
1793 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1794 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1795 return 1;
1797 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
1798 and the value must fit within the register set. */
1800 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
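/* Illustrative queries, assuming a typical VSX-enabled 64-bit target:
   rs6000_hard_regno_mode_ok (0, DImode) is 1, since GPRs hold any mode
   that fits; rs6000_hard_regno_mode_ok (CR0_REGNO, SImode) is 0, since
   CRs only hold CC modes; and TDmode is rejected for FPRs whose hard
   register number is odd by the (regno % 2) == 0 test above.  */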
1803 /* Print interesting facts about registers. */
1804 static void
1805 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1807 int r, m;
1809 for (r = first_regno; r <= last_regno; ++r)
1811 const char *comma = "";
1812 int len;
1814 if (first_regno == last_regno)
1815 fprintf (stderr, "%s:\t", reg_name);
1816 else
1817 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1819 len = 8;
1820 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1821 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1823 if (len > 70)
1825 fprintf (stderr, ",\n\t");
1826 len = 8;
1827 comma = "";
1830 if (rs6000_hard_regno_nregs[m][r] > 1)
1831 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1832 rs6000_hard_regno_nregs[m][r]);
1833 else
1834 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1836 comma = ", ";
1839 if (call_used_regs[r])
1841 if (len > 70)
1843 fprintf (stderr, ",\n\t");
1844 len = 8;
1845 comma = "";
1848 len += fprintf (stderr, "%s%s", comma, "call-used");
1849 comma = ", ";
1852 if (fixed_regs[r])
1854 if (len > 70)
1856 fprintf (stderr, ",\n\t");
1857 len = 8;
1858 comma = "";
1861 len += fprintf (stderr, "%s%s", comma, "fixed");
1862 comma = ", ";
1865 if (len > 70)
1867 fprintf (stderr, ",\n\t");
1868 comma = "";
1871 len += fprintf (stderr, "%sreg-class = %s", comma,
1872 reg_class_names[(int)rs6000_regno_regclass[r]]);
1873 comma = ", ";
1875 if (len > 70)
1877 fprintf (stderr, ",\n\t");
1878 comma = "";
1881 fprintf (stderr, "%sregno = %d\n", comma, r);
1885 static const char *
1886 rs6000_debug_vector_unit (enum rs6000_vector v)
1888 const char *ret;
1890 switch (v)
1892 case VECTOR_NONE: ret = "none"; break;
1893 case VECTOR_ALTIVEC: ret = "altivec"; break;
1894 case VECTOR_VSX: ret = "vsx"; break;
1895 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1896 case VECTOR_PAIRED: ret = "paired"; break;
1897 case VECTOR_SPE: ret = "spe"; break;
1898 case VECTOR_OTHER: ret = "other"; break;
1899 default: ret = "unknown"; break;
1902 return ret;
1905 /* Print the address masks in a human readable fashion. */
1906 DEBUG_FUNCTION void
1907 rs6000_debug_print_mode (ssize_t m)
1909 ssize_t rc;
1911 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
1912 for (rc = 0; rc < N_RELOAD_REG; rc++)
1914 addr_mask_type mask = reg_addr[m].addr_mask[rc];
1915 fprintf (stderr,
1916 " %s: %c%c%c%c%c%c",
1917 reload_reg_map[rc].name,
1918 (mask & RELOAD_REG_VALID) != 0 ? 'v' : ' ',
1919 (mask & RELOAD_REG_MULTIPLE) != 0 ? 'm' : ' ',
1920 (mask & RELOAD_REG_INDEXED) != 0 ? 'i' : ' ',
1921 (mask & RELOAD_REG_OFFSET) != 0 ? 'o' : ' ',
1922 (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
1923 (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
1926 if (rs6000_vector_unit[m] != VECTOR_NONE
1927 || rs6000_vector_mem[m] != VECTOR_NONE
1928 || (reg_addr[m].reload_store != CODE_FOR_nothing)
1929 || (reg_addr[m].reload_load != CODE_FOR_nothing))
1931 fprintf (stderr,
1932 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c",
1933 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
1934 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
1935 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
1936 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
1939 fputs ("\n", stderr);
1942 #define DEBUG_FMT_ID "%-32s= "
1943 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1944 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1945 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
1947 /* Print various interesting information with -mdebug=reg. */
1948 static void
1949 rs6000_debug_reg_global (void)
1951 static const char *const tf[2] = { "false", "true" };
1952 const char *nl = (const char *)0;
1953 int m;
1954 size_t m1, m2, v;
1955 char costly_num[20];
1956 char nop_num[20];
1957 char flags_buffer[40];
1958 const char *costly_str;
1959 const char *nop_str;
1960 const char *trace_str;
1961 const char *abi_str;
1962 const char *cmodel_str;
1963 struct cl_target_option cl_opts;
1965 /* Modes we want tieable information on. */
1966 static const enum machine_mode print_tieable_modes[] = {
1967 QImode,
1968 HImode,
1969 SImode,
1970 DImode,
1971 TImode,
1972 PTImode,
1973 SFmode,
1974 DFmode,
1975 TFmode,
1976 SDmode,
1977 DDmode,
1978 TDmode,
1979 V8QImode,
1980 V4HImode,
1981 V2SImode,
1982 V16QImode,
1983 V8HImode,
1984 V4SImode,
1985 V2DImode,
1986 V1TImode,
1987 V32QImode,
1988 V16HImode,
1989 V8SImode,
1990 V4DImode,
1991 V2TImode,
1992 V2SFmode,
1993 V4SFmode,
1994 V2DFmode,
1995 V8SFmode,
1996 V4DFmode,
1997 CCmode,
1998 CCUNSmode,
1999 CCEQmode,
2002 /* Virtual regs we are interested in. */
2003 static const struct {
2004 int regno; /* register number. */
2005 const char *name; /* register name. */
2006 } virtual_regs[] = {
2007 { STACK_POINTER_REGNUM, "stack pointer:" },
2008 { TOC_REGNUM, "toc: " },
2009 { STATIC_CHAIN_REGNUM, "static chain: " },
2010 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2011 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2012 { ARG_POINTER_REGNUM, "arg pointer: " },
2013 { FRAME_POINTER_REGNUM, "frame pointer:" },
2014 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2015 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2016 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2017 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2018 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2019 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2020 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2021 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2022 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2025 fputs ("\nHard register information:\n", stderr);
2026 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2027 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2028 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2029 LAST_ALTIVEC_REGNO,
2030 "vs");
2031 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2032 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2033 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2034 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2035 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2036 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2037 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2038 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2040 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2041 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2042 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2044 fprintf (stderr,
2045 "\n"
2046 "d reg_class = %s\n"
2047 "f reg_class = %s\n"
2048 "v reg_class = %s\n"
2049 "wa reg_class = %s\n"
2050 "wd reg_class = %s\n"
2051 "wf reg_class = %s\n"
2052 "wg reg_class = %s\n"
2053 "wl reg_class = %s\n"
2054 "wm reg_class = %s\n"
2055 "wr reg_class = %s\n"
2056 "ws reg_class = %s\n"
2057 "wt reg_class = %s\n"
2058 "wu reg_class = %s\n"
2059 "wv reg_class = %s\n"
2060 "ww reg_class = %s\n"
2061 "wx reg_class = %s\n"
2062 "wy reg_class = %s\n"
2063 "wz reg_class = %s\n"
2064 "\n",
2065 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2066 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2067 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2068 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2069 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2070 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2071 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2072 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2073 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2074 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2075 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2076 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2077 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2078 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2079 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2080 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2081 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2082 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2084 nl = "\n";
2085 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2086 rs6000_debug_print_mode (m);
2088 fputs ("\n", stderr);
2090 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2092 enum machine_mode mode1 = print_tieable_modes[m1];
2093 bool first_time = true;
2095 nl = (const char *)0;
2096 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2098 enum machine_mode mode2 = print_tieable_modes[m2];
2099 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2101 if (first_time)
2103 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2104 nl = "\n";
2105 first_time = false;
2108 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2112 if (!first_time)
2113 fputs ("\n", stderr);
2116 if (nl)
2117 fputs (nl, stderr);
2119 if (rs6000_recip_control)
2121 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2123 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2124 if (rs6000_recip_bits[m])
2126 fprintf (stderr,
2127 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2128 GET_MODE_NAME (m),
2129 (RS6000_RECIP_AUTO_RE_P (m)
2130 ? "auto"
2131 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2132 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2133 ? "auto"
2134 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2137 fputs ("\n", stderr);
2140 if (rs6000_cpu_index >= 0)
2142 const char *name = processor_target_table[rs6000_cpu_index].name;
2143 HOST_WIDE_INT flags
2144 = processor_target_table[rs6000_cpu_index].target_enable;
2146 sprintf (flags_buffer, "-mcpu=%s flags", name);
2147 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2149 else
2150 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2152 if (rs6000_tune_index >= 0)
2154 const char *name = processor_target_table[rs6000_tune_index].name;
2155 HOST_WIDE_INT flags
2156 = processor_target_table[rs6000_tune_index].target_enable;
2158 sprintf (flags_buffer, "-mtune=%s flags", name);
2159 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2161 else
2162 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2164 cl_target_option_save (&cl_opts, &global_options);
2165 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2166 rs6000_isa_flags);
2168 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2169 rs6000_isa_flags_explicit);
2171 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2172 rs6000_builtin_mask);
2174 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2176 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2177 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2179 switch (rs6000_sched_costly_dep)
2181 case max_dep_latency:
2182 costly_str = "max_dep_latency";
2183 break;
2185 case no_dep_costly:
2186 costly_str = "no_dep_costly";
2187 break;
2189 case all_deps_costly:
2190 costly_str = "all_deps_costly";
2191 break;
2193 case true_store_to_load_dep_costly:
2194 costly_str = "true_store_to_load_dep_costly";
2195 break;
2197 case store_to_load_dep_costly:
2198 costly_str = "store_to_load_dep_costly";
2199 break;
2201 default:
2202 costly_str = costly_num;
2203 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2204 break;
2207 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2209 switch (rs6000_sched_insert_nops)
2211 case sched_finish_regroup_exact:
2212 nop_str = "sched_finish_regroup_exact";
2213 break;
2215 case sched_finish_pad_groups:
2216 nop_str = "sched_finish_pad_groups";
2217 break;
2219 case sched_finish_none:
2220 nop_str = "sched_finish_none";
2221 break;
2223 default:
2224 nop_str = nop_num;
2225 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2226 break;
2229 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2231 switch (rs6000_sdata)
2233 default:
2234 case SDATA_NONE:
2235 break;
2237 case SDATA_DATA:
2238 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2239 break;
2241 case SDATA_SYSV:
2242 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2243 break;
2245 case SDATA_EABI:
2246 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2247 break;
2251 switch (rs6000_traceback)
2253 case traceback_default: trace_str = "default"; break;
2254 case traceback_none: trace_str = "none"; break;
2255 case traceback_part: trace_str = "part"; break;
2256 case traceback_full: trace_str = "full"; break;
2257 default: trace_str = "unknown"; break;
2260 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2262 switch (rs6000_current_cmodel)
2264 case CMODEL_SMALL: cmodel_str = "small"; break;
2265 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2266 case CMODEL_LARGE: cmodel_str = "large"; break;
2267 default: cmodel_str = "unknown"; break;
2270 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2272 switch (rs6000_current_abi)
2274 case ABI_NONE: abi_str = "none"; break;
2275 case ABI_AIX: abi_str = "aix"; break;
2276 case ABI_ELFv2: abi_str = "ELFv2"; break;
2277 case ABI_V4: abi_str = "V4"; break;
2278 case ABI_DARWIN: abi_str = "darwin"; break;
2279 default: abi_str = "unknown"; break;
2282 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2284 if (rs6000_altivec_abi)
2285 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2287 if (rs6000_spe_abi)
2288 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2290 if (rs6000_darwin64_abi)
2291 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2293 if (rs6000_float_gprs)
2294 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2296 fprintf (stderr, DEBUG_FMT_S, "fprs",
2297 (TARGET_FPRS ? "true" : "false"));
2299 fprintf (stderr, DEBUG_FMT_S, "single_float",
2300 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2302 fprintf (stderr, DEBUG_FMT_S, "double_float",
2303 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2305 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2306 (TARGET_SOFT_FLOAT ? "true" : "false"));
2308 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2309 (TARGET_E500_SINGLE ? "true" : "false"));
2311 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2312 (TARGET_E500_DOUBLE ? "true" : "false"));
2314 if (TARGET_LINK_STACK)
2315 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2317 if (targetm.lra_p ())
2318 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2320 if (TARGET_P8_FUSION)
2321 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2322 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2324 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2325 TARGET_SECURE_PLT ? "secure" : "bss");
2326 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2327 aix_struct_return ? "aix" : "sysv");
2328 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2329 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2330 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2331 tf[!!rs6000_align_branch_targets]);
2332 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2333 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2334 rs6000_long_double_type_size);
2335 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2336 (int)rs6000_sched_restricted_insns_priority);
2337 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2338 (int)END_BUILTINS);
2339 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2340 (int)RS6000_BUILTIN_COUNT);
2342 if (TARGET_VSX)
2343 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2344 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2348 /* Update the addr mask bits in reg_addr to help secondary reload and the
2349 legitimate address support figure out the appropriate addressing to
2350 use. */
2352 static void
2353 rs6000_setup_reg_addr_masks (void)
2355 ssize_t rc, reg, m, nregs;
2356 addr_mask_type any_addr_mask, addr_mask;
2358 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2360 enum machine_mode m2 = (enum machine_mode)m;
2362 /* SDmode is special in that we want to access it only via REG+REG
2363 addressing on power7 and above, since we want to use the LFIWZX and
2364 STFIWZX instructions to load it. */
2365 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2367 any_addr_mask = 0;
2368 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2370 addr_mask = 0;
2371 reg = reload_reg_map[rc].reg;
2373 /* Can mode values go in the GPR/FPR/Altivec registers? */
2374 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2376 nregs = rs6000_hard_regno_nregs[m][reg];
2377 addr_mask |= RELOAD_REG_VALID;
2379 /* Indicate if the mode takes more than 1 physical register. If
2380 it takes a single register, indicate it can do REG+REG
2381 addressing. */
2382 if (nregs > 1 || m == BLKmode)
2383 addr_mask |= RELOAD_REG_MULTIPLE;
2384 else
2385 addr_mask |= RELOAD_REG_INDEXED;
2387 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2388 addressing. Restrict addressing on SPE for 64-bit types
2389 because of the SUBREG hackery used to address 64-bit floats in
2390 '32-bit' GPRs. To simplify secondary reload, don't allow
2391 update forms on scalar floating point types that can go in the
2392 upper registers. */
2394 if (TARGET_UPDATE
2395 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2396 && GET_MODE_SIZE (m2) <= 8
2397 && !VECTOR_MODE_P (m2)
2398 && !COMPLEX_MODE_P (m2)
2399 && !indexed_only_p
2400 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8)
2401 && !(m2 == DFmode && TARGET_UPPER_REGS_DF)
2402 && !(m2 == SFmode && TARGET_UPPER_REGS_SF))
2404 addr_mask |= RELOAD_REG_PRE_INCDEC;
2406 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2407 we don't allow PRE_MODIFY for some multi-register
2408 operations. */
2409 switch (m)
2411 default:
2412 addr_mask |= RELOAD_REG_PRE_MODIFY;
2413 break;
2415 case DImode:
2416 if (TARGET_POWERPC64)
2417 addr_mask |= RELOAD_REG_PRE_MODIFY;
2418 break;
2420 case DFmode:
2421 case DDmode:
2422 if (TARGET_DF_INSN)
2423 addr_mask |= RELOAD_REG_PRE_MODIFY;
2424 break;
2429 /* GPR and FPR registers can do REG+OFFSET addressing, except
2430 possibly for SDmode. */
2431 if ((addr_mask != 0) && !indexed_only_p
2432 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2433 addr_mask |= RELOAD_REG_OFFSET;
2435 reg_addr[m].addr_mask[rc] = addr_mask;
2436 any_addr_mask |= addr_mask;
2439 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
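/* Reading an addr_mask value built above: a mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
    | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY) describes a mode
   that fits in one register of the class and supports reg+reg,
   reg+offset, and the update addressing forms, while RELOAD_REG_MULTIPLE
   in place of RELOAD_REG_INDEXED marks a mode spanning several
   registers.  */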
2444 /* Initialize the various global tables that are based on register size. */
2445 static void
2446 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2448 ssize_t r, m, c;
2449 int align64;
2450 int align32;
2452 /* Precalculate REGNO_REG_CLASS. */
2453 rs6000_regno_regclass[0] = GENERAL_REGS;
2454 for (r = 1; r < 32; ++r)
2455 rs6000_regno_regclass[r] = BASE_REGS;
2457 for (r = 32; r < 64; ++r)
2458 rs6000_regno_regclass[r] = FLOAT_REGS;
2460 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2461 rs6000_regno_regclass[r] = NO_REGS;
2463 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2464 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2466 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2467 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2468 rs6000_regno_regclass[r] = CR_REGS;
2470 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2471 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2472 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2473 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2474 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2475 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2476 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2477 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2478 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2479 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2480 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2481 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2483 /* Precalculate register class to simpler reload register class. We don't
2484 need all of the register classes that are combinations of different
2485 classes, just the simple ones that have constraint letters. */
2486 for (c = 0; c < N_REG_CLASSES; c++)
2487 reg_class_to_reg_type[c] = NO_REG_TYPE;
2489 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2490 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2491 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2492 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2493 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2494 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2495 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2496 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2497 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2498 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2499 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2500 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2502 if (TARGET_VSX)
2504 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2505 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2507 else
2509 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2510 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2513 /* Precalculate the valid memory formats as well as the vector information;
2514 this must be set up before the rs6000_hard_regno_nregs_internal calls
2515 below. */
2516 gcc_assert ((int)VECTOR_NONE == 0);
2517 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2518 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
2520 gcc_assert ((int)CODE_FOR_nothing == 0);
2521 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2523 gcc_assert ((int)NO_REGS == 0);
2524 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2526 /* The VSX hardware allows native alignment for vectors, but we control whether
2527 the compiler believes it can use native alignment or must still use 128-bit alignment. */
2528 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2530 align64 = 64;
2531 align32 = 32;
2533 else
2535 align64 = 128;
2536 align32 = 128;
2539 /* V2DF mode, VSX only. */
2540 if (TARGET_VSX)
2542 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2543 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2544 rs6000_vector_align[V2DFmode] = align64;
2547 /* V4SF mode, either VSX or Altivec. */
2548 if (TARGET_VSX)
2550 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2551 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2552 rs6000_vector_align[V4SFmode] = align32;
2554 else if (TARGET_ALTIVEC)
2556 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2557 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2558 rs6000_vector_align[V4SFmode] = align32;
2561 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2562 and stores. */
2563 if (TARGET_ALTIVEC)
2565 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2566 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2567 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2568 rs6000_vector_align[V4SImode] = align32;
2569 rs6000_vector_align[V8HImode] = align32;
2570 rs6000_vector_align[V16QImode] = align32;
2572 if (TARGET_VSX)
2574 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2575 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2576 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2578 else
2580 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2581 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2582 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2586 /* V2DImode: full mode support depends on the ISA 2.07 vector mode. Allow it
2587 under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2588 if (TARGET_VSX)
2590 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2591 rs6000_vector_unit[V2DImode]
2592 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2593 rs6000_vector_align[V2DImode] = align64;
2595 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2596 rs6000_vector_unit[V1TImode]
2597 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2598 rs6000_vector_align[V1TImode] = 128;
2601 /* DFmode, see if we want to use the VSX unit. */
2602 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2604 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2605 rs6000_vector_mem[DFmode]
2606 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2607 rs6000_vector_align[DFmode] = align64;
2610 /* Allow TImode in VSX registers and set the VSX memory macros. */
2611 if (TARGET_VSX && TARGET_VSX_TIMODE)
2613 rs6000_vector_mem[TImode] = VECTOR_VSX;
2614 rs6000_vector_align[TImode] = align64;
2617 /* TODO add SPE and paired floating point vector support. */
2619 /* Register class constraints for the constraints that depend on compile
2620 switches. When the VSX code was added, different constraints were added
2621 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2622 of the VSX registers are used. The register classes for scalar floating
2623 point types are set, based on whether we allow that type into the upper
2624 (Altivec) registers. GCC has register classes to target the Altivec
2625 registers for load/store operations, to select using a VSX memory
2626 operation instead of the traditional floating point operation. The
2627 constraints are:
2629 d - Register class to use with traditional DFmode instructions.
2630 f - Register class to use with traditional SFmode instructions.
2631 v - Altivec register.
2632 wa - Any VSX register.
2633 wd - Preferred register class for V2DFmode.
2634 wf - Preferred register class for V4SFmode.
2635 wg - Float register for power6x move insns.
2636 wl - Float register if we can do 32-bit signed int loads.
2637 wm - VSX register for ISA 2.07 direct move operations.
2638 wr - GPR if 64-bit mode is permitted.
2639 ws - Register class to do ISA 2.06 DF operations.
2640 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2641 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2642 wt - VSX register for TImode in VSX registers.
2643 ww - Register class to do SF conversions in with VSX operations.
2644 wx - Float register if we can do 32-bit int stores.
2645 wy - Register class to do ISA 2.07 SF operations.
2646 wz - Float register if we can do 32-bit unsigned int loads. */
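/* As an illustration of how these letters are consumed, a (hypothetical)
   insn pattern in rs6000.md can use them in its operand constraints,
   e.g.

       (match_operand:V2DF 0 "vsx_register_operand" "=wd")

   The alternative is then usable only when RS6000_CONSTRAINT_wd is set
   to a real register class below; constraints left as NO_REGS simply
   disable the alternatives that mention them.  */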
2648 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2649 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2651 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2652 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2654 if (TARGET_VSX)
2656 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2657 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2658 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2660 if (TARGET_VSX_TIMODE)
2661 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2663 if (TARGET_UPPER_REGS_DF)
2665 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2666 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2668 else
2669 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2672 /* Add conditional constraints based on various options, to allow us to
2673 collapse multiple insn patterns. */
2674 if (TARGET_ALTIVEC)
2675 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2677 if (TARGET_MFPGPR)
2678 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2680 if (TARGET_LFIWAX)
2681 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2683 if (TARGET_DIRECT_MOVE)
2684 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2686 if (TARGET_POWERPC64)
2687 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2689 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)
2691 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2692 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2693 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2695 else if (TARGET_P8_VECTOR)
2697 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2698 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2700 else if (TARGET_VSX)
2701 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2703 if (TARGET_STFIWX)
2704 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2706 if (TARGET_LFIWZX)
2707 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
2709 /* Set up the reload helper and direct move functions. */
2710 if (TARGET_VSX || TARGET_ALTIVEC)
2712 if (TARGET_64BIT)
2714 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2715 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2716 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2717 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2718 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2719 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2720 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2721 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2722 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2723 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2724 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2725 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2726 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2727 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2728 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2730 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2731 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2732 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2733 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2735 if (TARGET_P8_VECTOR)
2737 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2738 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2739 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2740 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2742 if (TARGET_VSX_TIMODE)
2744 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2745 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2747 if (TARGET_DIRECT_MOVE)
2749 if (TARGET_POWERPC64)
2751 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2752 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2753 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2754 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2755 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2756 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2757 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2758 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2759 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2761 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2762 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2763 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2764 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2765 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2766 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2767 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2768 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2769 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2771 else
2773 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2774 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2775 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2779 else
2781 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2782 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2783 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2784 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2785 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2786 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2787 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2788 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2789 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2790 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2791 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2792 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2793 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2794 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2795 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2797 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2798 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2799 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2800 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2802 if (TARGET_P8_VECTOR)
2804 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2805 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2806 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2807 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2809 if (TARGET_VSX_TIMODE)
2811 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2812 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2817 /* Precalculate HARD_REGNO_NREGS. */
2818 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2819 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2820 rs6000_hard_regno_nregs[m][r]
2821 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2823 /* Precalculate HARD_REGNO_MODE_OK. */
2824 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2825 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2826 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2827 rs6000_hard_regno_mode_ok_p[m][r] = true;
2829 /* Precalculate CLASS_MAX_NREGS sizes. */
2830 for (c = 0; c < LIM_REG_CLASSES; ++c)
2832 int reg_size;
2834 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2835 reg_size = UNITS_PER_VSX_WORD;
2837 else if (c == ALTIVEC_REGS)
2838 reg_size = UNITS_PER_ALTIVEC_WORD;
2840 else if (c == FLOAT_REGS)
2841 reg_size = UNITS_PER_FP_WORD;
2843 else
2844 reg_size = UNITS_PER_WORD;
2846 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2848 enum machine_mode m2 = (enum machine_mode)m;
2849 int reg_size2 = reg_size;
2851 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2852 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2853 && (m == TDmode || m == TFmode))
2854 reg_size2 = UNITS_PER_FP_WORD;
2856 rs6000_class_max_nregs[m][c]
2857 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
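/* Worked example of the computation above: for TFmode (16 bytes) in a
   VSX register class, reg_size2 is forced back to UNITS_PER_FP_WORD (8),
   so (16 + 8 - 1) / 8 = 2 registers, even though a single VSX register
   is 16 bytes wide.  */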
2861 if (TARGET_E500_DOUBLE)
2862 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2864 /* Calculate which modes should automatically generate code to use the
2865 reciprocal divide and square root instructions. In the future, we might
2866 automatically generate the instructions even if the user did not specify
2867 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2868 not accurate enough. */
2869 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2870 if (TARGET_FRES)
2871 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2872 if (TARGET_FRE)
2873 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2874 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2875 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2876 if (VECTOR_UNIT_VSX_P (V2DFmode))
2877 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2879 if (TARGET_FRSQRTES)
2880 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2881 if (TARGET_FRSQRTE)
2882 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2883 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2884 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2885 if (VECTOR_UNIT_VSX_P (V2DFmode))
2886 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
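/* The estimate instructions flagged above only deliver a few bits of
   precision, so the generated code refines the estimate with
   Newton-Raphson steps, conceptually

       x1 = x0 * (2.0 - d * x0)               reciprocal of d
       y1 = y0 * (1.5 - 0.5 * d * y0 * y0)    reciprocal sqrt of d

   each iteration roughly doubling the number of accurate bits.  */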
2888 if (rs6000_recip_control)
2890 if (!flag_finite_math_only)
2891 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2892 if (flag_trapping_math)
2893 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2894 if (!flag_reciprocal_math)
2895 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2896 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2898 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2899 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2900 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2902 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2903 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2904 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2906 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2907 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2908 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2910 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2911 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2912 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2914 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2915 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2916 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2918 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2919 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2920 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2922 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2923 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2924 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2926 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2927 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2928 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2932 /* Update the addr mask bits in reg_addr to help secondary reload and the
2933 legitimate address support figure out the appropriate addressing to
2934 use. */
2935 rs6000_setup_reg_addr_masks ();
2937 if (global_init_p || TARGET_DEBUG_TARGET)
2939 if (TARGET_DEBUG_REG)
2940 rs6000_debug_reg_global ();
2942 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2943 fprintf (stderr,
2944 "SImode variable mult cost = %d\n"
2945 "SImode constant mult cost = %d\n"
2946 "SImode short constant mult cost = %d\n"
2947 "DImode multipliciation cost = %d\n"
2948 "SImode division cost = %d\n"
2949 "DImode division cost = %d\n"
2950 "Simple fp operation cost = %d\n"
2951 "DFmode multiplication cost = %d\n"
2952 "SFmode division cost = %d\n"
2953 "DFmode division cost = %d\n"
2954 "cache line size = %d\n"
2955 "l1 cache size = %d\n"
2956 "l2 cache size = %d\n"
2957 "simultaneous prefetches = %d\n"
2958 "\n",
2959 rs6000_cost->mulsi,
2960 rs6000_cost->mulsi_const,
2961 rs6000_cost->mulsi_const9,
2962 rs6000_cost->muldi,
2963 rs6000_cost->divsi,
2964 rs6000_cost->divdi,
2965 rs6000_cost->fp,
2966 rs6000_cost->dmul,
2967 rs6000_cost->sdiv,
2968 rs6000_cost->ddiv,
2969 rs6000_cost->cache_line_size,
2970 rs6000_cost->l1_cache_size,
2971 rs6000_cost->l2_cache_size,
2972 rs6000_cost->simultaneous_prefetches);
2976 #if TARGET_MACHO
2977 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2979 static void
2980 darwin_rs6000_override_options (void)
2982 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2983 off. */
2984 rs6000_altivec_abi = 1;
2985 TARGET_ALTIVEC_VRSAVE = 1;
2986 rs6000_current_abi = ABI_DARWIN;
2988 if (DEFAULT_ABI == ABI_DARWIN
2989 && TARGET_64BIT)
2990 darwin_one_byte_bool = 1;
2992 if (TARGET_64BIT && ! TARGET_POWERPC64)
2994 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2995 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2997 if (flag_mkernel)
2999 rs6000_default_long_calls = 1;
3000 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3003 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3004 Altivec. */
3005 if (!flag_mkernel && !flag_apple_kext
3006 && TARGET_64BIT
3007 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3008 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3010 /* Unless the user (not the configurer) has explicitly overridden
3011 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3012 G4 unless targeting the kernel. */
3013 if (!flag_mkernel
3014 && !flag_apple_kext
3015 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3016 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3017 && ! global_options_set.x_rs6000_cpu_index)
3019 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3022 #endif
3024 /* If not otherwise specified by a target, make 'long double' equivalent to
3025 'double'. */
3027 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3028 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3029 #endif
3031 /* Return the builtin mask of the various options that could affect which
3032 builtins are available. In the past we used target_flags, but we've run out
3033 of bits, and some options like SPE and PAIRED are no longer in
3034 target_flags. */
3036 HOST_WIDE_INT
3037 rs6000_builtin_mask_calculate (void)
3039 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3040 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3041 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3042 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3043 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3044 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3045 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3046 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3047 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3048 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3049 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3050 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3051 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3052 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3053 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3054 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0));
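/* Worked example: compiling with -maltivec -mvsx for a non-Cell cpu with
   hardware floating point yields a mask containing at least
   RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_HARD_FLOAT; the
   builtin machinery then compares this mask against each builtin's
   requirements to decide which builtins to make available.  */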
3057 /* Override command line options. Mostly we process the processor type and
3058 sometimes adjust other TARGET_ options. */
3060 static bool
3061 rs6000_option_override_internal (bool global_init_p)
3063 bool ret = true;
3064 bool have_cpu = false;
3066 /* The default cpu requested at configure time, if any. */
3067 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3069 HOST_WIDE_INT set_masks;
3070 int cpu_index;
3071 int tune_index;
3072 struct cl_target_option *main_target_opt
3073 = ((global_init_p || target_option_default_node == NULL)
3074 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3076 /* Remember the explicit arguments. */
3077 if (global_init_p)
3078 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3080 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3081 library functions, so warn about it. The flag may be useful for
3082 performance studies from time to time though, so don't disable it
3083 entirely. */
3084 if (global_options_set.x_rs6000_alignment_flags
3085 && rs6000_alignment_flags == MASK_ALIGN_POWER
3086 && DEFAULT_ABI == ABI_DARWIN
3087 && TARGET_64BIT)
3088 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3089 " it is incompatible with the installed C and C++ libraries");
3091 /* Numerous experiments show that IRA-based loop pressure
3092 calculation works better for RTL loop invariant motion on targets
3093 with enough (>= 32) registers. It is an expensive optimization,
3094 so it is enabled only when optimizing for peak performance. */
3095 if (optimize >= 3 && global_init_p
3096 && !global_options_set.x_flag_ira_loop_pressure)
3097 flag_ira_loop_pressure = 1;
3099 /* Set the pointer size. */
3100 if (TARGET_64BIT)
3102 rs6000_pmode = (int)DImode;
3103 rs6000_pointer_size = 64;
3105 else
3107 rs6000_pmode = (int)SImode;
3108 rs6000_pointer_size = 32;
3111 /* Some OSs don't support saving the high part of 64-bit registers on context
3112 switch. Other OSs don't support saving Altivec registers. On those OSs,
3113 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3114 if the user wants either, the user must explicitly specify them and we
3115 won't interfere with the user's specification. */
3117 set_masks = POWERPC_MASKS;
3118 #ifdef OS_MISSING_POWERPC64
3119 if (OS_MISSING_POWERPC64)
3120 set_masks &= ~OPTION_MASK_POWERPC64;
3121 #endif
3122 #ifdef OS_MISSING_ALTIVEC
3123 if (OS_MISSING_ALTIVEC)
3124 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3125 #endif
3127 /* Don't override by the processor default if given explicitly. */
3128 set_masks &= ~rs6000_isa_flags_explicit;
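/* For example, if the user passed an explicit -mno-vsx, OPTION_MASK_VSX
   is in rs6000_isa_flags_explicit and is cleared from set_masks here, so
   a later -mcpu=power7 (whose table entry enables VSX) cannot turn it
   back on.  */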
3130 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3131 the cpu in a target attribute or pragma, but did not specify a tuning
3132 option, use the cpu for the tuning option rather than the option specified
3133 with -mtune on the command line. Process a '--with-cpu' configuration
3134 request as an implicit -mcpu. */
3135 if (rs6000_cpu_index >= 0)
3137 cpu_index = rs6000_cpu_index;
3138 have_cpu = true;
3140 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3142 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3143 have_cpu = true;
3145 else if (implicit_cpu)
3147 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3148 have_cpu = true;
3150 else
3152 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
3153 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3154 have_cpu = false;
3157 gcc_assert (cpu_index >= 0);
3159 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3160 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3161 with those from the cpu, except for options that were explicitly set. If
3162 we don't have a cpu, do not override the target bits set in
3163 TARGET_DEFAULT. */
3164 if (have_cpu)
3166 rs6000_isa_flags &= ~set_masks;
3167 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3168 & set_masks);
3170 else
3171 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3172 & ~rs6000_isa_flags_explicit);
3174 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3175 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3176 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3177 to using rs6000_isa_flags, we need to do the initialization here. */
3178 if (!have_cpu)
3179 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
3181 if (rs6000_tune_index >= 0)
3182 tune_index = rs6000_tune_index;
3183 else if (have_cpu)
3184 rs6000_tune_index = tune_index = cpu_index;
3185 else
3187 size_t i;
3188 enum processor_type tune_proc
3189 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3191 tune_index = -1;
3192 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3193 if (processor_target_table[i].processor == tune_proc)
3195 rs6000_tune_index = tune_index = i;
3196 break;
3200 gcc_assert (tune_index >= 0);
3201 rs6000_cpu = processor_target_table[tune_index].processor;
3203 /* Pick defaults for SPE related control flags. Do this early to make sure
3204 that the TARGET_ macros are representative ASAP. */
3206 int spe_capable_cpu =
3207 (rs6000_cpu == PROCESSOR_PPC8540
3208 || rs6000_cpu == PROCESSOR_PPC8548);
3210 if (!global_options_set.x_rs6000_spe_abi)
3211 rs6000_spe_abi = spe_capable_cpu;
3213 if (!global_options_set.x_rs6000_spe)
3214 rs6000_spe = spe_capable_cpu;
3216 if (!global_options_set.x_rs6000_float_gprs)
3217 rs6000_float_gprs =
3218 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3219 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3220 : 0);
3223 if (global_options_set.x_rs6000_spe_abi
3224 && rs6000_spe_abi
3225 && !TARGET_SPE_ABI)
3226 error ("not configured for SPE ABI");
3228 if (global_options_set.x_rs6000_spe
3229 && rs6000_spe
3230 && !TARGET_SPE)
3231 error ("not configured for SPE instruction set");
3233 if (main_target_opt != NULL
3234 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3235 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3236 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3237 error ("target attribute or pragma changes SPE ABI");
3239 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3240 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3241 || rs6000_cpu == PROCESSOR_PPCE5500)
3243 if (TARGET_ALTIVEC)
3244 error ("AltiVec not supported in this target");
3245 if (TARGET_SPE)
3246 error ("SPE not supported in this target");
3248 if (rs6000_cpu == PROCESSOR_PPCE6500)
3250 if (TARGET_SPE)
3251 error ("SPE not supported in this target");
3254 /* Disable Cell microcode if we are optimizing for the Cell
3255 and not optimizing for size. */
3256 if (rs6000_gen_cell_microcode == -1)
3257 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3258 && !optimize_size);
3260 /* If we are optimizing big endian systems for space and it's OK to
3261 use instructions that would be microcoded on the Cell, use the
3262 load/store multiple and string instructions. */
3263 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3264 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3265 | OPTION_MASK_STRING);
3267 /* Don't allow -mmultiple or -mstring on little endian systems
3268 unless the cpu is a 750, because the hardware doesn't support these
3269 instructions in little endian mode and raises an alignment
3270 trap. The 750 does not cause an alignment trap (except when the
3271 target is unaligned). */
3273 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3275 if (TARGET_MULTIPLE)
3277 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3278 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3279 warning (0, "-mmultiple is not supported on little endian systems");
3282 if (TARGET_STRING)
3284 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3285 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3286 warning (0, "-mstring is not supported on little endian systems");
3290 /* If little-endian, default to -mstrict-align on older processors.
3291 Testing for htm matches power8 and later. */
3292 if (!BYTES_BIG_ENDIAN
3293 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3294 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3296 /* -maltivec={le,be} implies -maltivec. */
3297 if (rs6000_altivec_element_order != 0)
3298 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3300 /* Disallow -maltivec=le in big endian mode for now. This is not
3301 known to be useful for anyone. */
3302 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3304 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3305 rs6000_altivec_element_order = 0;
3308 /* Add some warnings for VSX. */
3309 if (TARGET_VSX)
3311 const char *msg = NULL;
3312 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3313 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3315 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3316 msg = N_("-mvsx requires hardware floating point");
3317 else
3319 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3320 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3323 else if (TARGET_PAIRED_FLOAT)
3324 msg = N_("-mvsx and -mpaired are incompatible");
3325 else if (TARGET_AVOID_XFORM > 0)
3326 msg = N_("-mvsx needs indexed addressing");
3327 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3328 & OPTION_MASK_ALTIVEC))
3330 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3331 msg = N_("-mvsx and -mno-altivec are incompatible");
3332 else
3333 msg = N_("-mno-altivec disables vsx");
3336 if (msg)
3338 warning (0, msg);
3339 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3340 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3344 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3345 the -mcpu setting to enable options that conflict. */
3346 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3347 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3348 | OPTION_MASK_ALTIVEC
3349 | OPTION_MASK_VSX)) != 0)
3350 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3351 | OPTION_MASK_DIRECT_MOVE)
3352 & ~rs6000_isa_flags_explicit);
3354 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3355 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3357 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3358 unless the user explicitly used the -mno-<option> to disable the code. */
3359 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3360 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3361 else if (TARGET_VSX)
3362 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3363 else if (TARGET_POPCNTD)
3364 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3365 else if (TARGET_DFP)
3366 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3367 else if (TARGET_CMPB)
3368 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3369 else if (TARGET_FPRND)
3370 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3371 else if (TARGET_POPCNTB)
3372 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3373 else if (TARGET_ALTIVEC)
3374 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
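/* Added note (not in the original source): the recurring idiom
   `rs6000_isa_flags |= (MASK & ~rs6000_isa_flags_explicit)' turns on
   default bits only where the user made no explicit choice. For
   example, with -mcpu=power7 -mno-vsx, OPTION_MASK_VSX is recorded in
   rs6000_isa_flags_explicit, so ISA_2_6_MASKS_SERVER cannot silently
   re-enable VSX here. */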
3376 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3378 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3379 error ("-mcrypto requires -maltivec");
3380 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3383 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3385 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3386 error ("-mdirect-move requires -mvsx");
3387 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3390 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3392 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3393 error ("-mpower8-vector requires -maltivec");
3394 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3397 if (TARGET_P8_VECTOR && !TARGET_VSX)
3399 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3400 error ("-mpower8-vector requires -mvsx");
3401 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3404 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3406 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3407 error ("-mvsx-timode requires -mvsx");
3408 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3411 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3413 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3414 error ("-mhard-dfp requires -mhard-float");
3415 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3418 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3419 silently turn off quad memory mode. */
3420 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3422 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3423 warning (0, N_("-mquad-memory requires 64-bit mode"));
3425 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3426 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3428 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3429 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3432 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3433 the words are reversed, but atomic operations can still be done by
3434 swapping the words. */
3435 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3437 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3438 warning (0, N_("-mquad-memory is not available in little endian mode"));
3440 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3443 /* Assume if the user asked for normal quad memory instructions, they want
3444 the atomic versions as well, unless they explicitly told us not to use quad
3445 word atomic instructions. */
3446 if (TARGET_QUAD_MEMORY
3447 && !TARGET_QUAD_MEMORY_ATOMIC
3448 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3449 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3451 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3452 generating power8 instructions. */
3453 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3454 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3455 & OPTION_MASK_P8_FUSION);
3457 /* Power8 does not fuse sign-extended loads with the addis. If we are
3458 optimizing at high levels for speed, convert a sign-extended load into a
3459 zero-extending load and an explicit sign extension. */
3460 if (TARGET_P8_FUSION
3461 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3462 && optimize_function_for_speed_p (cfun)
3463 && optimize >= 3)
3464 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
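/* Added illustration (mnemonics for exposition only, not from the
   original source): under this flag a sign-extending halfword load that
   would have been a single lha is instead emitted as lhz (a
   zero-extending load, which power8 can fuse with the preceding addis)
   followed by an explicit extsh. */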
3466 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3467 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3469 /* E500mc does "better" if we inline more aggressively. Respect the
3470 user's opinion, though. */
3471 if (rs6000_block_move_inline_limit == 0
3472 && (rs6000_cpu == PROCESSOR_PPCE500MC
3473 || rs6000_cpu == PROCESSOR_PPCE500MC64
3474 || rs6000_cpu == PROCESSOR_PPCE5500
3475 || rs6000_cpu == PROCESSOR_PPCE6500))
3476 rs6000_block_move_inline_limit = 128;
3478 /* store_one_arg depends on expand_block_move to handle at least the
3479 size of reg_parm_stack_space. */
3480 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3481 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3483 if (global_init_p)
3485 /* If the appropriate debug option is enabled, replace the target hooks
3486 with debug versions that call the real version and then print
3487 debugging information. */
3488 if (TARGET_DEBUG_COST)
3490 targetm.rtx_costs = rs6000_debug_rtx_costs;
3491 targetm.address_cost = rs6000_debug_address_cost;
3492 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3495 if (TARGET_DEBUG_ADDR)
3497 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3498 targetm.legitimize_address = rs6000_debug_legitimize_address;
3499 rs6000_secondary_reload_class_ptr
3500 = rs6000_debug_secondary_reload_class;
3501 rs6000_secondary_memory_needed_ptr
3502 = rs6000_debug_secondary_memory_needed;
3503 rs6000_cannot_change_mode_class_ptr
3504 = rs6000_debug_cannot_change_mode_class;
3505 rs6000_preferred_reload_class_ptr
3506 = rs6000_debug_preferred_reload_class;
3507 rs6000_legitimize_reload_address_ptr
3508 = rs6000_debug_legitimize_reload_address;
3509 rs6000_mode_dependent_address_ptr
3510 = rs6000_debug_mode_dependent_address;
3513 if (rs6000_veclibabi_name)
3515 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3516 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3517 else
3519 error ("unknown vectorization library ABI type (%s) for "
3520 "-mveclibabi= switch", rs6000_veclibabi_name);
3521 ret = false;
3526 if (!global_options_set.x_rs6000_long_double_type_size)
3528 if (main_target_opt != NULL
3529 && (main_target_opt->x_rs6000_long_double_type_size
3530 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3531 error ("target attribute or pragma changes long double size");
3532 else
3533 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3536 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3537 if (!global_options_set.x_rs6000_ieeequad)
3538 rs6000_ieeequad = 1;
3539 #endif
3541 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3542 target attribute or pragma which automatically enables both options,
3543 unless the altivec ABI was set. This is set by default for 64-bit, but
3544 not for 32-bit. */
3545 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3546 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3547 & ~rs6000_isa_flags_explicit);
3549 /* Enable Altivec ABI for AIX -maltivec. */
3550 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3552 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3553 error ("target attribute or pragma changes AltiVec ABI");
3554 else
3555 rs6000_altivec_abi = 1;
3558 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3559 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3560 be explicitly overridden in either case. */
3561 if (TARGET_ELF)
3563 if (!global_options_set.x_rs6000_altivec_abi
3564 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3566 if (main_target_opt != NULL &&
3567 !main_target_opt->x_rs6000_altivec_abi)
3568 error ("target attribute or pragma changes AltiVec ABI");
3569 else
3570 rs6000_altivec_abi = 1;
3574 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3575 So far, the only darwin64 targets are also MACH-O. */
3576 if (TARGET_MACHO
3577 && DEFAULT_ABI == ABI_DARWIN
3578 && TARGET_64BIT)
3580 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3581 error ("target attribute or pragma changes darwin64 ABI");
3582 else
3584 rs6000_darwin64_abi = 1;
3585 /* Default to natural alignment, for better performance. */
3586 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3590 /* Place FP constants in the constant pool instead of TOC
3591 if section anchors enabled. */
3592 if (flag_section_anchors
3593 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3594 TARGET_NO_FP_IN_TOC = 1;
3596 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3597 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3599 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3600 SUBTARGET_OVERRIDE_OPTIONS;
3601 #endif
3602 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3603 SUBSUBTARGET_OVERRIDE_OPTIONS;
3604 #endif
3605 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3606 SUB3TARGET_OVERRIDE_OPTIONS;
3607 #endif
3609 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3610 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3612 /* For the E500 family of cores, reset the single/double FP flags to let us
3613 check that they remain constant across attributes or pragmas. Also,
3614 clear a possible request for string instructions, which are not supported
3615 and which we might have silently enabled above for -Os.
3617 For other families, clear ISEL in case it was set implicitly.
3620 switch (rs6000_cpu)
3622 case PROCESSOR_PPC8540:
3623 case PROCESSOR_PPC8548:
3624 case PROCESSOR_PPCE500MC:
3625 case PROCESSOR_PPCE500MC64:
3626 case PROCESSOR_PPCE5500:
3627 case PROCESSOR_PPCE6500:
3629 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3630 rs6000_double_float = TARGET_E500_DOUBLE;
3632 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3634 break;
3636 default:
3638 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3639 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3641 break;
3644 if (main_target_opt)
3646 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3647 error ("target attribute or pragma changes single precision floating "
3648 "point");
3649 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3650 error ("target attribute or pragma changes double precision floating "
3651 "point");
3654 /* Detect invalid option combinations with E500. */
3655 CHECK_E500_OPTIONS;
3657 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3658 && rs6000_cpu != PROCESSOR_POWER5
3659 && rs6000_cpu != PROCESSOR_POWER6
3660 && rs6000_cpu != PROCESSOR_POWER7
3661 && rs6000_cpu != PROCESSOR_POWER8
3662 && rs6000_cpu != PROCESSOR_PPCA2
3663 && rs6000_cpu != PROCESSOR_CELL
3664 && rs6000_cpu != PROCESSOR_PPC476);
3665 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3666 || rs6000_cpu == PROCESSOR_POWER5
3667 || rs6000_cpu == PROCESSOR_POWER7
3668 || rs6000_cpu == PROCESSOR_POWER8);
3669 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3670 || rs6000_cpu == PROCESSOR_POWER5
3671 || rs6000_cpu == PROCESSOR_POWER6
3672 || rs6000_cpu == PROCESSOR_POWER7
3673 || rs6000_cpu == PROCESSOR_POWER8
3674 || rs6000_cpu == PROCESSOR_PPCE500MC
3675 || rs6000_cpu == PROCESSOR_PPCE500MC64
3676 || rs6000_cpu == PROCESSOR_PPCE5500
3677 || rs6000_cpu == PROCESSOR_PPCE6500);
3679 /* Allow debug switches to override the above settings. These are set to -1
3680 in rs6000.opt to indicate the user hasn't directly set the switch. */
3681 if (TARGET_ALWAYS_HINT >= 0)
3682 rs6000_always_hint = TARGET_ALWAYS_HINT;
3684 if (TARGET_SCHED_GROUPS >= 0)
3685 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3687 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3688 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3690 rs6000_sched_restricted_insns_priority
3691 = (rs6000_sched_groups ? 1 : 0);
3693 /* Handle -msched-costly-dep option. */
3694 rs6000_sched_costly_dep
3695 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3697 if (rs6000_sched_costly_dep_str)
3699 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3700 rs6000_sched_costly_dep = no_dep_costly;
3701 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3702 rs6000_sched_costly_dep = all_deps_costly;
3703 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3704 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3705 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3706 rs6000_sched_costly_dep = store_to_load_dep_costly;
3707 else
3708 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3709 atoi (rs6000_sched_costly_dep_str));
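/* Added usage note: besides the keywords handled above, e.g.
   -msched-costly-dep=true_store_to_load, a bare number such as
   -msched-costly-dep=20 falls through to atoi; the scheduler hooks are
   assumed to treat that numeric value as a cost threshold (an
   assumption about the consumers of this enum, not spelled out here). */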
3712 /* Handle -minsert-sched-nops option. */
3713 rs6000_sched_insert_nops
3714 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3716 if (rs6000_sched_insert_nops_str)
3718 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3719 rs6000_sched_insert_nops = sched_finish_none;
3720 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3721 rs6000_sched_insert_nops = sched_finish_pad_groups;
3722 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3723 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3724 else
3725 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3726 atoi (rs6000_sched_insert_nops_str));
3729 if (global_init_p)
3731 #ifdef TARGET_REGNAMES
3732 /* If the user desires alternate register names, copy in the
3733 alternate names now. */
3734 if (TARGET_REGNAMES)
3735 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3736 #endif
3738 /* Set aix_struct_return last, after the ABI is determined.
3739 If -maix-struct-return or -msvr4-struct-return was explicitly
3740 used, don't override with the ABI default. */
3741 if (!global_options_set.x_aix_struct_return)
3742 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3744 #if 0
3745 /* IBM XL compiler defaults to unsigned bitfields. */
3746 if (TARGET_XL_COMPAT)
3747 flag_signed_bitfields = 0;
3748 #endif
3750 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3751 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3753 if (TARGET_TOC)
3754 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3756 /* We can only guarantee the availability of DI pseudo-ops when
3757 assembling for 64-bit targets. */
3758 if (!TARGET_64BIT)
3760 targetm.asm_out.aligned_op.di = NULL;
3761 targetm.asm_out.unaligned_op.di = NULL;
3765 /* Set branch target alignment, if not optimizing for size. */
3766 if (!optimize_size)
3768 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
3769 8-byte aligned to avoid misprediction by the branch predictor. */
3770 if (rs6000_cpu == PROCESSOR_TITAN
3771 || rs6000_cpu == PROCESSOR_CELL)
3773 if (align_functions <= 0)
3774 align_functions = 8;
3775 if (align_jumps <= 0)
3776 align_jumps = 8;
3777 if (align_loops <= 0)
3778 align_loops = 8;
3780 if (rs6000_align_branch_targets)
3782 if (align_functions <= 0)
3783 align_functions = 16;
3784 if (align_jumps <= 0)
3785 align_jumps = 16;
3786 if (align_loops <= 0)
3788 can_override_loop_align = 1;
3789 align_loops = 16;
3792 if (align_jumps_max_skip <= 0)
3793 align_jumps_max_skip = 15;
3794 if (align_loops_max_skip <= 0)
3795 align_loops_max_skip = 15;
3798 /* Arrange to save and restore machine status around nested functions. */
3799 init_machine_status = rs6000_init_machine_status;
3801 /* We should always be splitting complex arguments, but we can't break
3802 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3803 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
3804 targetm.calls.split_complex_arg = NULL;
3807 /* Initialize rs6000_cost with the appropriate target costs. */
3808 if (optimize_size)
3809 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3810 else
3811 switch (rs6000_cpu)
3813 case PROCESSOR_RS64A:
3814 rs6000_cost = &rs64a_cost;
3815 break;
3817 case PROCESSOR_MPCCORE:
3818 rs6000_cost = &mpccore_cost;
3819 break;
3821 case PROCESSOR_PPC403:
3822 rs6000_cost = &ppc403_cost;
3823 break;
3825 case PROCESSOR_PPC405:
3826 rs6000_cost = &ppc405_cost;
3827 break;
3829 case PROCESSOR_PPC440:
3830 rs6000_cost = &ppc440_cost;
3831 break;
3833 case PROCESSOR_PPC476:
3834 rs6000_cost = &ppc476_cost;
3835 break;
3837 case PROCESSOR_PPC601:
3838 rs6000_cost = &ppc601_cost;
3839 break;
3841 case PROCESSOR_PPC603:
3842 rs6000_cost = &ppc603_cost;
3843 break;
3845 case PROCESSOR_PPC604:
3846 rs6000_cost = &ppc604_cost;
3847 break;
3849 case PROCESSOR_PPC604e:
3850 rs6000_cost = &ppc604e_cost;
3851 break;
3853 case PROCESSOR_PPC620:
3854 rs6000_cost = &ppc620_cost;
3855 break;
3857 case PROCESSOR_PPC630:
3858 rs6000_cost = &ppc630_cost;
3859 break;
3861 case PROCESSOR_CELL:
3862 rs6000_cost = &ppccell_cost;
3863 break;
3865 case PROCESSOR_PPC750:
3866 case PROCESSOR_PPC7400:
3867 rs6000_cost = &ppc750_cost;
3868 break;
3870 case PROCESSOR_PPC7450:
3871 rs6000_cost = &ppc7450_cost;
3872 break;
3874 case PROCESSOR_PPC8540:
3875 case PROCESSOR_PPC8548:
3876 rs6000_cost = &ppc8540_cost;
3877 break;
3879 case PROCESSOR_PPCE300C2:
3880 case PROCESSOR_PPCE300C3:
3881 rs6000_cost = &ppce300c2c3_cost;
3882 break;
3884 case PROCESSOR_PPCE500MC:
3885 rs6000_cost = &ppce500mc_cost;
3886 break;
3888 case PROCESSOR_PPCE500MC64:
3889 rs6000_cost = &ppce500mc64_cost;
3890 break;
3892 case PROCESSOR_PPCE5500:
3893 rs6000_cost = &ppce5500_cost;
3894 break;
3896 case PROCESSOR_PPCE6500:
3897 rs6000_cost = &ppce6500_cost;
3898 break;
3900 case PROCESSOR_TITAN:
3901 rs6000_cost = &titan_cost;
3902 break;
3904 case PROCESSOR_POWER4:
3905 case PROCESSOR_POWER5:
3906 rs6000_cost = &power4_cost;
3907 break;
3909 case PROCESSOR_POWER6:
3910 rs6000_cost = &power6_cost;
3911 break;
3913 case PROCESSOR_POWER7:
3914 rs6000_cost = &power7_cost;
3915 break;
3917 case PROCESSOR_POWER8:
3918 rs6000_cost = &power8_cost;
3919 break;
3921 case PROCESSOR_PPCA2:
3922 rs6000_cost = &ppca2_cost;
3923 break;
3925 default:
3926 gcc_unreachable ();
3929 if (global_init_p)
3931 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3932 rs6000_cost->simultaneous_prefetches,
3933 global_options.x_param_values,
3934 global_options_set.x_param_values);
3935 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3936 global_options.x_param_values,
3937 global_options_set.x_param_values);
3938 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3939 rs6000_cost->cache_line_size,
3940 global_options.x_param_values,
3941 global_options_set.x_param_values);
3942 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3943 global_options.x_param_values,
3944 global_options_set.x_param_values);
3946 /* Increase loop peeling limits based on performance analysis. */
3947 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3948 global_options.x_param_values,
3949 global_options_set.x_param_values);
3950 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3951 global_options.x_param_values,
3952 global_options_set.x_param_values);
3954 /* If using typedef char *va_list, signal that
3955 __builtin_va_start (&ap, 0) can be optimized to
3956 ap = __builtin_next_arg (0). */
3957 if (DEFAULT_ABI != ABI_V4)
3958 targetm.expand_builtin_va_start = NULL;
3961 /* Set up single/double float flags.
3962 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3963 then set both flags. */
3964 if (TARGET_HARD_FLOAT && TARGET_FPRS
3965 && rs6000_single_float == 0 && rs6000_double_float == 0)
3966 rs6000_single_float = rs6000_double_float = 1;
3968 /* If not explicitly specified via option, decide whether to generate indexed
3969 load/store instructions. */
3970 if (TARGET_AVOID_XFORM == -1)
3971 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3972 DERAT mispredict penalty. However the LVE and STVE altivec instructions
3973 need indexed accesses and the type used is the scalar type of the element
3974 being loaded or stored. */
3975 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3976 && !TARGET_ALTIVEC);
3978 /* Set the -mrecip options. */
3979 if (rs6000_recip_name)
3981 char *p = ASTRDUP (rs6000_recip_name);
3982 char *q;
3983 unsigned int mask, i;
3984 bool invert;
3986 while ((q = strtok (p, ",")) != NULL)
3988 p = NULL;
3989 if (*q == '!')
3991 invert = true;
3992 q++;
3994 else
3995 invert = false;
3997 if (!strcmp (q, "default"))
3998 mask = ((TARGET_RECIP_PRECISION)
3999 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4000 else
4002 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4003 if (!strcmp (q, recip_options[i].string))
4005 mask = recip_options[i].mask;
4006 break;
4009 if (i == ARRAY_SIZE (recip_options))
4011 error ("unknown option for -mrecip=%s", q);
4012 invert = false;
4013 mask = 0;
4014 ret = false;
4018 if (invert)
4019 rs6000_recip_control &= ~mask;
4020 else
4021 rs6000_recip_control |= mask;
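/* Added usage note: the loop above parses a comma-separated list with
   optional '!' negation, so a hypothetical invocation such as
   -mrecip=default,!rsqrtd (assuming "rsqrtd" appears in recip_options)
   first ORs in the default mask and then clears the rsqrtd bit. */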
4025 /* Set the builtin mask of the various options that could affect which
4026 builtins are available. In the past we used target_flags, but we've run out
4027 of bits, and some options like SPE and PAIRED are no longer in
4028 target_flags. */
4029 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4030 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4032 fprintf (stderr,
4033 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
4034 rs6000_builtin_mask);
4035 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
4038 /* Initialize all of the registers. */
4039 rs6000_init_hard_regno_mode_ok (global_init_p);
4041 /* Save the initial options in case the user uses function-specific options. */
4042 if (global_init_p)
4043 target_option_default_node = target_option_current_node
4044 = build_target_option_node (&global_options);
4046 /* If not explicitly specified via option, decide whether to generate the
4047 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4048 if (TARGET_LINK_STACK == -1)
4049 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4051 return ret;
4054 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4055 define the target cpu type. */
4057 static void
4058 rs6000_option_override (void)
4060 (void) rs6000_option_override_internal (true);
4064 /* Implement targetm.vectorize.builtin_mask_for_load. */
4065 static tree
4066 rs6000_builtin_mask_for_load (void)
4068 if (TARGET_ALTIVEC || TARGET_VSX)
4069 return altivec_builtin_mask_for_load;
4070 else
4071 return 0;
4074 /* Implement LOOP_ALIGN. */
4076 rs6000_loop_align (rtx label)
4078 basic_block bb;
4079 int ninsns;
4081 /* Don't override loop alignment if -falign-loops was specified. */
4082 if (!can_override_loop_align)
4083 return align_loops_log;
4085 bb = BLOCK_FOR_INSN (label);
4086 ninsns = num_loop_insns (bb->loop_father);
4088 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
4089 if (ninsns > 4 && ninsns <= 8
4090 && (rs6000_cpu == PROCESSOR_POWER4
4091 || rs6000_cpu == PROCESSOR_POWER5
4092 || rs6000_cpu == PROCESSOR_POWER6
4093 || rs6000_cpu == PROCESSOR_POWER7
4094 || rs6000_cpu == PROCESSOR_POWER8))
4095 return 5;
4096 else
4097 return align_loops_log;
4100 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4101 static int
4102 rs6000_loop_align_max_skip (rtx label)
4104 return (1 << rs6000_loop_align (label)) - 1;
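/* Added worked example: when rs6000_loop_align returns 5, loops are
   aligned to 1 << 5 == 32 bytes and up to (1 << 5) - 1 == 31 bytes of
   padding may be skipped to reach that boundary. */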
4107 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4108 after applying N iterations. This routine does not determine how many
4109 iterations are required to reach the desired alignment. */
4111 static bool
4112 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4114 if (is_packed)
4115 return false;
4117 if (TARGET_32BIT)
4119 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4120 return true;
4122 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4123 return true;
4125 return false;
4127 else
4129 if (TARGET_MACHO)
4130 return false;
4132 /* Assume that all other types are naturally aligned. CHECKME! */
4133 return true;
4137 /* Return true if the vector misalignment factor is supported by the
4138 target. */
4139 static bool
4140 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
4141 const_tree type,
4142 int misalignment,
4143 bool is_packed)
4145 if (TARGET_VSX)
4147 /* Return if movmisalign pattern is not supported for this mode. */
4148 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4149 return false;
4151 if (misalignment == -1)
4153 /* Misalignment factor is unknown at compile time but we know
4154 it's word aligned. */
4155 if (rs6000_vector_alignment_reachable (type, is_packed))
4157 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4159 if (element_size == 64 || element_size == 32)
4160 return true;
4163 return false;
4166 /* VSX supports word-aligned vectors. */
4167 if (misalignment % 4 == 0)
4168 return true;
4170 return false;
4173 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4174 static int
4175 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4176 tree vectype, int misalign)
4178 unsigned elements;
4179 tree elem_type;
4181 switch (type_of_cost)
4183 case scalar_stmt:
4184 case scalar_load:
4185 case scalar_store:
4186 case vector_stmt:
4187 case vector_load:
4188 case vector_store:
4189 case vec_to_scalar:
4190 case scalar_to_vec:
4191 case cond_branch_not_taken:
4192 return 1;
4194 case vec_perm:
4195 if (TARGET_VSX)
4196 return 3;
4197 else
4198 return 1;
4200 case vec_promote_demote:
4201 if (TARGET_VSX)
4202 return 4;
4203 else
4204 return 1;
4206 case cond_branch_taken:
4207 return 3;
4209 case unaligned_load:
4210 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4212 elements = TYPE_VECTOR_SUBPARTS (vectype);
4213 if (elements == 2)
4214 /* Double word aligned. */
4215 return 2;
4217 if (elements == 4)
4219 switch (misalign)
4221 case 8:
4222 /* Double word aligned. */
4223 return 2;
4225 case -1:
4226 /* Unknown misalignment. */
4227 case 4:
4228 case 12:
4229 /* Word aligned. */
4230 return 22;
4232 default:
4233 gcc_unreachable ();
4238 if (TARGET_ALTIVEC)
4239 /* Misaligned loads are not supported. */
4240 gcc_unreachable ();
4242 return 2;
4244 case unaligned_store:
4245 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4247 elements = TYPE_VECTOR_SUBPARTS (vectype);
4248 if (elements == 2)
4249 /* Double word aligned. */
4250 return 2;
4252 if (elements == 4)
4254 switch (misalign)
4256 case 8:
4257 /* Double word aligned. */
4258 return 2;
4260 case -1:
4261 /* Unknown misalignment. */
4262 case 4:
4263 case 12:
4264 /* Word aligned. */
4265 return 23;
4267 default:
4268 gcc_unreachable ();
4273 if (TARGET_ALTIVEC)
4274 /* Misaligned stores are not supported. */
4275 gcc_unreachable ();
4277 return 2;
4279 case vec_construct:
4280 elements = TYPE_VECTOR_SUBPARTS (vectype);
4281 elem_type = TREE_TYPE (vectype);
4282 /* 32-bit floats loaded into registers are stored as double
4283 precision, so we need n/2 converts in addition to the usual
4284 n/2 merges to construct a vector of short floats from them. */
4285 if (SCALAR_FLOAT_TYPE_P (elem_type)
4286 && TYPE_PRECISION (elem_type) == 32)
4287 return elements + 1;
4288 else
4289 return elements / 2 + 1;
4291 default:
4292 gcc_unreachable ();
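/* Added worked example: a V4SF unaligned_load with misalign == 4 (word
   aligned) costs 22 above, versus 1 for an aligned vector_load, so the
   vectorizer is strongly biased toward arranging aligned accesses when
   only word alignment can be proven. */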
4296 /* Implement targetm.vectorize.preferred_simd_mode. */
4298 static enum machine_mode
4299 rs6000_preferred_simd_mode (enum machine_mode mode)
4301 if (TARGET_VSX)
4302 switch (mode)
4304 case DFmode:
4305 return V2DFmode;
4306 default:;
4308 if (TARGET_ALTIVEC || TARGET_VSX)
4309 switch (mode)
4311 case SFmode:
4312 return V4SFmode;
4313 case TImode:
4314 return V1TImode;
4315 case DImode:
4316 return V2DImode;
4317 case SImode:
4318 return V4SImode;
4319 case HImode:
4320 return V8HImode;
4321 case QImode:
4322 return V16QImode;
4323 default:;
4325 if (TARGET_SPE)
4326 switch (mode)
4328 case SFmode:
4329 return V2SFmode;
4330 case SImode:
4331 return V2SImode;
4332 default:;
4334 if (TARGET_PAIRED_FLOAT
4335 && mode == SFmode)
4336 return V2SFmode;
4337 return word_mode;
4340 typedef struct _rs6000_cost_data
4342 struct loop *loop_info;
4343 unsigned cost[3];
4344 } rs6000_cost_data;
4346 /* Test for likely overcommitment of vector hardware resources. If a
4347 loop iteration is relatively large, and too large a percentage of
4348 instructions in the loop are vectorized, the cost model may not
4349 adequately reflect delays from unavailable vector resources.
4350 Penalize the loop body cost for this case. */
4352 static void
4353 rs6000_density_test (rs6000_cost_data *data)
4355 const int DENSITY_PCT_THRESHOLD = 85;
4356 const int DENSITY_SIZE_THRESHOLD = 70;
4357 const int DENSITY_PENALTY = 10;
4358 struct loop *loop = data->loop_info;
4359 basic_block *bbs = get_loop_body (loop);
4360 int nbbs = loop->num_nodes;
4361 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4362 int i, density_pct;
4364 for (i = 0; i < nbbs; i++)
4366 basic_block bb = bbs[i];
4367 gimple_stmt_iterator gsi;
4369 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4371 gimple stmt = gsi_stmt (gsi);
4372 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4374 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4375 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4376 not_vec_cost++;
4380 free (bbs);
4381 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4383 if (density_pct > DENSITY_PCT_THRESHOLD
4384 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4386 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4387 if (dump_enabled_p ())
4388 dump_printf_loc (MSG_NOTE, vect_location,
4389 "density %d%%, cost %d exceeds threshold, penalizing "
4390 "loop body cost by %d%%", density_pct,
4391 vec_cost + not_vec_cost, DENSITY_PENALTY);
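/* Added worked example: with vec_cost == 90 and not_vec_cost == 10,
   density_pct == 90 * 100 / 100 == 90, which exceeds 85, and the size
   100 exceeds 70, so the body cost becomes 90 * 110 / 100 == 99. */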
4395 /* Implement targetm.vectorize.init_cost. */
4397 static void *
4398 rs6000_init_cost (struct loop *loop_info)
4400 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4401 data->loop_info = loop_info;
4402 data->cost[vect_prologue] = 0;
4403 data->cost[vect_body] = 0;
4404 data->cost[vect_epilogue] = 0;
4405 return data;
4408 /* Implement targetm.vectorize.add_stmt_cost. */
4410 static unsigned
4411 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4412 struct _stmt_vec_info *stmt_info, int misalign,
4413 enum vect_cost_model_location where)
4415 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4416 unsigned retval = 0;
4418 if (flag_vect_cost_model)
4420 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4421 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4422 misalign);
4423 /* Statements in an inner loop relative to the loop being
4424 vectorized are weighted more heavily. The value here is
4425 arbitrary and could potentially be improved with analysis. */
4426 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4427 count *= 50; /* FIXME. */
4429 retval = (unsigned) (count * stmt_cost);
4430 cost_data->cost[where] += retval;
4433 return retval;
4436 /* Implement targetm.vectorize.finish_cost. */
4438 static void
4439 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4440 unsigned *body_cost, unsigned *epilogue_cost)
4442 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4444 if (cost_data->loop_info)
4445 rs6000_density_test (cost_data);
4447 *prologue_cost = cost_data->cost[vect_prologue];
4448 *body_cost = cost_data->cost[vect_body];
4449 *epilogue_cost = cost_data->cost[vect_epilogue];
4452 /* Implement targetm.vectorize.destroy_cost_data. */
4454 static void
4455 rs6000_destroy_cost_data (void *data)
4457 free (data);
4460 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4461 library with vectorized intrinsics. */
4463 static tree
4464 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4466 char name[32];
4467 const char *suffix = NULL;
4468 tree fntype, new_fndecl, bdecl = NULL_TREE;
4469 int n_args = 1;
4470 const char *bname;
4471 enum machine_mode el_mode, in_mode;
4472 int n, in_n;
4474 /* Libmass is suitable for unsafe math only as it does not correctly support
4475 parts of IEEE with the required precision such as denormals. Only support
4476 it if we have VSX to use the simd d2 or f4 functions.
4477 XXX: Add variable length support. */
4478 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4479 return NULL_TREE;
4481 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4482 n = TYPE_VECTOR_SUBPARTS (type_out);
4483 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4484 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4485 if (el_mode != in_mode
4486 || n != in_n)
4487 return NULL_TREE;
4489 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4491 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4492 switch (fn)
4494 case BUILT_IN_ATAN2:
4495 case BUILT_IN_HYPOT:
4496 case BUILT_IN_POW:
4497 n_args = 2;
4498 /* fall through */
4500 case BUILT_IN_ACOS:
4501 case BUILT_IN_ACOSH:
4502 case BUILT_IN_ASIN:
4503 case BUILT_IN_ASINH:
4504 case BUILT_IN_ATAN:
4505 case BUILT_IN_ATANH:
4506 case BUILT_IN_CBRT:
4507 case BUILT_IN_COS:
4508 case BUILT_IN_COSH:
4509 case BUILT_IN_ERF:
4510 case BUILT_IN_ERFC:
4511 case BUILT_IN_EXP2:
4512 case BUILT_IN_EXP:
4513 case BUILT_IN_EXPM1:
4514 case BUILT_IN_LGAMMA:
4515 case BUILT_IN_LOG10:
4516 case BUILT_IN_LOG1P:
4517 case BUILT_IN_LOG2:
4518 case BUILT_IN_LOG:
4519 case BUILT_IN_SIN:
4520 case BUILT_IN_SINH:
4521 case BUILT_IN_SQRT:
4522 case BUILT_IN_TAN:
4523 case BUILT_IN_TANH:
4524 bdecl = builtin_decl_implicit (fn);
4525 suffix = "d2"; /* pow -> powd2 */
4526 if (el_mode != DFmode
4527 || n != 2
4528 || !bdecl)
4529 return NULL_TREE;
4530 break;
4532 case BUILT_IN_ATAN2F:
4533 case BUILT_IN_HYPOTF:
4534 case BUILT_IN_POWF:
4535 n_args = 2;
4536 /* fall through */
4538 case BUILT_IN_ACOSF:
4539 case BUILT_IN_ACOSHF:
4540 case BUILT_IN_ASINF:
4541 case BUILT_IN_ASINHF:
4542 case BUILT_IN_ATANF:
4543 case BUILT_IN_ATANHF:
4544 case BUILT_IN_CBRTF:
4545 case BUILT_IN_COSF:
4546 case BUILT_IN_COSHF:
4547 case BUILT_IN_ERFF:
4548 case BUILT_IN_ERFCF:
4549 case BUILT_IN_EXP2F:
4550 case BUILT_IN_EXPF:
4551 case BUILT_IN_EXPM1F:
4552 case BUILT_IN_LGAMMAF:
4553 case BUILT_IN_LOG10F:
4554 case BUILT_IN_LOG1PF:
4555 case BUILT_IN_LOG2F:
4556 case BUILT_IN_LOGF:
4557 case BUILT_IN_SINF:
4558 case BUILT_IN_SINHF:
4559 case BUILT_IN_SQRTF:
4560 case BUILT_IN_TANF:
4561 case BUILT_IN_TANHF:
4562 bdecl = builtin_decl_implicit (fn);
4563 suffix = "4"; /* powf -> powf4 */
4564 if (el_mode != SFmode
4565 || n != 4
4566 || !bdecl)
4567 return NULL_TREE;
4568 break;
4570 default:
4571 return NULL_TREE;
4574 else
4575 return NULL_TREE;
4577 gcc_assert (suffix != NULL);
4578 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4579 if (!bname)
4580 return NULL_TREE;
4582 strcpy (name, bname + sizeof ("__builtin_") - 1);
4583 strcat (name, suffix);
4585 if (n_args == 1)
4586 fntype = build_function_type_list (type_out, type_in, NULL);
4587 else if (n_args == 2)
4588 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4589 else
4590 gcc_unreachable ();
4592 /* Build a function declaration for the vectorized function. */
4593 new_fndecl = build_decl (BUILTINS_LOCATION,
4594 FUNCTION_DECL, get_identifier (name), fntype);
4595 TREE_PUBLIC (new_fndecl) = 1;
4596 DECL_EXTERNAL (new_fndecl) = 1;
4597 DECL_IS_NOVOPS (new_fndecl) = 1;
4598 TREE_READONLY (new_fndecl) = 1;
4600 return new_fndecl;
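/* Added naming illustration: the code above strips the "__builtin_"
   prefix and appends the chosen suffix, so __builtin_pow maps to the
   libmass routine "powd2" (V2DF) and __builtin_sinf to "sinf4" (V4SF). */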
4603 /* Returns a function decl for a vectorized version of the builtin function
4604 with builtin function code FN and the result vector type TYPE, or NULL_TREE
4605 if it is not available. */
4607 static tree
4608 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4609 tree type_in)
4611 enum machine_mode in_mode, out_mode;
4612 int in_n, out_n;
4614 if (TARGET_DEBUG_BUILTIN)
4615 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4616 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4617 GET_MODE_NAME (TYPE_MODE (type_out)),
4618 GET_MODE_NAME (TYPE_MODE (type_in)));
4620 if (TREE_CODE (type_out) != VECTOR_TYPE
4621 || TREE_CODE (type_in) != VECTOR_TYPE
4622 || !TARGET_VECTORIZE_BUILTINS)
4623 return NULL_TREE;
4625 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4626 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4627 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4628 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4630 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4632 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4633 switch (fn)
4635 case BUILT_IN_CLZIMAX:
4636 case BUILT_IN_CLZLL:
4637 case BUILT_IN_CLZL:
4638 case BUILT_IN_CLZ:
4639 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4641 if (out_mode == QImode && out_n == 16)
4642 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4643 else if (out_mode == HImode && out_n == 8)
4644 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4645 else if (out_mode == SImode && out_n == 4)
4646 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4647 else if (out_mode == DImode && out_n == 2)
4648 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4650 break;
4651 case BUILT_IN_COPYSIGN:
4652 if (VECTOR_UNIT_VSX_P (V2DFmode)
4653 && out_mode == DFmode && out_n == 2
4654 && in_mode == DFmode && in_n == 2)
4655 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4656 break;
4657 case BUILT_IN_COPYSIGNF:
4658 if (out_mode != SFmode || out_n != 4
4659 || in_mode != SFmode || in_n != 4)
4660 break;
4661 if (VECTOR_UNIT_VSX_P (V4SFmode))
4662 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4663 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4664 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4665 break;
4666 case BUILT_IN_POPCOUNTIMAX:
4667 case BUILT_IN_POPCOUNTLL:
4668 case BUILT_IN_POPCOUNTL:
4669 case BUILT_IN_POPCOUNT:
4670 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4672 if (out_mode == QImode && out_n == 16)
4673 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4674 else if (out_mode == HImode && out_n == 8)
4675 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4676 else if (out_mode == SImode && out_n == 4)
4677 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4678 else if (out_mode == DImode && out_n == 2)
4679 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4681 break;
4682 case BUILT_IN_SQRT:
4683 if (VECTOR_UNIT_VSX_P (V2DFmode)
4684 && out_mode == DFmode && out_n == 2
4685 && in_mode == DFmode && in_n == 2)
4686 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4687 break;
4688 case BUILT_IN_SQRTF:
4689 if (VECTOR_UNIT_VSX_P (V4SFmode)
4690 && out_mode == SFmode && out_n == 4
4691 && in_mode == SFmode && in_n == 4)
4692 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4693 break;
4694 case BUILT_IN_CEIL:
4695 if (VECTOR_UNIT_VSX_P (V2DFmode)
4696 && out_mode == DFmode && out_n == 2
4697 && in_mode == DFmode && in_n == 2)
4698 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4699 break;
4700 case BUILT_IN_CEILF:
4701 if (out_mode != SFmode || out_n != 4
4702 || in_mode != SFmode || in_n != 4)
4703 break;
4704 if (VECTOR_UNIT_VSX_P (V4SFmode))
4705 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4706 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4707 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4708 break;
4709 case BUILT_IN_FLOOR:
4710 if (VECTOR_UNIT_VSX_P (V2DFmode)
4711 && out_mode == DFmode && out_n == 2
4712 && in_mode == DFmode && in_n == 2)
4713 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4714 break;
4715 case BUILT_IN_FLOORF:
4716 if (out_mode != SFmode || out_n != 4
4717 || in_mode != SFmode || in_n != 4)
4718 break;
4719 if (VECTOR_UNIT_VSX_P (V4SFmode))
4720 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4721 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4722 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4723 break;
4724 case BUILT_IN_FMA:
4725 if (VECTOR_UNIT_VSX_P (V2DFmode)
4726 && out_mode == DFmode && out_n == 2
4727 && in_mode == DFmode && in_n == 2)
4728 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4729 break;
4730 case BUILT_IN_FMAF:
4731 if (VECTOR_UNIT_VSX_P (V4SFmode)
4732 && out_mode == SFmode && out_n == 4
4733 && in_mode == SFmode && in_n == 4)
4734 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4735 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4736 && out_mode == SFmode && out_n == 4
4737 && in_mode == SFmode && in_n == 4)
4738 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4739 break;
4740 case BUILT_IN_TRUNC:
4741 if (VECTOR_UNIT_VSX_P (V2DFmode)
4742 && out_mode == DFmode && out_n == 2
4743 && in_mode == DFmode && in_n == 2)
4744 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4745 break;
4746 case BUILT_IN_TRUNCF:
4747 if (out_mode != SFmode || out_n != 4
4748 || in_mode != SFmode || in_n != 4)
4749 break;
4750 if (VECTOR_UNIT_VSX_P (V4SFmode))
4751 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4752 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4753 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4754 break;
4755 case BUILT_IN_NEARBYINT:
4756 if (VECTOR_UNIT_VSX_P (V2DFmode)
4757 && flag_unsafe_math_optimizations
4758 && out_mode == DFmode && out_n == 2
4759 && in_mode == DFmode && in_n == 2)
4760 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4761 break;
4762 case BUILT_IN_NEARBYINTF:
4763 if (VECTOR_UNIT_VSX_P (V4SFmode)
4764 && flag_unsafe_math_optimizations
4765 && out_mode == SFmode && out_n == 4
4766 && in_mode == SFmode && in_n == 4)
4767 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4768 break;
4769 case BUILT_IN_RINT:
4770 if (VECTOR_UNIT_VSX_P (V2DFmode)
4771 && !flag_trapping_math
4772 && out_mode == DFmode && out_n == 2
4773 && in_mode == DFmode && in_n == 2)
4774 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4775 break;
4776 case BUILT_IN_RINTF:
4777 if (VECTOR_UNIT_VSX_P (V4SFmode)
4778 && !flag_trapping_math
4779 && out_mode == SFmode && out_n == 4
4780 && in_mode == SFmode && in_n == 4)
4781 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4782 break;
4783 default:
4784 break;
4788 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4790 enum rs6000_builtins fn
4791 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4792 switch (fn)
4794 case RS6000_BUILTIN_RSQRTF:
4795 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4796 && out_mode == SFmode && out_n == 4
4797 && in_mode == SFmode && in_n == 4)
4798 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4799 break;
4800 case RS6000_BUILTIN_RSQRT:
4801 if (VECTOR_UNIT_VSX_P (V2DFmode)
4802 && out_mode == DFmode && out_n == 2
4803 && in_mode == DFmode && in_n == 2)
4804 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4805 break;
4806 case RS6000_BUILTIN_RECIPF:
4807 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4808 && out_mode == SFmode && out_n == 4
4809 && in_mode == SFmode && in_n == 4)
4810 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4811 break;
4812 case RS6000_BUILTIN_RECIP:
4813 if (VECTOR_UNIT_VSX_P (V2DFmode)
4814 && out_mode == DFmode && out_n == 2
4815 && in_mode == DFmode && in_n == 2)
4816 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4817 break;
4818 default:
4819 break;
4823 /* Generate calls to libmass if appropriate. */
4824 if (rs6000_veclib_handler)
4825 return rs6000_veclib_handler (fndecl, type_out, type_in);
4827 return NULL_TREE;
4830 /* Default CPU string for rs6000*_file_start functions. */
4831 static const char *rs6000_default_cpu;
4833 /* Do anything needed at the start of the asm file. */
4835 static void
4836 rs6000_file_start (void)
4838 char buffer[80];
4839 const char *start = buffer;
4840 FILE *file = asm_out_file;
4842 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4844 default_file_start ();
4846 if (flag_verbose_asm)
4848 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4850 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4852 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4853 start = "";
4856 if (global_options_set.x_rs6000_cpu_index)
4858 fprintf (file, "%s -mcpu=%s", start,
4859 processor_target_table[rs6000_cpu_index].name);
4860 start = "";
4863 if (global_options_set.x_rs6000_tune_index)
4865 fprintf (file, "%s -mtune=%s", start,
4866 processor_target_table[rs6000_tune_index].name);
4867 start = "";
4870 if (PPC405_ERRATUM77)
4872 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4873 start = "";
4876 #ifdef USING_ELFOS_H
4877 switch (rs6000_sdata)
4879 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4880 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4881 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4882 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4885 if (rs6000_sdata && g_switch_value)
4887 fprintf (file, "%s -G %d", start,
4888 g_switch_value);
4889 start = "";
4891 #endif
4893 if (*start == '\0')
4894 putc ('\n', file);
4897 if (DEFAULT_ABI == ABI_ELFv2)
4898 fprintf (file, "\t.abiversion 2\n");
4900 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
4901 || (TARGET_ELF && flag_pic == 2))
4903 switch_to_section (toc_section);
4904 switch_to_section (text_section);
4909 /* Return nonzero if this function is known to have a null epilogue. */
4912 direct_return (void)
4914 if (reload_completed)
4916 rs6000_stack_t *info = rs6000_stack_info ();
4918 if (info->first_gp_reg_save == 32
4919 && info->first_fp_reg_save == 64
4920 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4921 && ! info->lr_save_p
4922 && ! info->cr_save_p
4923 && info->vrsave_mask == 0
4924 && ! info->push_p)
4925 return 1;
4928 return 0;
4931 /* Return the number of instructions it takes to form a constant in an
4932 integer register. */
4935 num_insns_constant_wide (HOST_WIDE_INT value)
4937 /* signed constant loadable with addi */
4938 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4939 return 1;
4941 /* constant loadable with addis */
4942 else if ((value & 0xffff) == 0
4943 && (value >> 31 == -1 || value >> 31 == 0))
4944 return 1;
4946 else if (TARGET_POWERPC64)
4948 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4949 HOST_WIDE_INT high = value >> 31;
4951 if (high == 0 || high == -1)
4952 return 2;
4954 high >>= 1;
4956 if (low == 0)
4957 return num_insns_constant_wide (high) + 1;
4958 else if (high == 0)
4959 return num_insns_constant_wide (low) + 1;
4960 else
4961 return (num_insns_constant_wide (high)
4962 + num_insns_constant_wide (low) + 1);
4965 else
4966 return 2;
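/* Added standalone sketch (not part of GCC): the program below mirrors
   the 32-bit cases of num_insns_constant_wide, with long long standing
   in for HOST_WIDE_INT; all names here are hypothetical. */
#include <stdio.h>

static int
insns_for_constant_32 (long long value)
{
  /* Fits a signed 16-bit immediate: one li/addi. */
  if ((unsigned long long) (value + 0x8000) < 0x10000)
    return 1;
  /* Low half zero and value sign-extends from bit 31: one lis/addis. */
  else if ((value & 0xffff) == 0
           && (value >> 31 == -1 || value >> 31 == 0))
    return 1;
  /* Otherwise a lis/ori pair. */
  else
    return 2;
}

int
main (void)
{
  printf ("%d\n", insns_for_constant_32 (0x7fff));      /* 1 */
  printf ("%d\n", insns_for_constant_32 (0x12340000));  /* 1 */
  printf ("%d\n", insns_for_constant_32 (0x12345678));  /* 2 */
  return 0;
}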
4970 num_insns_constant (rtx op, enum machine_mode mode)
4972 HOST_WIDE_INT low, high;
4974 switch (GET_CODE (op))
4976 case CONST_INT:
4977 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4978 && mask64_operand (op, mode))
4979 return 2;
4980 else
4981 return num_insns_constant_wide (INTVAL (op));
4983 case CONST_WIDE_INT:
4985 int i;
4986 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
4987 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
4988 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
4989 return ins;
4992 case CONST_DOUBLE:
4993 if (mode == SFmode || mode == SDmode)
4995 long l;
4996 REAL_VALUE_TYPE rv;
4998 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4999 if (DECIMAL_FLOAT_MODE_P (mode))
5000 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
5001 else
5002 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
5003 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5006 long l[2];
5007 REAL_VALUE_TYPE rv;
5009 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5010 if (DECIMAL_FLOAT_MODE_P (mode))
5011 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
5012 else
5013 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
5014 high = l[WORDS_BIG_ENDIAN == 0];
5015 low = l[WORDS_BIG_ENDIAN != 0];
5017 if (TARGET_32BIT)
5018 return (num_insns_constant_wide (low)
5019 + num_insns_constant_wide (high));
5020 else
5022 if ((high == 0 && low >= 0)
5023 || (high == -1 && low < 0))
5024 return num_insns_constant_wide (low);
5026 else if (mask64_operand (op, mode))
5027 return 2;
5029 else if (low == 0)
5030 return num_insns_constant_wide (high) + 1;
5032 else
5033 return (num_insns_constant_wide (high)
5034 + num_insns_constant_wide (low) + 1);
5037 default:
5038 gcc_unreachable ();
5042 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5043 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5044 corresponding element of the vector, but for V4SFmode and V2SFmode,
5045 the corresponding "float" is interpreted as an SImode integer. */
5047 HOST_WIDE_INT
5048 const_vector_elt_as_int (rtx op, unsigned int elt)
5050 rtx tmp;
5052 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5053 gcc_assert (GET_MODE (op) != V2DImode
5054 && GET_MODE (op) != V2DFmode);
5056 tmp = CONST_VECTOR_ELT (op, elt);
5057 if (GET_MODE (op) == V4SFmode
5058 || GET_MODE (op) == V2SFmode)
5059 tmp = gen_lowpart (SImode, tmp);
5060 return INTVAL (tmp);
5063 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5064 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5065 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5066 all items are set to the same value and contain COPIES replicas of the
5067 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5068 operand and the others are set to the value of the operand's msb. */
5070 static bool
5071 vspltis_constant (rtx op, unsigned step, unsigned copies)
5073 enum machine_mode mode = GET_MODE (op);
5074 enum machine_mode inner = GET_MODE_INNER (mode);
5076 unsigned i;
5077 unsigned nunits;
5078 unsigned bitsize;
5079 unsigned mask;
5081 HOST_WIDE_INT val;
5082 HOST_WIDE_INT splat_val;
5083 HOST_WIDE_INT msb_val;
5085 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5086 return false;
5088 nunits = GET_MODE_NUNITS (mode);
5089 bitsize = GET_MODE_BITSIZE (inner);
5090 mask = GET_MODE_MASK (inner);
5092 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5093 splat_val = val;
5094 msb_val = val >= 0 ? 0 : -1;
5096 /* Construct the value to be splatted, if possible. If not, return false. */
5097 for (i = 2; i <= copies; i *= 2)
5099 HOST_WIDE_INT small_val;
5100 bitsize /= 2;
5101 small_val = splat_val >> bitsize;
5102 mask >>= bitsize;
5103 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5104 return false;
5105 splat_val = small_val;
5108 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5109 if (EASY_VECTOR_15 (splat_val))
5112 /* Also check if we can splat, and then add the result to itself. Do so if
5113 the value is positive, or if the splat instruction is using OP's mode;
5114 for splat_val < 0, the splat and the add should use the same mode. */
5115 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5116 && (splat_val >= 0 || (step == 1 && copies == 1)))
5119 /* Also check if we are loading up the most significant bit which can be done by
5120 loading up -1 and shifting the value left by -1. */
5121 else if (EASY_VECTOR_MSB (splat_val, inner))
5124 else
5125 return false;
5127 /* Check if VAL is present in every STEP-th element, and the
5128 other elements are filled with its most significant bit. */
5129 for (i = 1; i < nunits; ++i)
5131 HOST_WIDE_INT desired_val;
5132 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5133 if ((i & (step - 1)) == 0)
5134 desired_val = val;
5135 else
5136 desired_val = msb_val;
5138 if (desired_val != const_vector_elt_as_int (op, elt))
5139 return false;
5142 return true;
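/* Added standalone sketch (not part of GCC): the program below renders
   the "copies" folding performed above, with long long standing in for
   HOST_WIDE_INT (assumes bitsize < 64); all names are hypothetical. A
   V4SImode vector of 0x00050005 elements folds to the halfword 5 with
   copies == 2, i.e. it is reachable by vspltish 5. */
#include <stdbool.h>
#include <stdio.h>

static bool
splat_with_copies (long long val, unsigned bitsize, unsigned copies)
{
  long long splat_val = val;
  unsigned long long mask = (1ULL << bitsize) - 1;
  unsigned i;

  for (i = 2; i <= copies; i *= 2)
    {
      long long small_val;
      bitsize /= 2;
      mask >>= bitsize;
      small_val = splat_val >> bitsize;
      if (splat_val != ((small_val << bitsize)
                        | (small_val & (long long) mask)))
        return false;
      splat_val = small_val;
    }

  /* EASY_VECTOR_15: fits the 5-bit signed vspltis immediate. */
  return splat_val >= -16 && splat_val <= 15;
}

int
main (void)
{
  printf ("%d\n", splat_with_copies (0x00050005LL, 32, 2)); /* 1 */
  printf ("%d\n", splat_with_copies (0x00050006LL, 32, 2)); /* 0 */
  return 0;
}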
5146 /* Return true if OP is of the given MODE and can be synthesized
5147 with a vspltisb, vspltish or vspltisw. */
5149 bool
5150 easy_altivec_constant (rtx op, enum machine_mode mode)
5152 unsigned step, copies;
5154 if (mode == VOIDmode)
5155 mode = GET_MODE (op);
5156 else if (mode != GET_MODE (op))
5157 return false;
5159 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5160 constants. */
5161 if (mode == V2DFmode)
5162 return zero_constant (op, mode);
5164 else if (mode == V2DImode)
5166 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5167 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5168 return false;
5170 if (zero_constant (op, mode))
5171 return true;
5173 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5174 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5175 return true;
5177 return false;
5180 /* V1TImode is a special container for TImode. Ignore for now. */
5181 else if (mode == V1TImode)
5182 return false;
5184 /* Start with a vspltisw. */
5185 step = GET_MODE_NUNITS (mode) / 4;
5186 copies = 1;
5188 if (vspltis_constant (op, step, copies))
5189 return true;
5191 /* Then try with a vspltish. */
5192 if (step == 1)
5193 copies <<= 1;
5194 else
5195 step >>= 1;
5197 if (vspltis_constant (op, step, copies))
5198 return true;
5200 /* And finally a vspltisb. */
5201 if (step == 1)
5202 copies <<= 1;
5203 else
5204 step >>= 1;
5206 if (vspltis_constant (op, step, copies))
5207 return true;
5209 return false;
5212 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5213 result is OP. Abort if it is not possible. */
5216 gen_easy_altivec_constant (rtx op)
5218 enum machine_mode mode = GET_MODE (op);
5219 int nunits = GET_MODE_NUNITS (mode);
5220 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5221 unsigned step = nunits / 4;
5222 unsigned copies = 1;
5224 /* Start with a vspltisw. */
5225 if (vspltis_constant (op, step, copies))
5226 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5228 /* Then try with a vspltish. */
5229 if (step == 1)
5230 copies <<= 1;
5231 else
5232 step >>= 1;
5234 if (vspltis_constant (op, step, copies))
5235 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5237 /* And finally a vspltisb. */
5238 if (step == 1)
5239 copies <<= 1;
5240 else
5241 step >>= 1;
5243 if (vspltis_constant (op, step, copies))
5244 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5246 gcc_unreachable ();
5249 const char *
5250 output_vec_const_move (rtx *operands)
5252 int cst, cst2;
5253 enum machine_mode mode;
5254 rtx dest, vec;
5256 dest = operands[0];
5257 vec = operands[1];
5258 mode = GET_MODE (dest);
5260 if (TARGET_VSX)
5262 if (zero_constant (vec, mode))
5263 return "xxlxor %x0,%x0,%x0";
5265 if ((mode == V2DImode || mode == V1TImode)
5266 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5267 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5268 return "vspltisw %0,-1";
5271 if (TARGET_ALTIVEC)
5273 rtx splat_vec;
5274 if (zero_constant (vec, mode))
5275 return "vxor %0,%0,%0";
5277 splat_vec = gen_easy_altivec_constant (vec);
5278 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5279 operands[1] = XEXP (splat_vec, 0);
5280 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5281 return "#";
5283 switch (GET_MODE (splat_vec))
5285 case V4SImode:
5286 return "vspltisw %0,%1";
5288 case V8HImode:
5289 return "vspltish %0,%1";
5291 case V16QImode:
5292 return "vspltisb %0,%1";
5294 default:
5295 gcc_unreachable ();
5299 gcc_assert (TARGET_SPE);
5301 /* Vector constant 0 is handled by a splitter for V2SI, and in the
5302 patterns for V1DI, V4HI, and V2SF.
5304 FIXME: We should probably return # and add post reload
5305 splitters for these, but this way is so easy ;-). */
5306 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5307 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5308 operands[1] = CONST_VECTOR_ELT (vec, 0);
5309 operands[2] = CONST_VECTOR_ELT (vec, 1);
5310 if (cst == cst2)
5311 return "li %0,%1\n\tevmergelo %0,%0,%0";
5312 else if (WORDS_BIG_ENDIAN)
5313 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5314 else
5315 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
5318 /* Initialize TARGET of vector PAIRED to VALS. */
5320 void
5321 paired_expand_vector_init (rtx target, rtx vals)
5323 enum machine_mode mode = GET_MODE (target);
5324 int n_elts = GET_MODE_NUNITS (mode);
5325 int n_var = 0;
5326 rtx x, new_rtx, tmp, constant_op, op1, op2;
5327 int i;
5329 for (i = 0; i < n_elts; ++i)
5331 x = XVECEXP (vals, 0, i);
5332 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5333 ++n_var;
5335 if (n_var == 0)
5337 /* Load from constant pool. */
5338 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5339 return;
5342 if (n_var == 2)
5344 /* The vector is initialized only with non-constants. */
5345 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5346 XVECEXP (vals, 0, 1));
5348 emit_move_insn (target, new_rtx);
5349 return;
5352 /* One field is non-constant and the other one is a constant.  Load the
5353 constant from the constant pool and use the ps_merge instruction to
5354 construct the whole vector. */
5355 op1 = XVECEXP (vals, 0, 0);
5356 op2 = XVECEXP (vals, 0, 1);
5358 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5360 tmp = gen_reg_rtx (GET_MODE (constant_op));
5361 emit_move_insn (tmp, constant_op);
5363 if (CONSTANT_P (op1))
5364 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5365 else
5366 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5368 emit_move_insn (target, new_rtx);
5371 void
5372 paired_expand_vector_move (rtx operands[])
5374 rtx op0 = operands[0], op1 = operands[1];
5376 emit_move_insn (op0, op1);
5379 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5380 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5381 operands for the relation operation COND. This is a recursive
5382 function. */
5384 static void
5385 paired_emit_vector_compare (enum rtx_code rcode,
5386 rtx dest, rtx op0, rtx op1,
5387 rtx cc_op0, rtx cc_op1)
5389 rtx tmp = gen_reg_rtx (V2SFmode);
5390 rtx tmp1, max, min;
5392 gcc_assert (TARGET_PAIRED_FLOAT);
5393 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5395 switch (rcode)
5397 case LT:
5398 case LTU:
5399 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5400 return;
5401 case GE:
5402 case GEU:
5403 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5404 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5405 return;
5406 case LE:
5407 case LEU:
5408 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5409 return;
5410 case GT:
5411 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5412 return;
5413 case EQ:
5414 tmp1 = gen_reg_rtx (V2SFmode);
5415 max = gen_reg_rtx (V2SFmode);
5416 min = gen_reg_rtx (V2SFmode);
5419 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5420 emit_insn (gen_selv2sf4
5421 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5422 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5423 emit_insn (gen_selv2sf4
5424 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5425 emit_insn (gen_subv2sf3 (tmp1, min, max));
5426 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5427 return;
5428 case NE:
5429 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5430 return;
5431 case UNLE:
5432 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5433 return;
5434 case UNLT:
5435 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5436 return;
5437 case UNGE:
5438 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5439 return;
5440 case UNGT:
5441 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5442 return;
5443 default:
5444 gcc_unreachable ();
5447 return;
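/* Editorial aside -- a scalar C model (not GCC code) of the select-based
   comparisons emitted above.  The paired-single select picks its second
   operand where the first is >= 0, so every comparison reduces to
   subtractions feeding selects; EQ uses the min/max trick from the EQ
   case: min(c0,c1) - max(c0,c1) >= 0 exactly when c0 == c1 (NaNs aside).  */
static float ps_sel (float t, float a, float b) { return t >= 0.0f ? a : b; }

static float
model_ge (float a, float b, float c0, float c1)
{
  return ps_sel (c0 - c1, a, b);         /* (c0 >= c1) ? a : b */
}

static float
model_eq (float a, float b, float c0, float c1)
{
  float max = ps_sel (c0 - c1, c0, c1);  /* max (c0, c1) */
  float min = ps_sel (c1 - c0, c0, c1);  /* min (c0, c1) */
  return ps_sel (min - max, a, b);       /* (c0 == c1) ? a : b */
}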
5450 /* Emit vector conditional expression.
5451 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5452 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5454 int
5455 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5456 rtx cond, rtx cc_op0, rtx cc_op1)
5458 enum rtx_code rcode = GET_CODE (cond);
5460 if (!TARGET_PAIRED_FLOAT)
5461 return 0;
5463 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5465 return 1;
5468 /* Initialize vector TARGET to VALS. */
5470 void
5471 rs6000_expand_vector_init (rtx target, rtx vals)
5473 enum machine_mode mode = GET_MODE (target);
5474 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5475 int n_elts = GET_MODE_NUNITS (mode);
5476 int n_var = 0, one_var = -1;
5477 bool all_same = true, all_const_zero = true;
5478 rtx x, mem;
5479 int i;
5481 for (i = 0; i < n_elts; ++i)
5483 x = XVECEXP (vals, 0, i);
5484 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5485 ++n_var, one_var = i;
5486 else if (x != CONST0_RTX (inner_mode))
5487 all_const_zero = false;
5489 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5490 all_same = false;
5493 if (n_var == 0)
5495 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5496 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5497 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5499 /* Zero register. */
5500 emit_insn (gen_rtx_SET (VOIDmode, target,
5501 gen_rtx_XOR (mode, target, target)));
5502 return;
5504 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5506 /* Splat immediate. */
5507 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5508 return;
5510 else
5512 /* Load from constant pool. */
5513 emit_move_insn (target, const_vec);
5514 return;
5518 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5519 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5521 rtx op0 = XVECEXP (vals, 0, 0);
5522 rtx op1 = XVECEXP (vals, 0, 1);
5523 if (all_same)
5525 if (!MEM_P (op0) && !REG_P (op0))
5526 op0 = force_reg (inner_mode, op0);
5527 if (mode == V2DFmode)
5528 emit_insn (gen_vsx_splat_v2df (target, op0));
5529 else
5530 emit_insn (gen_vsx_splat_v2di (target, op0));
5532 else
5534 op0 = force_reg (inner_mode, op0);
5535 op1 = force_reg (inner_mode, op1);
5536 if (mode == V2DFmode)
5537 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5538 else
5539 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5541 return;
5544 /* With single precision floating point on VSX, we know that internally
5545 single precision is actually represented as a double.  Either make two
5546 V2DF vectors and convert those vectors to single precision, or do one
5547 conversion and splat the result to the other elements. */
5548 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5550 if (all_same)
5552 rtx freg = gen_reg_rtx (V4SFmode);
5553 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5554 rtx cvt = ((TARGET_XSCVDPSPN)
5555 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5556 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5558 emit_insn (cvt);
5559 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5561 else
5563 rtx dbl_even = gen_reg_rtx (V2DFmode);
5564 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5565 rtx flt_even = gen_reg_rtx (V4SFmode);
5566 rtx flt_odd = gen_reg_rtx (V4SFmode);
5567 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5568 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5569 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5570 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5572 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5573 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5574 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5575 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5576 rs6000_expand_extract_even (target, flt_even, flt_odd);
5578 return;
5581 /* Store value to stack temp. Load vector element. Splat. However, splat
5582 of 64-bit items is not supported on Altivec. */
5583 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5585 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5586 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5587 XVECEXP (vals, 0, 0));
5588 x = gen_rtx_UNSPEC (VOIDmode,
5589 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5590 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5591 gen_rtvec (2,
5592 gen_rtx_SET (VOIDmode,
5593 target, mem),
5594 x)));
5595 x = gen_rtx_VEC_SELECT (inner_mode, target,
5596 gen_rtx_PARALLEL (VOIDmode,
5597 gen_rtvec (1, const0_rtx)));
5598 emit_insn (gen_rtx_SET (VOIDmode, target,
5599 gen_rtx_VEC_DUPLICATE (mode, x)));
5600 return;
5603 /* One field is non-constant. Load constant then overwrite
5604 varying field. */
5605 if (n_var == 1)
5607 rtx copy = copy_rtx (vals);
5609 /* Load constant part of vector, substitute neighboring value for
5610 varying element. */
5611 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5612 rs6000_expand_vector_init (target, copy);
5614 /* Insert variable. */
5615 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5616 return;
5619 /* Construct the vector in memory one field at a time
5620 and load the whole vector. */
5621 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5622 for (i = 0; i < n_elts; i++)
5623 emit_move_insn (adjust_address_nv (mem, inner_mode,
5624 i * GET_MODE_SIZE (inner_mode)),
5625 XVECEXP (vals, 0, i));
5626 emit_move_insn (target, mem);
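/* Editorial note on the V4SF path above: { a, b, c, d } is built as the
   V2DF pairs { a, b } and { c, d }, converted with xvcvdpsp, which leaves
   the two singles in the even word slots ({ a, _, b, _ } and
   { c, _, d, _ }); the extract-even permute then interleaves them back
   into { a, b, c, d }.  */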
5629 /* Set field ELT of TARGET to VAL. */
5631 void
5632 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5634 enum machine_mode mode = GET_MODE (target);
5635 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5636 rtx reg = gen_reg_rtx (mode);
5637 rtx mask, mem, x;
5638 int width = GET_MODE_SIZE (inner_mode);
5639 int i;
5641 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5643 rtx (*set_func) (rtx, rtx, rtx, rtx)
5644 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5645 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5646 return;
5649 /* Simplify setting single element vectors like V1TImode. */
5650 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5652 emit_move_insn (target, gen_lowpart (mode, val));
5653 return;
5656 /* Load single variable value. */
5657 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5658 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5659 x = gen_rtx_UNSPEC (VOIDmode,
5660 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5661 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5662 gen_rtvec (2,
5663 gen_rtx_SET (VOIDmode,
5664 reg, mem),
5665 x)));
5667 /* Linear sequence. */
5668 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5669 for (i = 0; i < 16; ++i)
5670 XVECEXP (mask, 0, i) = GEN_INT (i);
5672 /* Set permute mask to insert element into target. */
5673 for (i = 0; i < width; ++i)
5674 XVECEXP (mask, 0, elt*width + i)
5675 = GEN_INT (i + 0x10);
5676 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5678 if (BYTES_BIG_ENDIAN)
5679 x = gen_rtx_UNSPEC (mode,
5680 gen_rtvec (3, target, reg,
5681 force_reg (V16QImode, x)),
5682 UNSPEC_VPERM);
5683 else
5685 /* Invert selector. We prefer to generate VNAND on P8 so
5686 that future fusion opportunities can kick in, but must
5687 generate VNOR elsewhere. */
5688 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5689 rtx iorx = (TARGET_P8_VECTOR
5690 ? gen_rtx_IOR (V16QImode, notx, notx)
5691 : gen_rtx_AND (V16QImode, notx, notx));
5692 rtx tmp = gen_reg_rtx (V16QImode);
5693 emit_insn (gen_rtx_SET (VOIDmode, tmp, iorx));
5695 /* Permute with operands reversed and adjusted selector. */
5696 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5697 UNSPEC_VPERM);
5700 emit_insn (gen_rtx_SET (VOIDmode, target, x));
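/* Editorial aside -- how the selector above looks for a concrete case
   (illustrative helper, not GCC code).  vperm selector bytes 0x00-0x0f
   copy TARGET, bytes 0x10-0x1f pull from the second input holding the
   freshly loaded element.  */
static void
build_insert_selector (unsigned char sel[16], int elt, int width)
{
  for (int i = 0; i < 16; ++i)
    sel[i] = i;                          /* identity: keep TARGET bytes */
  for (int i = 0; i < width; ++i)
    sel[elt * width + i] = 0x10 + i;     /* splice in the new element */
}
/* For V4SImode with ELT == 1 (WIDTH == 4) this yields
   { 0,1,2,3, 16,17,18,19, 8,9,10,11, 12,13,14,15 }.  */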
5703 /* Extract field ELT from VEC into TARGET. */
5705 void
5706 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5708 enum machine_mode mode = GET_MODE (vec);
5709 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5710 rtx mem;
5712 if (VECTOR_MEM_VSX_P (mode))
5714 switch (mode)
5716 default:
5717 break;
5718 case V1TImode:
5719 gcc_assert (elt == 0 && inner_mode == TImode);
5720 emit_move_insn (target, gen_lowpart (TImode, vec));
5721 break;
5722 case V2DFmode:
5723 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5724 return;
5725 case V2DImode:
5726 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5727 return;
5728 case V4SFmode:
5729 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5730 return;
5734 /* Allocate mode-sized buffer. */
5735 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5737 emit_move_insn (mem, vec);
5739 /* Add offset to field within buffer matching vector element. */
5740 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5742 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5745 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5746 implement ANDing by the mask IN. */
5747 void
5748 build_mask64_2_operands (rtx in, rtx *out)
5750 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5751 int shift;
5753 gcc_assert (GET_CODE (in) == CONST_INT);
5755 c = INTVAL (in);
5756 if (c & 1)
5758 /* Assume c initially something like 0x00fff000000fffff. The idea
5759 is to rotate the word so that the middle ^^^^^^ group of zeros
5760 is at the MS end and can be cleared with an rldicl mask. We then
5761 rotate back and clear off the MS ^^ group of zeros with a
5762 second rldicl. */
5763 c = ~c; /* c == 0xff000ffffff00000 */
5764 lsb = c & -c; /* lsb == 0x0000000000100000 */
5765 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5766 c = ~c; /* c == 0x00fff000000fffff */
5767 c &= -lsb; /* c == 0x00fff00000000000 */
5768 lsb = c & -c; /* lsb == 0x0000100000000000 */
5769 c = ~c; /* c == 0xff000fffffffffff */
5770 c &= -lsb; /* c == 0xff00000000000000 */
5771 shift = 0;
5772 while ((lsb >>= 1) != 0)
5773 shift++; /* shift == 44 on exit from loop */
5774 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5775 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5776 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5778 else
5780 /* Assume c initially something like 0xff000f0000000000. The idea
5781 is to rotate the word so that the ^^^ middle group of zeros
5782 is at the LS end and can be cleared with an rldicr mask. We then
5783 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5784 a second rldicr. */
5785 lsb = c & -c; /* lsb == 0x0000010000000000 */
5786 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5787 c = ~c; /* c == 0x00fff0ffffffffff */
5788 c &= -lsb; /* c == 0x00fff00000000000 */
5789 lsb = c & -c; /* lsb == 0x0000100000000000 */
5790 c = ~c; /* c == 0xff000fffffffffff */
5791 c &= -lsb; /* c == 0xff00000000000000 */
5792 shift = 0;
5793 while ((lsb >>= 1) != 0)
5794 shift++; /* shift == 44 on exit from loop */
5795 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5796 m1 >>= shift; /* m1 == 0x0000000000000fff */
5797 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5800 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5801 masks will be all 1's. We are guaranteed more than one transition. */
5802 out[0] = GEN_INT (64 - shift);
5803 out[1] = GEN_INT (m1);
5804 out[2] = GEN_INT (shift);
5805 out[3] = GEN_INT (m2);
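/* Editorial aside -- a standalone checker (not GCC code) for the claim
   above: rldicl computes rotl64 (x, SH) & M, so the emitted pair is
   rotl64 (rotl64 (x, 64 - shift) & m1, shift) & m2, which must equal
   x & c.  The test values are the worked examples from the comments.  */
#include <assert.h>
#include <stdint.h>

static uint64_t
rotl64 (uint64_t x, unsigned n)
{
  n &= 63;
  return n ? (x << n) | (x >> (64 - n)) : x;
}

static void
check_pair (uint64_t x, uint64_t c,
            unsigned shift, uint64_t m1, uint64_t m2)
{
  uint64_t t = rotl64 (x, 64 - shift) & m1;   /* first rotate-and-mask */
  t = rotl64 (t, shift) & m2;                 /* second rotate-and-mask */
  assert (t == (x & c));
}

int
main (void)
{
  uint64_t x = 0x123456789abcdef0ull;
  /* Odd case: c & 1 set.  */
  check_pair (x, 0x00fff000000fffffull,
              44, 0x000000ffffffffffull, 0x00ffffffffffffffull);
  /* Even case: c & 1 clear.  */
  check_pair (x, 0xff000f0000000000ull,
              44, 0xfffffffffffff000ull, 0xffffff0000000000ull);
  return 0;
}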
5808 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5810 bool
5811 invalid_e500_subreg (rtx op, enum machine_mode mode)
5813 if (TARGET_E500_DOUBLE)
5815 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5816 subreg:TI and reg:TF. Decimal float modes are like integer
5817 modes (only low part of each register used) for this
5818 purpose. */
5819 if (GET_CODE (op) == SUBREG
5820 && (mode == SImode || mode == DImode || mode == TImode
5821 || mode == DDmode || mode == TDmode || mode == PTImode)
5822 && REG_P (SUBREG_REG (op))
5823 && (GET_MODE (SUBREG_REG (op)) == DFmode
5824 || GET_MODE (SUBREG_REG (op)) == TFmode))
5825 return true;
5827 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5828 reg:TI. */
5829 if (GET_CODE (op) == SUBREG
5830 && (mode == DFmode || mode == TFmode)
5831 && REG_P (SUBREG_REG (op))
5832 && (GET_MODE (SUBREG_REG (op)) == DImode
5833 || GET_MODE (SUBREG_REG (op)) == TImode
5834 || GET_MODE (SUBREG_REG (op)) == PTImode
5835 || GET_MODE (SUBREG_REG (op)) == DDmode
5836 || GET_MODE (SUBREG_REG (op)) == TDmode))
5837 return true;
5840 if (TARGET_SPE
5841 && GET_CODE (op) == SUBREG
5842 && mode == SImode
5843 && REG_P (SUBREG_REG (op))
5844 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5845 return true;
5847 return false;
5850 /* Return the alignment of TYPE.  Existing alignment is ALIGN.  HOW
5851 selects whether the alignment is ABI-mandated, optional, or
5852 both ABI-mandated and optional. */
5854 unsigned int
5855 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5857 if (how != align_opt)
5859 if (TREE_CODE (type) == VECTOR_TYPE)
5861 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5862 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5864 if (align < 64)
5865 align = 64;
5867 else if (align < 128)
5868 align = 128;
5870 else if (TARGET_E500_DOUBLE
5871 && TREE_CODE (type) == REAL_TYPE
5872 && TYPE_MODE (type) == DFmode)
5874 if (align < 64)
5875 align = 64;
5879 if (how != align_abi)
5881 if (TREE_CODE (type) == ARRAY_TYPE
5882 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5884 if (align < BITS_PER_WORD)
5885 align = BITS_PER_WORD;
5889 return align;
5892 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
5894 bool
5895 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
5897 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5899 if (computed != 128)
5901 static bool warned;
5902 if (!warned && warn_psabi)
5904 warned = true;
5905 inform (input_location,
5906 "the layout of aggregates containing vectors with"
5907 " %d-byte alignment has changed in GCC 4.10",
5908 computed / BITS_PER_UNIT);
5911 /* In current GCC there is no special case. */
5912 return false;
5915 return false;
5918 /* AIX increases the natural record alignment to doubleword if the first
5919 field is an FP double, while the FP fields themselves remain word aligned. */
5921 unsigned int
5922 rs6000_special_round_type_align (tree type, unsigned int computed,
5923 unsigned int specified)
5925 unsigned int align = MAX (computed, specified);
5926 tree field = TYPE_FIELDS (type);
5928 /* Skip all non-field decls.  */
5929 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5930 field = DECL_CHAIN (field);
5932 if (field != NULL && field != type)
5934 type = TREE_TYPE (field);
5935 while (TREE_CODE (type) == ARRAY_TYPE)
5936 type = TREE_TYPE (type);
5938 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5939 align = MAX (align, 64);
5942 return align;
5945 /* Darwin increases record alignment to the natural alignment of
5946 the first field. */
5948 unsigned int
5949 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5950 unsigned int specified)
5952 unsigned int align = MAX (computed, specified);
5954 if (TYPE_PACKED (type))
5955 return align;
5957 /* Find the first field, looking down into aggregates. */
5958 do {
5959 tree field = TYPE_FIELDS (type);
5960 /* Skip all non-field decls.  */
5961 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5962 field = DECL_CHAIN (field);
5963 if (! field)
5964 break;
5965 /* A packed field does not contribute any extra alignment. */
5966 if (DECL_PACKED (field))
5967 return align;
5968 type = TREE_TYPE (field);
5969 while (TREE_CODE (type) == ARRAY_TYPE)
5970 type = TREE_TYPE (type);
5971 } while (AGGREGATE_TYPE_P (type));
5973 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5974 align = MAX (align, TYPE_ALIGN (type));
5976 return align;
5979 /* Return 1 for an operand in small memory on V.4/eabi. */
5981 int
5982 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5983 enum machine_mode mode ATTRIBUTE_UNUSED)
5985 #if TARGET_ELF
5986 rtx sym_ref;
5988 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5989 return 0;
5991 if (DEFAULT_ABI != ABI_V4)
5992 return 0;
5994 /* Vector and float memory instructions have a limited offset on the
5995 SPE, so using a vector or float variable directly as an operand is
5996 not useful. */
5997 if (TARGET_SPE
5998 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5999 return 0;
6001 if (GET_CODE (op) == SYMBOL_REF)
6002 sym_ref = op;
6004 else if (GET_CODE (op) != CONST
6005 || GET_CODE (XEXP (op, 0)) != PLUS
6006 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
6007 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
6008 return 0;
6010 else
6012 rtx sum = XEXP (op, 0);
6013 HOST_WIDE_INT summand;
6015 /* We have to be careful here, because it is the referenced address
6016 that must be 32k from _SDA_BASE_, not just the symbol. */
6017 summand = INTVAL (XEXP (sum, 1));
6018 if (summand < 0 || summand > g_switch_value)
6019 return 0;
6021 sym_ref = XEXP (sum, 0);
6024 return SYMBOL_REF_SMALL_P (sym_ref);
6025 #else
6026 return 0;
6027 #endif
6030 /* Return true if either operand is a general purpose register. */
6032 bool
6033 gpr_or_gpr_p (rtx op0, rtx op1)
6035 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
6036 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
6039 /* Return true if this is a direct move operation between GPR registers and
6040 floating point/VSX registers. */
6042 bool
6043 direct_move_p (rtx op0, rtx op1)
6045 int regno0, regno1;
6047 if (!REG_P (op0) || !REG_P (op1))
6048 return false;
6050 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
6051 return false;
6053 regno0 = REGNO (op0);
6054 regno1 = REGNO (op1);
6055 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
6056 return false;
6058 if (INT_REGNO_P (regno0))
6059 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
6061 else if (INT_REGNO_P (regno1))
6063 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
6064 return true;
6066 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
6067 return true;
6070 return false;
6073 /* Return true if this is a load or store quad operation. This function does
6074 not handle the atomic quad memory instructions. */
6076 bool
6077 quad_load_store_p (rtx op0, rtx op1)
6079 bool ret;
6081 if (!TARGET_QUAD_MEMORY)
6082 ret = false;
6084 else if (REG_P (op0) && MEM_P (op1))
6085 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6086 && quad_memory_operand (op1, GET_MODE (op1))
6087 && !reg_overlap_mentioned_p (op0, op1));
6089 else if (MEM_P (op0) && REG_P (op1))
6090 ret = (quad_memory_operand (op0, GET_MODE (op0))
6091 && quad_int_reg_operand (op1, GET_MODE (op1)));
6093 else
6094 ret = false;
6096 if (TARGET_DEBUG_ADDR)
6098 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6099 ret ? "true" : "false");
6100 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
6103 return ret;
6106 /* Given an address, return a constant offset term if one exists. */
6108 static rtx
6109 address_offset (rtx op)
6111 if (GET_CODE (op) == PRE_INC
6112 || GET_CODE (op) == PRE_DEC)
6113 op = XEXP (op, 0);
6114 else if (GET_CODE (op) == PRE_MODIFY
6115 || GET_CODE (op) == LO_SUM)
6116 op = XEXP (op, 1);
6118 if (GET_CODE (op) == CONST)
6119 op = XEXP (op, 0);
6121 if (GET_CODE (op) == PLUS)
6122 op = XEXP (op, 1);
6124 if (CONST_INT_P (op))
6125 return op;
6127 return NULL_RTX;
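/* Editorial examples for address_offset (derived from the code above):
     (plus (reg) (const_int 8))                               -> (const_int 8)
     (lo_sum (reg) (const (plus (symbol_ref) (const_int 4)))) -> (const_int 4)
     (reg)                                                    -> NULL_RTX  */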
6130 /* Return true if the MEM operand is a memory operand suitable for use
6131 with a (full width, possibly multiple) gpr load/store. On
6132 powerpc64 this means the offset must be divisible by 4.
6133 Implements 'Y' constraint.
6135 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6136 a constraint function we know the operand has satisfied a suitable
6137 memory predicate. Also accept some odd rtl generated by reload
6138 (see rs6000_legitimize_reload_address for various forms). It is
6139 important that reload rtl be accepted by appropriate constraints
6140 but not by the operand predicate.
6142 Offsetting a lo_sum should not be allowed, except where we know by
6143 alignment that a 32k boundary is not crossed, but see the ???
6144 comment in rs6000_legitimize_reload_address. Note that by
6145 "offsetting" here we mean a further offset to access parts of the
6146 MEM. It's fine to have a lo_sum where the inner address is offset
6147 from a sym, since the same sym+offset will appear in the high part
6148 of the address calculation. */
6150 bool
6151 mem_operand_gpr (rtx op, enum machine_mode mode)
6153 unsigned HOST_WIDE_INT offset;
6154 int extra;
6155 rtx addr = XEXP (op, 0);
6157 op = address_offset (addr);
6158 if (op == NULL_RTX)
6159 return true;
6161 offset = INTVAL (op);
6162 if (TARGET_POWERPC64 && (offset & 3) != 0)
6163 return false;
6165 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6166 if (extra < 0)
6167 extra = 0;
6169 if (GET_CODE (addr) == LO_SUM)
6170 /* For lo_sum addresses, we must allow any offset except one that
6171 causes a wrap, so test only the low 16 bits. */
6172 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6174 return offset + 0x8000 < 0x10000u - extra;
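/* Editorial note on the test above: offset + 0x8000 < 0x10000 - extra is
   the usual unsigned trick for -0x8000 <= offset <= 0x7fff - extra.  For
   example, a TImode access on powerpc64 has extra == 8, so the largest
   valid offset is 0x7ff7, leaving the ld of the second doubleword at
   offset + 8 still within a 16-bit displacement.  */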
6177 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6179 static bool
6180 reg_offset_addressing_ok_p (enum machine_mode mode)
6182 switch (mode)
6184 case V16QImode:
6185 case V8HImode:
6186 case V4SFmode:
6187 case V4SImode:
6188 case V2DFmode:
6189 case V2DImode:
6190 case V1TImode:
6191 case TImode:
6192 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6193 TImode is not a vector mode, if we want to use the VSX registers to
6194 move it around, we need to restrict ourselves to reg+reg
6195 addressing. */
6196 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6197 return false;
6198 break;
6200 case V4HImode:
6201 case V2SImode:
6202 case V1DImode:
6203 case V2SFmode:
6204 /* Paired vector modes. Only reg+reg addressing is valid. */
6205 if (TARGET_PAIRED_FLOAT)
6206 return false;
6207 break;
6209 case SDmode:
6210 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6211 addressing for the LFIWZX and STFIWX instructions. */
6212 if (TARGET_NO_SDMODE_STACK)
6213 return false;
6214 break;
6216 default:
6217 break;
6220 return true;
6223 static bool
6224 virtual_stack_registers_memory_p (rtx op)
6226 int regnum;
6228 if (GET_CODE (op) == REG)
6229 regnum = REGNO (op);
6231 else if (GET_CODE (op) == PLUS
6232 && GET_CODE (XEXP (op, 0)) == REG
6233 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6234 regnum = REGNO (XEXP (op, 0));
6236 else
6237 return false;
6239 return (regnum >= FIRST_VIRTUAL_REGISTER
6240 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6243 /* Return true if a MODE-sized memory access to OP plus OFFSET
6244 is known not to straddle a 32k boundary. */
6246 static bool
6247 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6248 enum machine_mode mode)
6250 tree decl, type;
6251 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6253 if (GET_CODE (op) != SYMBOL_REF)
6254 return false;
6256 dsize = GET_MODE_SIZE (mode);
6257 decl = SYMBOL_REF_DECL (op);
6258 if (!decl)
6260 if (dsize == 0)
6261 return false;
6263 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6264 replacing memory addresses with an anchor plus offset. We
6265 could find the decl by rummaging around in the block->objects
6266 VEC for the given offset but that seems like too much work. */
6267 dalign = BITS_PER_UNIT;
6268 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6269 && SYMBOL_REF_ANCHOR_P (op)
6270 && SYMBOL_REF_BLOCK (op) != NULL)
6272 struct object_block *block = SYMBOL_REF_BLOCK (op);
6274 dalign = block->alignment;
6275 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6277 else if (CONSTANT_POOL_ADDRESS_P (op))
6279 /* It would be nice to have get_pool_align ()... */
6280 enum machine_mode cmode = get_pool_mode (op);
6282 dalign = GET_MODE_ALIGNMENT (cmode);
6285 else if (DECL_P (decl))
6287 dalign = DECL_ALIGN (decl);
6289 if (dsize == 0)
6291 /* Allow BLKmode when the entire object is known to not
6292 cross a 32k boundary. */
6293 if (!DECL_SIZE_UNIT (decl))
6294 return false;
6296 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6297 return false;
6299 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6300 if (dsize > 32768)
6301 return false;
6303 return dalign / BITS_PER_UNIT >= dsize;
6306 else
6308 type = TREE_TYPE (decl);
6310 dalign = TYPE_ALIGN (type);
6311 if (CONSTANT_CLASS_P (decl))
6312 dalign = CONSTANT_ALIGNMENT (decl, dalign);
6313 else
6314 dalign = DATA_ALIGNMENT (decl, dalign);
6316 if (dsize == 0)
6318 /* BLKmode, check the entire object. */
6319 if (TREE_CODE (decl) == STRING_CST)
6320 dsize = TREE_STRING_LENGTH (decl);
6321 else if (TYPE_SIZE_UNIT (type)
6322 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
6323 dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6324 else
6325 return false;
6326 if (dsize > 32768)
6327 return false;
6329 return dalign / BITS_PER_UNIT >= dsize;
6333 /* Find how many bits of the alignment we know for this access. */
6334 mask = dalign / BITS_PER_UNIT - 1;
6335 lsb = offset & -offset;
6336 mask &= lsb - 1;
6337 dalign = mask + 1;
6339 return dalign >= dsize;
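/* Editorial worked example of the bit trick above: with a 16-byte aligned
   decl (dalign == 128 bits) and offset == 40, mask = 16 - 1 = 15,
   lsb = 40 & -40 = 8, mask &= 8 - 1 leaves 7, so dalign becomes 8: the
   access is known to be 8-byte aligned, and any dsize <= 8 cannot cross
   a 32k boundary there.  */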
6342 static bool
6343 constant_pool_expr_p (rtx op)
6345 rtx base, offset;
6347 split_const (op, &base, &offset);
6348 return (GET_CODE (base) == SYMBOL_REF
6349 && CONSTANT_POOL_ADDRESS_P (base)
6350 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6353 static const_rtx tocrel_base, tocrel_offset;
6355 /* Return true if OP is a toc pointer relative address (the output
6356 of create_TOC_reference). If STRICT, do not match high part or
6357 non-split -mcmodel=large/medium toc pointer relative addresses. */
6359 bool
6360 toc_relative_expr_p (const_rtx op, bool strict)
6362 if (!TARGET_TOC)
6363 return false;
6365 if (TARGET_CMODEL != CMODEL_SMALL)
6367 /* Only match the low part. */
6368 if (GET_CODE (op) == LO_SUM
6369 && REG_P (XEXP (op, 0))
6370 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6371 op = XEXP (op, 1);
6372 else if (strict)
6373 return false;
6376 tocrel_base = op;
6377 tocrel_offset = const0_rtx;
6378 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6380 tocrel_base = XEXP (op, 0);
6381 tocrel_offset = XEXP (op, 1);
6384 return (GET_CODE (tocrel_base) == UNSPEC
6385 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6388 /* Return true if X is a constant pool address, and also for cmodel=medium
6389 if X is a toc-relative address known to be offsettable within MODE. */
6391 bool
6392 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6393 bool strict)
6395 return (toc_relative_expr_p (x, strict)
6396 && (TARGET_CMODEL != CMODEL_MEDIUM
6397 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6398 || mode == QImode
6399 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6400 INTVAL (tocrel_offset), mode)));
6403 static bool
6404 legitimate_small_data_p (enum machine_mode mode, rtx x)
6406 return (DEFAULT_ABI == ABI_V4
6407 && !flag_pic && !TARGET_TOC
6408 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6409 && small_data_operand (x, mode));
6412 /* SPE offset addressing is limited to 5 bits' worth of doublewords. */
6413 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
6415 bool
6416 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6417 bool strict, bool worst_case)
6419 unsigned HOST_WIDE_INT offset;
6420 unsigned int extra;
6422 if (GET_CODE (x) != PLUS)
6423 return false;
6424 if (!REG_P (XEXP (x, 0)))
6425 return false;
6426 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6427 return false;
6428 if (!reg_offset_addressing_ok_p (mode))
6429 return virtual_stack_registers_memory_p (x);
6430 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6431 return true;
6432 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6433 return false;
6435 offset = INTVAL (XEXP (x, 1));
6436 extra = 0;
6437 switch (mode)
6439 case V4HImode:
6440 case V2SImode:
6441 case V1DImode:
6442 case V2SFmode:
6443 /* SPE vector modes. */
6444 return SPE_CONST_OFFSET_OK (offset);
6446 case DFmode:
6447 case DDmode:
6448 case DImode:
6449 /* On e500v2, we may have:
6451 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6453 Which gets addressed with evldd instructions. */
6454 if (TARGET_E500_DOUBLE)
6455 return SPE_CONST_OFFSET_OK (offset);
6457 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6458 addressing. */
6459 if (VECTOR_MEM_VSX_P (mode))
6460 return false;
6462 if (!worst_case)
6463 break;
6464 if (!TARGET_POWERPC64)
6465 extra = 4;
6466 else if (offset & 3)
6467 return false;
6468 break;
6470 case TFmode:
6471 if (TARGET_E500_DOUBLE)
6472 return (SPE_CONST_OFFSET_OK (offset)
6473 && SPE_CONST_OFFSET_OK (offset + 8));
6474 /* fall through */
6476 case TDmode:
6477 case TImode:
6478 case PTImode:
6479 extra = 8;
6480 if (!worst_case)
6481 break;
6482 if (!TARGET_POWERPC64)
6483 extra = 12;
6484 else if (offset & 3)
6485 return false;
6486 break;
6488 default:
6489 break;
6492 offset += 0x8000;
6493 return offset < 0x10000 - extra;
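/* Editorial note on the final test above: it accepts
   -0x8000 <= offset <= 0x7fff - extra, where EXTRA is how far beyond
   OFFSET the worst-case sequence of word/doubleword loads must still
   reach: e.g. 12 for TImode on 32-bit (four lwz at offset .. offset+12)
   and 8 on 64-bit (two ld at offset and offset+8).  */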
6496 bool
6497 legitimate_indexed_address_p (rtx x, int strict)
6499 rtx op0, op1;
6501 if (GET_CODE (x) != PLUS)
6502 return false;
6504 op0 = XEXP (x, 0);
6505 op1 = XEXP (x, 1);
6507 /* Recognize the rtl generated by reload which we know will later be
6508 replaced with proper base and index regs. */
6509 if (!strict
6510 && reload_in_progress
6511 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6512 && REG_P (op1))
6513 return true;
6515 return (REG_P (op0) && REG_P (op1)
6516 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6517 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6518 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6519 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6522 bool
6523 avoiding_indexed_address_p (enum machine_mode mode)
6525 /* Avoid indexed addressing for modes that have non-indexed
6526 load/store instruction forms. */
6527 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6530 bool
6531 legitimate_indirect_address_p (rtx x, int strict)
6533 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6536 bool
6537 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6539 if (!TARGET_MACHO || !flag_pic
6540 || mode != SImode || GET_CODE (x) != MEM)
6541 return false;
6542 x = XEXP (x, 0);
6544 if (GET_CODE (x) != LO_SUM)
6545 return false;
6546 if (GET_CODE (XEXP (x, 0)) != REG)
6547 return false;
6548 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6549 return false;
6550 x = XEXP (x, 1);
6552 return CONSTANT_P (x);
6555 static bool
6556 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6558 if (GET_CODE (x) != LO_SUM)
6559 return false;
6560 if (GET_CODE (XEXP (x, 0)) != REG)
6561 return false;
6562 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6563 return false;
6564 /* Restrict addressing for DI because of our SUBREG hackery. */
6565 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6566 return false;
6567 x = XEXP (x, 1);
6569 if (TARGET_ELF || TARGET_MACHO)
6571 bool large_toc_ok;
6573 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6574 return false;
6575 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that usually calls
6576 push_reload from the reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
6577 recognizes some LO_SUM addresses as valid even though this
6578 function says the opposite.  In most cases LRA can generate
6579 correct code for address reloads through its own transformations;
6580 only some LO_SUM cases are beyond it.  So we need code here,
6581 analogous to that in rs6000_legitimize_reload_address for
6582 LO_SUM, saying that some addresses are still valid. */
6583 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6584 && small_toc_ref (x, VOIDmode));
6585 if (TARGET_TOC && ! large_toc_ok)
6586 return false;
6587 if (GET_MODE_NUNITS (mode) != 1)
6588 return false;
6589 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6590 && !(/* ??? Assume floating point reg based on mode? */
6591 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6592 && (mode == DFmode || mode == DDmode)))
6593 return false;
6595 return CONSTANT_P (x) || large_toc_ok;
6598 return false;
6602 /* Try machine-dependent ways of modifying an illegitimate address
6603 to be legitimate. If we find one, return the new, valid address.
6604 This is used from only one place: `memory_address' in explow.c.
6606 OLDX is the address as it was before break_out_memory_refs was
6607 called. In some cases it is useful to look at this to decide what
6608 needs to be done.
6610 It is always safe for this function to do nothing. It exists to
6611 recognize opportunities to optimize the output.
6613 On RS/6000, first check for the sum of a register with a constant
6614 integer that is out of range. If so, generate code to add the
6615 constant with the low-order 16 bits masked to the register and force
6616 this result into another register (this can be done with `cau').
6617 Then generate an address of REG+(CONST&0xffff), allowing for the
6618 possibility of bit 16 being a one.
6620 Then check for the sum of a register and something not constant, try to
6621 load the other things into a register and return the sum. */
6623 static rtx
6624 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6625 enum machine_mode mode)
6627 unsigned int extra;
6629 if (!reg_offset_addressing_ok_p (mode))
6631 if (virtual_stack_registers_memory_p (x))
6632 return x;
6634 /* In theory we should not be seeing addresses of the form reg+0,
6635 but just in case it is generated, optimize it away. */
6636 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6637 return force_reg (Pmode, XEXP (x, 0));
6639 /* For TImode with load/store quad, restrict addresses to just a single
6640 pointer, so it works with both GPRs and VSX registers. */
6641 /* Make sure both operands are registers. */
6642 else if (GET_CODE (x) == PLUS
6643 && (mode != TImode || !TARGET_QUAD_MEMORY))
6644 return gen_rtx_PLUS (Pmode,
6645 force_reg (Pmode, XEXP (x, 0)),
6646 force_reg (Pmode, XEXP (x, 1)));
6647 else
6648 return force_reg (Pmode, x);
6650 if (GET_CODE (x) == SYMBOL_REF)
6652 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6653 if (model != 0)
6654 return rs6000_legitimize_tls_address (x, model);
6657 extra = 0;
6658 switch (mode)
6660 case TFmode:
6661 case TDmode:
6662 case TImode:
6663 case PTImode:
6664 /* As in legitimate_offset_address_p we do not assume
6665 worst-case. The mode here is just a hint as to the registers
6666 used. A TImode is usually in gprs, but may actually be in
6667 fprs. Leave worst-case scenario for reload to handle via
6668 insn constraints. PTImode is only GPRs. */
6669 extra = 8;
6670 break;
6671 default:
6672 break;
6675 if (GET_CODE (x) == PLUS
6676 && GET_CODE (XEXP (x, 0)) == REG
6677 && GET_CODE (XEXP (x, 1)) == CONST_INT
6678 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6679 >= 0x10000 - extra)
6680 && !(SPE_VECTOR_MODE (mode)
6681 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6683 HOST_WIDE_INT high_int, low_int;
6684 rtx sum;
6685 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6686 if (low_int >= 0x8000 - extra)
6687 low_int = 0;
6688 high_int = INTVAL (XEXP (x, 1)) - low_int;
6689 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6690 GEN_INT (high_int)), 0);
6691 return plus_constant (Pmode, sum, low_int);
6693 else if (GET_CODE (x) == PLUS
6694 && GET_CODE (XEXP (x, 0)) == REG
6695 && GET_CODE (XEXP (x, 1)) != CONST_INT
6696 && GET_MODE_NUNITS (mode) == 1
6697 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6698 || (/* ??? Assume floating point reg based on mode? */
6699 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6700 && (mode == DFmode || mode == DDmode)))
6701 && !avoiding_indexed_address_p (mode))
6703 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6704 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6706 else if (SPE_VECTOR_MODE (mode)
6707 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6709 if (mode == DImode)
6710 return x;
6711 /* We accept [reg + reg] and [reg + OFFSET]. */
6713 if (GET_CODE (x) == PLUS)
6715 rtx op1 = XEXP (x, 0);
6716 rtx op2 = XEXP (x, 1);
6717 rtx y;
6719 op1 = force_reg (Pmode, op1);
6721 if (GET_CODE (op2) != REG
6722 && (GET_CODE (op2) != CONST_INT
6723 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6724 || (GET_MODE_SIZE (mode) > 8
6725 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6726 op2 = force_reg (Pmode, op2);
6728 /* We can't always do [reg + reg] for these, because [reg +
6729 reg + offset] is not a legitimate addressing mode. */
6730 y = gen_rtx_PLUS (Pmode, op1, op2);
6732 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6733 return force_reg (Pmode, y);
6734 else
6735 return y;
6738 return force_reg (Pmode, x);
6740 else if ((TARGET_ELF
6741 #if TARGET_MACHO
6742 || !MACHO_DYNAMIC_NO_PIC_P
6743 #endif
6745 && TARGET_32BIT
6746 && TARGET_NO_TOC
6747 && ! flag_pic
6748 && GET_CODE (x) != CONST_INT
6749 && GET_CODE (x) != CONST_WIDE_INT
6750 && GET_CODE (x) != CONST_DOUBLE
6751 && CONSTANT_P (x)
6752 && GET_MODE_NUNITS (mode) == 1
6753 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6754 || (/* ??? Assume floating point reg based on mode? */
6755 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6756 && (mode == DFmode || mode == DDmode))))
6758 rtx reg = gen_reg_rtx (Pmode);
6759 if (TARGET_ELF)
6760 emit_insn (gen_elf_high (reg, x));
6761 else
6762 emit_insn (gen_macho_high (reg, x));
6763 return gen_rtx_LO_SUM (Pmode, reg, x);
6765 else if (TARGET_TOC
6766 && GET_CODE (x) == SYMBOL_REF
6767 && constant_pool_expr_p (x)
6768 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6769 return create_TOC_reference (x, NULL_RTX);
6770 else
6771 return x;
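/* Editorial aside -- a standalone sketch (not GCC code) of the split
   computed above: LOW is VAL sign-extended from 16 bits, so HIGH =
   VAL - LOW is a multiple of 0x10000 that addis can materialize.  (The
   code above additionally zeroes low_int when it would collide with
   EXTRA; this sketch shows only the plain split.)  */
#include <assert.h>

static void
split_displacement (long long val, long long *high, long long *low)
{
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;  /* sign-extend low 16 bits */
  *high = val - *low;                         /* what addis must supply */
}

int
main (void)
{
  long long hi, lo;
  split_displacement (0x12345678LL, &hi, &lo);
  assert (lo == 0x5678LL && hi == 0x12340000LL);
  split_displacement (0x1234f000LL, &hi, &lo); /* negative low half */
  assert (lo == -0x1000LL && hi == 0x12350000LL);
  return 0;
}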
6774 /* Debug version of rs6000_legitimize_address. */
6775 static rtx
6776 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6778 rtx ret;
6779 rtx insns;
6781 start_sequence ();
6782 ret = rs6000_legitimize_address (x, oldx, mode);
6783 insns = get_insns ();
6784 end_sequence ();
6786 if (ret != x)
6788 fprintf (stderr,
6789 "\nrs6000_legitimize_address: mode %s, old code %s, "
6790 "new code %s, modified\n",
6791 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6792 GET_RTX_NAME (GET_CODE (ret)));
6794 fprintf (stderr, "Original address:\n");
6795 debug_rtx (x);
6797 fprintf (stderr, "oldx:\n");
6798 debug_rtx (oldx);
6800 fprintf (stderr, "New address:\n");
6801 debug_rtx (ret);
6803 if (insns)
6805 fprintf (stderr, "Insns added:\n");
6806 debug_rtx_list (insns, 20);
6809 else
6811 fprintf (stderr,
6812 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6813 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6815 debug_rtx (x);
6818 if (insns)
6819 emit_insn (insns);
6821 return ret;
6824 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6825 We need to emit DTP-relative relocations. */
6827 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6828 static void
6829 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6831 switch (size)
6833 case 4:
6834 fputs ("\t.long\t", file);
6835 break;
6836 case 8:
6837 fputs (DOUBLE_INT_ASM_OP, file);
6838 break;
6839 default:
6840 gcc_unreachable ();
6842 output_addr_const (file, x);
6843 fputs ("@dtprel+0x8000", file);
6846 /* Return true if X is a symbol that refers to real (rather than emulated)
6847 TLS. */
6849 static bool
6850 rs6000_real_tls_symbol_ref_p (rtx x)
6852 return (GET_CODE (x) == SYMBOL_REF
6853 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
6856 /* In the name of slightly smaller debug output, and to cater to
6857 general assembler lossage, recognize various UNSPEC sequences
6858 and turn them back into a direct symbol reference. */
6860 static rtx
6861 rs6000_delegitimize_address (rtx orig_x)
6863 rtx x, y, offset;
6865 orig_x = delegitimize_mem_from_attrs (orig_x);
6866 x = orig_x;
6867 if (MEM_P (x))
6868 x = XEXP (x, 0);
6870 y = x;
6871 if (TARGET_CMODEL != CMODEL_SMALL
6872 && GET_CODE (y) == LO_SUM)
6873 y = XEXP (y, 1);
6875 offset = NULL_RTX;
6876 if (GET_CODE (y) == PLUS
6877 && GET_MODE (y) == Pmode
6878 && CONST_INT_P (XEXP (y, 1)))
6880 offset = XEXP (y, 1);
6881 y = XEXP (y, 0);
6884 if (GET_CODE (y) == UNSPEC
6885 && XINT (y, 1) == UNSPEC_TOCREL)
6887 #ifdef ENABLE_CHECKING
6888 if (REG_P (XVECEXP (y, 0, 1))
6889 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6891 /* All good. */
6893 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6895 /* Weirdness alert. df_note_compute can replace r2 with a
6896 debug_expr when this unspec is in a debug_insn.
6897 Seen in gcc.dg/pr51957-1.c */
6899 else
6901 debug_rtx (orig_x);
6902 abort ();
6904 #endif
6905 y = XVECEXP (y, 0, 0);
6907 #ifdef HAVE_AS_TLS
6908 /* Do not associate thread-local symbols with the original
6909 constant pool symbol. */
6910 if (TARGET_XCOFF
6911 && GET_CODE (y) == SYMBOL_REF
6912 && CONSTANT_POOL_ADDRESS_P (y)
6913 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
6914 return orig_x;
6915 #endif
6917 if (offset != NULL_RTX)
6918 y = gen_rtx_PLUS (Pmode, y, offset);
6919 if (!MEM_P (orig_x))
6920 return y;
6921 else
6922 return replace_equiv_address_nv (orig_x, y);
6925 if (TARGET_MACHO
6926 && GET_CODE (orig_x) == LO_SUM
6927 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6929 y = XEXP (XEXP (orig_x, 1), 0);
6930 if (GET_CODE (y) == UNSPEC
6931 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6932 return XVECEXP (y, 0, 0);
6935 return orig_x;
6938 /* Return true if X shouldn't be emitted into the debug info.
6939 The linker doesn't like .toc section references from
6940 .debug_* sections, so reject .toc section symbols. */
6942 static bool
6943 rs6000_const_not_ok_for_debug_p (rtx x)
6945 if (GET_CODE (x) == SYMBOL_REF
6946 && CONSTANT_POOL_ADDRESS_P (x))
6948 rtx c = get_pool_constant (x);
6949 enum machine_mode cmode = get_pool_mode (x);
6950 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6951 return true;
6954 return false;
6957 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6959 static GTY(()) rtx rs6000_tls_symbol;
6960 static rtx
6961 rs6000_tls_get_addr (void)
6963 if (!rs6000_tls_symbol)
6964 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6966 return rs6000_tls_symbol;
6969 /* Construct the SYMBOL_REF for TLS GOT references. */
6971 static GTY(()) rtx rs6000_got_symbol;
6972 static rtx
6973 rs6000_got_sym (void)
6975 if (!rs6000_got_symbol)
6977 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6978 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6979 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6982 return rs6000_got_symbol;
6985 /* AIX Thread-Local Address support. */
6987 static rtx
6988 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6990 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6991 const char *name;
6992 char *tlsname;
6994 name = XSTR (addr, 0);
6995 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
6996 or the symbol will be in the TLS private data section. */
6997 if (name[strlen (name) - 1] != ']'
6998 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6999 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
7001 tlsname = XALLOCAVEC (char, strlen (name) + 4);
7002 strcpy (tlsname, name);
7003 strcat (tlsname,
7004 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
7005 tlsaddr = copy_rtx (addr);
7006 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
7008 else
7009 tlsaddr = addr;
7011 /* Place addr into TOC constant pool. */
7012 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
7014 /* Output the TOC entry and create the MEM referencing the value. */
7015 if (constant_pool_expr_p (XEXP (sym, 0))
7016 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
7018 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
7019 mem = gen_const_mem (Pmode, tocref);
7020 set_mem_alias_set (mem, get_TOC_alias_set ());
7022 else
7023 return sym;
7025 /* Use global-dynamic for local-dynamic. */
7026 if (model == TLS_MODEL_GLOBAL_DYNAMIC
7027 || model == TLS_MODEL_LOCAL_DYNAMIC)
7029 /* Create new TOC reference for @m symbol. */
7030 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
7031 tlsname = XALLOCAVEC (char, strlen (name) + 1);
7032 strcpy (tlsname, "*LCM");
7033 strcat (tlsname, name + 3);
7034 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
7035 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
7036 tocref = create_TOC_reference (modaddr, NULL_RTX);
7037 rtx modmem = gen_const_mem (Pmode, tocref);
7038 set_mem_alias_set (modmem, get_TOC_alias_set ());
7040 rtx modreg = gen_reg_rtx (Pmode);
7041 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
7043 tmpreg = gen_reg_rtx (Pmode);
7044 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7046 dest = gen_reg_rtx (Pmode);
7047 if (TARGET_32BIT)
7048 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
7049 else
7050 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
7051 return dest;
7053 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
7054 else if (TARGET_32BIT)
7056 tlsreg = gen_reg_rtx (SImode);
7057 emit_insn (gen_tls_get_tpointer (tlsreg));
7059 else
7060 tlsreg = gen_rtx_REG (DImode, 13);
7062 /* Load the TOC value into temporary register. */
7063 tmpreg = gen_reg_rtx (Pmode);
7064 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7065 set_unique_reg_note (get_last_insn (), REG_EQUAL,
7066 gen_rtx_MINUS (Pmode, addr, tlsreg));
7068 /* Add TOC symbol value to TLS pointer. */
7069 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
7071 return dest;
7074 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7075 this (thread-local) address. */
7077 static rtx
7078 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
7080 rtx dest, insn;
7082 if (TARGET_XCOFF)
7083 return rs6000_legitimize_tls_address_aix (addr, model);
7085 dest = gen_reg_rtx (Pmode);
7086 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
7088 rtx tlsreg;
7090 if (TARGET_64BIT)
7092 tlsreg = gen_rtx_REG (Pmode, 13);
7093 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7095 else
7097 tlsreg = gen_rtx_REG (Pmode, 2);
7098 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7100 emit_insn (insn);
7102 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7104 rtx tlsreg, tmp;
7106 tmp = gen_reg_rtx (Pmode);
7107 if (TARGET_64BIT)
7109 tlsreg = gen_rtx_REG (Pmode, 13);
7110 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7112 else
7114 tlsreg = gen_rtx_REG (Pmode, 2);
7115 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7117 emit_insn (insn);
7118 if (TARGET_64BIT)
7119 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7120 else
7121 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7122 emit_insn (insn);
7124 else
7126 rtx r3, got, tga, tmp1, tmp2, call_insn;
7128 /* We currently use relocations like @got@tlsgd for tls, which
7129 means the linker will handle allocation of tls entries, placing
7130 them in the .got section. So use a pointer to the .got section,
7131 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7132 or to secondary GOT sections used by 32-bit -fPIC. */
7133 if (TARGET_64BIT)
7134 got = gen_rtx_REG (Pmode, 2);
7135 else
7137 if (flag_pic == 1)
7138 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7139 else
7141 rtx gsym = rs6000_got_sym ();
7142 got = gen_reg_rtx (Pmode);
7143 if (flag_pic == 0)
7144 rs6000_emit_move (got, gsym, Pmode);
7145 else
7147 rtx mem, lab, last;
7149 tmp1 = gen_reg_rtx (Pmode);
7150 tmp2 = gen_reg_rtx (Pmode);
7151 mem = gen_const_mem (Pmode, tmp1);
7152 lab = gen_label_rtx ();
7153 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7154 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7155 if (TARGET_LINK_STACK)
7156 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7157 emit_move_insn (tmp2, mem);
7158 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7159 set_unique_reg_note (last, REG_EQUAL, gsym);
7164 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7166 tga = rs6000_tls_get_addr ();
7167 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7168 1, const0_rtx, Pmode);
7170 r3 = gen_rtx_REG (Pmode, 3);
7171 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7173 if (TARGET_64BIT)
7174 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7175 else
7176 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7178 else if (DEFAULT_ABI == ABI_V4)
7179 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7180 else
7181 gcc_unreachable ();
7182 call_insn = last_call_insn ();
7183 PATTERN (call_insn) = insn;
7184 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7185 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7186 pic_offset_table_rtx);
7188 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7190 tga = rs6000_tls_get_addr ();
7191 tmp1 = gen_reg_rtx (Pmode);
7192 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7193 1, const0_rtx, Pmode);
7195 r3 = gen_rtx_REG (Pmode, 3);
7196 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7198 if (TARGET_64BIT)
7199 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7200 else
7201 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7203 else if (DEFAULT_ABI == ABI_V4)
7204 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7205 else
7206 gcc_unreachable ();
7207 call_insn = last_call_insn ();
7208 PATTERN (call_insn) = insn;
7209 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7210 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7211 pic_offset_table_rtx);
7213 if (rs6000_tls_size == 16)
7215 if (TARGET_64BIT)
7216 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7217 else
7218 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7220 else if (rs6000_tls_size == 32)
7222 tmp2 = gen_reg_rtx (Pmode);
7223 if (TARGET_64BIT)
7224 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7225 else
7226 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7227 emit_insn (insn);
7228 if (TARGET_64BIT)
7229 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7230 else
7231 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7233 else
7235 tmp2 = gen_reg_rtx (Pmode);
7236 if (TARGET_64BIT)
7237 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7238 else
7239 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7240 emit_insn (insn);
7241 insn = gen_rtx_SET (Pmode, dest,
7242 gen_rtx_PLUS (Pmode, tmp2, tmp1));
7244 emit_insn (insn);
7246 else
7248 /* IE, or 64-bit offset LE. */
7249 tmp2 = gen_reg_rtx (Pmode);
7250 if (TARGET_64BIT)
7251 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7252 else
7253 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7254 emit_insn (insn);
7255 if (TARGET_64BIT)
7256 insn = gen_tls_tls_64 (dest, tmp2, addr);
7257 else
7258 insn = gen_tls_tls_32 (dest, tmp2, addr);
7259 emit_insn (insn);
7263 return dest;
7266 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7268 static bool
7269 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7271 if (GET_CODE (x) == HIGH
7272 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7273 return true;
7275 /* A TLS symbol in the TOC cannot contain a sum. */
7276 if (GET_CODE (x) == CONST
7277 && GET_CODE (XEXP (x, 0)) == PLUS
7278 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7279 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7280 return true;
7282 /* Do not place an ELF TLS symbol in the constant pool. */
7283 return TARGET_ELF && tls_referenced_p (x);
7286 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7287 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7288 can be addressed relative to the toc pointer. */
7290 static bool
7291 use_toc_relative_ref (rtx sym)
7293 return ((constant_pool_expr_p (sym)
7294 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7295 get_pool_mode (sym)))
7296 || (TARGET_CMODEL == CMODEL_MEDIUM
7297 && SYMBOL_REF_LOCAL_P (sym)));
7300 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7301 replace the input X, or the original X if no replacement is called for.
7302 The output parameter *WIN is 1 if the calling macro should goto WIN,
7303 0 if it should not.
7305 For RS/6000, we wish to handle large displacements off a base
7306 register by splitting the addend across an addi/addis and the mem insn.
7307 This cuts the number of extra insns needed from 3 to 1.
7309 On Darwin, we use this to generate code for floating point constants.
7310 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7311 The Darwin code is inside #if TARGET_MACHO because only then are the
7312 machopic_* functions defined. */
7313 static rtx
7314 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
7315 int opnum, int type,
7316 int ind_levels ATTRIBUTE_UNUSED, int *win)
7318 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7320 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7321 DFmode/DImode MEM. */
7322 if (reg_offset_p
7323 && opnum == 1
7324 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7325 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7326 reg_offset_p = false;
7328 /* We must recognize output that we have already generated ourselves. */
7329 if (GET_CODE (x) == PLUS
7330 && GET_CODE (XEXP (x, 0)) == PLUS
7331 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7332 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7333 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7335 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7336 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7337 opnum, (enum reload_type) type);
7338 *win = 1;
7339 return x;
7342 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7343 if (GET_CODE (x) == LO_SUM
7344 && GET_CODE (XEXP (x, 0)) == HIGH)
7346 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7347 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7348 opnum, (enum reload_type) type);
7349 *win = 1;
7350 return x;
7353 #if TARGET_MACHO
7354 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7355 && GET_CODE (x) == LO_SUM
7356 && GET_CODE (XEXP (x, 0)) == PLUS
7357 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7358 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7359 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7360 && machopic_operand_p (XEXP (x, 1)))
7362 /* Result of previous invocation of this function on Darwin
7363 floating point constant. */
7364 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7365 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7366 opnum, (enum reload_type) type);
7367 *win = 1;
7368 return x;
7370 #endif
7372 if (TARGET_CMODEL != CMODEL_SMALL
7373 && reg_offset_p
7374 && small_toc_ref (x, VOIDmode))
7376 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7377 x = gen_rtx_LO_SUM (Pmode, hi, x);
7378 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7379 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7380 opnum, (enum reload_type) type);
7381 *win = 1;
7382 return x;
7385 if (GET_CODE (x) == PLUS
7386 && GET_CODE (XEXP (x, 0)) == REG
7387 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7388 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7389 && GET_CODE (XEXP (x, 1)) == CONST_INT
7390 && reg_offset_p
7391 && !SPE_VECTOR_MODE (mode)
7392 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7393 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7395 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7396 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7397 HOST_WIDE_INT high
7398 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
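/* Worked example, for illustration: val = 0x12348000 gives
   low = -0x8000 and high = 0x12350000, so high + low == val and
   each part fits its 16-bit immediate.  The idiom

     low = ((val & 0xffff) ^ 0x8000) - 0x8000;

   sign-extends the low 16 bits portably, without relying on
   implementation-defined right shifts of negative values.  */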
7400 /* Check for 32-bit overflow. */
7401 if (high + low != val)
7403 *win = 0;
7404 return x;
7407 /* Reload the high part into a base reg; leave the low part
7408 in the mem directly. */
7410 x = gen_rtx_PLUS (GET_MODE (x),
7411 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7412 GEN_INT (high)),
7413 GEN_INT (low));
7415 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7416 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7417 opnum, (enum reload_type) type);
7418 *win = 1;
7419 return x;
7422 if (GET_CODE (x) == SYMBOL_REF
7423 && reg_offset_p
7424 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7425 && !SPE_VECTOR_MODE (mode)
7426 #if TARGET_MACHO
7427 && DEFAULT_ABI == ABI_DARWIN
7428 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7429 && machopic_symbol_defined_p (x)
7430 #else
7431 && DEFAULT_ABI == ABI_V4
7432 && !flag_pic
7433 #endif
7434 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7435 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7436 without fprs.
7437 ??? Assume floating point reg based on mode? This assumption is
7438 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7439 where reload ends up doing a DFmode load of a constant from
7440 mem using two gprs. Unfortunately, at this point reload
7441 hasn't yet selected regs so poking around in reload data
7442 won't help and even if we could figure out the regs reliably,
7443 we'd still want to allow this transformation when the mem is
7444 naturally aligned. Since we say the address is good here, we
7445 can't disable offsets from LO_SUMs in mem_operand_gpr.
7446 FIXME: Allow offset from lo_sum for other modes too, when
7447 mem is sufficiently aligned. */
7448 && mode != TFmode
7449 && mode != TDmode
7450 && (mode != TImode || !TARGET_VSX_TIMODE)
7451 && mode != PTImode
7452 && (mode != DImode || TARGET_POWERPC64)
7453 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7454 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7456 #if TARGET_MACHO
7457 if (flag_pic)
7459 rtx offset = machopic_gen_offset (x);
7460 x = gen_rtx_LO_SUM (GET_MODE (x),
7461 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7462 gen_rtx_HIGH (Pmode, offset)), offset);
7464 else
7465 #endif
7466 x = gen_rtx_LO_SUM (GET_MODE (x),
7467 gen_rtx_HIGH (Pmode, x), x);
7469 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7470 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7471 opnum, (enum reload_type) type);
7472 *win = 1;
7473 return x;
7476 /* Reload an offset address wrapped by an AND that represents the
7477 masking of the lower bits. Strip the outer AND and let reload
7478 convert the offset address into an indirect address. For VSX,
7479 force reload to create the address with an AND in a separate
7480 register, because we can't guarantee an altivec register will
7481 be used. */
7482 if (VECTOR_MEM_ALTIVEC_P (mode)
7483 && GET_CODE (x) == AND
7484 && GET_CODE (XEXP (x, 0)) == PLUS
7485 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7486 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7487 && GET_CODE (XEXP (x, 1)) == CONST_INT
7488 && INTVAL (XEXP (x, 1)) == -16)
7490 x = XEXP (x, 0);
7491 *win = 1;
7492 return x;
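/* Example of the form stripped here: (and (plus r9 (const_int 48))
   (const_int -16)) models lvx/stvx ignoring the low four address
   bits; dropping the AND lets reload turn the inner PLUS into an
   indirect address.  */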
7495 if (TARGET_TOC
7496 && reg_offset_p
7497 && GET_CODE (x) == SYMBOL_REF
7498 && use_toc_relative_ref (x))
7500 x = create_TOC_reference (x, NULL_RTX);
7501 if (TARGET_CMODEL != CMODEL_SMALL)
7502 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7503 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7504 opnum, (enum reload_type) type);
7505 *win = 1;
7506 return x;
7508 *win = 0;
7509 return x;
7512 /* Debug version of rs6000_legitimize_reload_address. */
7513 static rtx
7514 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7515 int opnum, int type,
7516 int ind_levels, int *win)
7518 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7519 ind_levels, win);
7520 fprintf (stderr,
7521 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7522 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7523 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7524 debug_rtx (x);
7526 if (x == ret)
7527 fprintf (stderr, "Same address returned\n");
7528 else if (!ret)
7529 fprintf (stderr, "NULL returned\n");
7530 else
7532 fprintf (stderr, "New address:\n");
7533 debug_rtx (ret);
7536 return ret;
7539 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7540 that is a valid memory address for an instruction.
7541 The MODE argument is the machine mode for the MEM expression
7542 that wants to use this address.
7544 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
7545 refers to a constant pool entry of an address (or the sum of it
7546 plus a constant), a short (16-bit signed) constant plus a register,
7547 the sum of two registers, or a register indirect, possibly with an
7548 auto-increment. For DFmode, DDmode and DImode with a constant plus
7549 register, we must ensure that both words are addressable, or, on
7550 PowerPC64, that the offset is word aligned.
7552 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7553 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7554 because adjacent memory cells are accessed by adding word-sized offsets
7555 during assembly output. */
7556 static bool
7557 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7559 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7561 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7562 if (VECTOR_MEM_ALTIVEC_P (mode)
7563 && GET_CODE (x) == AND
7564 && GET_CODE (XEXP (x, 1)) == CONST_INT
7565 && INTVAL (XEXP (x, 1)) == -16)
7566 x = XEXP (x, 0);
7568 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7569 return 0;
7570 if (legitimate_indirect_address_p (x, reg_ok_strict))
7571 return 1;
7572 if (TARGET_UPDATE
7573 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7574 && mode_supports_pre_incdec_p (mode)
7575 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7576 return 1;
7577 if (virtual_stack_registers_memory_p (x))
7578 return 1;
7579 if (reg_offset_p && legitimate_small_data_p (mode, x))
7580 return 1;
7581 if (reg_offset_p
7582 && legitimate_constant_pool_address_p (x, mode,
7583 reg_ok_strict || lra_in_progress))
7584 return 1;
7585 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7586 allow register indirect addresses. This will allow the values to go in
7587 either GPRs or VSX registers without reloading. The vector types would
7588 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7589 somewhat split, in that some uses are GPR based, and some VSX based. */
7590 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7591 return 0;
7592 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7593 if (! reg_ok_strict
7594 && reg_offset_p
7595 && GET_CODE (x) == PLUS
7596 && GET_CODE (XEXP (x, 0)) == REG
7597 && (XEXP (x, 0) == virtual_stack_vars_rtx
7598 || XEXP (x, 0) == arg_pointer_rtx)
7599 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7600 return 1;
7601 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7602 return 1;
7603 if (mode != TFmode
7604 && mode != TDmode
7605 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7606 || TARGET_POWERPC64
7607 || (mode != DFmode && mode != DDmode)
7608 || (TARGET_E500_DOUBLE && mode != DDmode))
7609 && (TARGET_POWERPC64 || mode != DImode)
7610 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7611 && mode != PTImode
7612 && !avoiding_indexed_address_p (mode)
7613 && legitimate_indexed_address_p (x, reg_ok_strict))
7614 return 1;
7615 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7616 && mode_supports_pre_modify_p (mode)
7617 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7618 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7619 reg_ok_strict, false)
7620 || (!avoiding_indexed_address_p (mode)
7621 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7622 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7623 return 1;
7624 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7625 return 1;
7626 return 0;
7629 /* Debug version of rs6000_legitimate_address_p. */
7630 static bool
7631 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7632 bool reg_ok_strict)
7634 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7635 fprintf (stderr,
7636 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7637 "strict = %d, reload = %s, code = %s\n",
7638 ret ? "true" : "false",
7639 GET_MODE_NAME (mode),
7640 reg_ok_strict,
7641 (reload_completed
7642 ? "after"
7643 : (reload_in_progress ? "progress" : "before")),
7644 GET_RTX_NAME (GET_CODE (x)));
7645 debug_rtx (x);
7647 return ret;
7650 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7652 static bool
7653 rs6000_mode_dependent_address_p (const_rtx addr,
7654 addr_space_t as ATTRIBUTE_UNUSED)
7656 return rs6000_mode_dependent_address_ptr (addr);
7659 /* Go to LABEL if ADDR (a legitimate address expression)
7660 has an effect that depends on the machine mode it is used for.
7662 On the RS/6000 this is true of all integral offsets (since AltiVec
7663 and VSX modes don't allow them) and of any pre-increment or decrement.
7665 ??? Except that due to conceptual problems in offsettable_address_p
7666 we can't really report the problems of integral offsets. So leave
7667 this assuming that the adjustable offset must be valid for the
7668 sub-words of a TFmode operand, which is what we had before. */
7670 static bool
7671 rs6000_mode_dependent_address (const_rtx addr)
7673 switch (GET_CODE (addr))
7675 case PLUS:
7676 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7677 is considered a legitimate address before reload, so there
7678 are no offset restrictions in that case. Note that this
7679 condition is safe in strict mode because any address involving
7680 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7681 been rejected as illegitimate. */
7682 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7683 && XEXP (addr, 0) != arg_pointer_rtx
7684 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7686 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
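/* Adding 0x8000 biases the signed 16-bit displacement range onto
   [0, 0xffff], so the test below asks whether the last word of a
   worst-case 16-byte access still has a representable offset: that
   word sits at val + 12 with 4-byte words, or val + 8 with 8-byte
   words on PowerPC64, hence the 8-versus-12 slack.  */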
7687 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7689 break;
7691 case LO_SUM:
7692 /* Anything in the constant pool is sufficiently aligned that
7693 all bytes have the same high part address. */
7694 return !legitimate_constant_pool_address_p (addr, QImode, false);
7696 /* Auto-increment cases are now treated generically in recog.c. */
7697 case PRE_MODIFY:
7698 return TARGET_UPDATE;
7700 /* AND is only allowed in Altivec loads. */
7701 case AND:
7702 return true;
7704 default:
7705 break;
7708 return false;
7711 /* Debug version of rs6000_mode_dependent_address. */
7712 static bool
7713 rs6000_debug_mode_dependent_address (const_rtx addr)
7715 bool ret = rs6000_mode_dependent_address (addr);
7717 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7718 ret ? "true" : "false");
7719 debug_rtx (addr);
7721 return ret;
7724 /* Implement FIND_BASE_TERM. */
7726 rtx
7727 rs6000_find_base_term (rtx op)
7729 rtx base;
7731 base = op;
7732 if (GET_CODE (base) == CONST)
7733 base = XEXP (base, 0);
7734 if (GET_CODE (base) == PLUS)
7735 base = XEXP (base, 0);
7736 if (GET_CODE (base) == UNSPEC)
7737 switch (XINT (base, 1))
7739 case UNSPEC_TOCREL:
7740 case UNSPEC_MACHOPIC_OFFSET:
7741 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7742 for aliasing purposes. */
7743 return XVECEXP (base, 0, 0);
7746 return op;
7749 /* More elaborate version of recog's offsettable_memref_p predicate
7750 that works around the ??? note of rs6000_mode_dependent_address.
7751 In particular it accepts
7753 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7755 in 32-bit mode, which the recog predicate rejects.
7757 static bool
7758 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7760 bool worst_case;
7762 if (!MEM_P (op))
7763 return false;
7765 /* First mimic offsettable_memref_p. */
7766 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7767 return true;
7769 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7770 the latter predicate knows nothing about the mode of the memory
7771 reference and, therefore, assumes that it is the largest supported
7772 mode (TFmode). As a consequence, legitimate offsettable memory
7773 references are rejected. rs6000_legitimate_offset_address_p contains
7774 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7775 at least with a little bit of help here given that we know the
7776 actual registers used. */
7777 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7778 || GET_MODE_SIZE (reg_mode) == 4);
7779 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7780 true, worst_case);
7783 /* Change register usage conditional on target flags. */
7784 static void
7785 rs6000_conditional_register_usage (void)
7787 int i;
7789 if (TARGET_DEBUG_TARGET)
7790 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7792 /* Set MQ register fixed (already call_used) so that it will not be
7793 allocated. */
7794 fixed_regs[64] = 1;
7796 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7797 if (TARGET_64BIT)
7798 fixed_regs[13] = call_used_regs[13]
7799 = call_really_used_regs[13] = 1;
7801 /* Conditionally disable FPRs. */
7802 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7803 for (i = 32; i < 64; i++)
7804 fixed_regs[i] = call_used_regs[i]
7805 = call_really_used_regs[i] = 1;
7807 /* The TOC register is not killed across calls in a way that is
7808 visible to the compiler. */
7809 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7810 call_really_used_regs[2] = 0;
7812 if (DEFAULT_ABI == ABI_V4
7813 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7814 && flag_pic == 2)
7815 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7817 if (DEFAULT_ABI == ABI_V4
7818 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7819 && flag_pic == 1)
7820 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7821 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7822 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7824 if (DEFAULT_ABI == ABI_DARWIN
7825 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7826 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7827 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7828 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7830 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7831 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7832 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7834 if (TARGET_SPE)
7836 global_regs[SPEFSCR_REGNO] = 1;
7837 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7838 registers in prologues and epilogues. We no longer use r14
7839 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7840 pool for link-compatibility with older versions of GCC. Once
7841 "old" code has died out, we can return r14 to the allocation
7842 pool. */
7843 fixed_regs[14]
7844 = call_used_regs[14]
7845 = call_really_used_regs[14] = 1;
7848 if (!TARGET_ALTIVEC && !TARGET_VSX)
7850 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7851 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7852 call_really_used_regs[VRSAVE_REGNO] = 1;
7855 if (TARGET_ALTIVEC || TARGET_VSX)
7856 global_regs[VSCR_REGNO] = 1;
7858 if (TARGET_ALTIVEC_ABI)
7860 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7861 call_used_regs[i] = call_really_used_regs[i] = 1;
7863 /* AIX reserves VR20:31 in non-extended ABI mode. */
7864 if (TARGET_XCOFF)
7865 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7866 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7871 /* Output insns to set DEST equal to the constant SOURCE as a series of
7872 lis, ori and shl instructions and return TRUE. */
7874 bool
7875 rs6000_emit_set_const (rtx dest, rtx source)
7877 enum machine_mode mode = GET_MODE (dest);
7878 rtx temp, insn, set;
7879 HOST_WIDE_INT c;
7881 gcc_checking_assert (CONST_INT_P (source));
7882 c = INTVAL (source);
7883 switch (mode)
7885 case QImode:
7886 case HImode:
7887 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7888 return true;
7890 case SImode:
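/* A 32-bit constant takes at most two insns; e.g., as a sketch,
   c = 0x12345678 becomes

     lis  tmp, 0x1234        # tmp  = 0x12340000
     ori  dest, tmp, 0x5678  # dest = 0x12345678

   The first SET below is the lis (high part with the low 16 bits
   cleared); the IOR then fills in the low half.  */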
7891 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7893 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (temp),
7894 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
7895 emit_insn (gen_rtx_SET (VOIDmode, dest,
7896 gen_rtx_IOR (SImode, copy_rtx (temp),
7897 GEN_INT (c & 0xffff))));
7898 break;
7900 case DImode:
7901 if (!TARGET_POWERPC64)
7903 rtx hi, lo;
7905 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
7906 DImode);
7907 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
7908 DImode);
7909 emit_move_insn (hi, GEN_INT (c >> 32));
7910 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
7911 emit_move_insn (lo, GEN_INT (c));
7913 else
7914 rs6000_emit_set_long_const (dest, c);
7915 break;
7917 default:
7918 gcc_unreachable ();
7921 insn = get_last_insn ();
7922 set = single_set (insn);
7923 if (! CONSTANT_P (SET_SRC (set)))
7924 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
7926 return true;
7929 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
7930 Output insns to set DEST equal to the constant C as a series of
7931 lis, ori and shl instructions. */
7933 static void
7934 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
7936 rtx temp;
7937 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7939 ud1 = c & 0xffff;
7940 c = c >> 16;
7941 ud2 = c & 0xffff;
7942 c = c >> 16;
7943 ud3 = c & 0xffff;
7944 c = c >> 16;
7945 ud4 = c & 0xffff;
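/* For illustration, c = 0x1234567890abcdef splits into ud4 = 0x1234,
   ud3 = 0x5678, ud2 = 0x90ab, ud1 = 0xcdef; the cases below pick the
   shortest lis/ori/shift sequence that reassembles the value from
   these 16-bit digits.  */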
7947 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7948 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7949 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7951 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7952 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7954 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
7956 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
7957 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
7958 if (ud1 != 0)
7959 emit_move_insn (dest,
7960 gen_rtx_IOR (DImode, copy_rtx (temp),
7961 GEN_INT (ud1)));
7963 else if (ud3 == 0 && ud4 == 0)
7965 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
7967 gcc_assert (ud2 & 0x8000);
7968 emit_move_insn (copy_rtx (temp),
7969 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
7970 if (ud1 != 0)
7971 emit_move_insn (copy_rtx (temp),
7972 gen_rtx_IOR (DImode, copy_rtx (temp),
7973 GEN_INT (ud1)));
7974 emit_move_insn (dest,
7975 gen_rtx_ZERO_EXTEND (DImode,
7976 gen_lowpart (SImode,
7977 copy_rtx (temp))));
7979 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7980 || (ud4 == 0 && ! (ud3 & 0x8000)))
7982 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
7984 emit_move_insn (copy_rtx (temp),
7985 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
7986 if (ud2 != 0)
7987 emit_move_insn (copy_rtx (temp),
7988 gen_rtx_IOR (DImode, copy_rtx (temp),
7989 GEN_INT (ud2)));
7990 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
7991 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
7992 GEN_INT (16)));
7993 if (ud1 != 0)
7994 emit_move_insn (dest,
7995 gen_rtx_IOR (DImode, copy_rtx (temp),
7996 GEN_INT (ud1)));
7998 else
8000 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8002 emit_move_insn (copy_rtx (temp),
8003 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
8004 if (ud3 != 0)
8005 emit_move_insn (copy_rtx (temp),
8006 gen_rtx_IOR (DImode, copy_rtx (temp),
8007 GEN_INT (ud3)));
8009 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
8010 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8011 GEN_INT (32)));
8012 if (ud2 != 0)
8013 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8014 gen_rtx_IOR (DImode, copy_rtx (temp),
8015 GEN_INT (ud2 << 16)));
8016 if (ud1 != 0)
8017 emit_move_insn (dest,
8018 gen_rtx_IOR (DImode, copy_rtx (temp),
8019 GEN_INT (ud1)));
8023 /* Helper for the following. Get rid of [r+r] memory refs
8024 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
8026 static void
8027 rs6000_eliminate_indexed_memrefs (rtx operands[2])
8029 if (reload_in_progress)
8030 return;
8032 if (GET_CODE (operands[0]) == MEM
8033 && GET_CODE (XEXP (operands[0], 0)) != REG
8034 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
8035 GET_MODE (operands[0]), false))
8036 operands[0]
8037 = replace_equiv_address (operands[0],
8038 copy_addr_to_reg (XEXP (operands[0], 0)));
8040 if (GET_CODE (operands[1]) == MEM
8041 && GET_CODE (XEXP (operands[1], 0)) != REG
8042 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
8043 GET_MODE (operands[1]), false))
8044 operands[1]
8045 = replace_equiv_address (operands[1],
8046 copy_addr_to_reg (XEXP (operands[1], 0)));
8049 /* Generate a vector of constants to permute MODE for a little-endian
8050 storage operation by swapping the two halves of a vector. */
8051 static rtvec
8052 rs6000_const_vec (enum machine_mode mode)
8054 int i, subparts;
8055 rtvec v;
8057 switch (mode)
8059 case V1TImode:
8060 subparts = 1;
8061 break;
8062 case V2DFmode:
8063 case V2DImode:
8064 subparts = 2;
8065 break;
8066 case V4SFmode:
8067 case V4SImode:
8068 subparts = 4;
8069 break;
8070 case V8HImode:
8071 subparts = 8;
8072 break;
8073 case V16QImode:
8074 subparts = 16;
8075 break;
8076 default:
8077 gcc_unreachable();
8080 v = rtvec_alloc (subparts);
8082 for (i = 0; i < subparts / 2; ++i)
8083 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8084 for (i = subparts / 2; i < subparts; ++i)
8085 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8087 return v;
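/* E.g. for V4SImode this yields the permutation {2, 3, 0, 1}: the two
   doubleword halves swap while elements within each half keep their
   order, which is exactly the element swap that lxvd2x/stxvd2x
   perform on little-endian.  */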
8090 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8091 for a VSX load or store operation. */
8092 rtx
8093 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
8095 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8096 return gen_rtx_VEC_SELECT (mode, source, par);
8099 /* Emit a little-endian load from vector memory location SOURCE to VSX
8100 register DEST in mode MODE. The load is done with two permuting
8101 insns that represent an lxvd2x and an xxpermdi.
8102 void
8103 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
8105 rtx tmp, permute_mem, permute_reg;
8107 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8108 V1TImode). */
8109 if (mode == TImode || mode == V1TImode)
8111 mode = V2DImode;
8112 dest = gen_lowpart (V2DImode, dest);
8113 source = adjust_address (source, V2DImode, 0);
8116 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8117 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8118 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8119 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
8120 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
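/* Schematically, the two SETs above correspond to

     lxvd2x   tmp, src          # load, doublewords swapped
     xxpermdi dest, tmp, tmp, 2 # swap them back

   so DEST ends up in correct little-endian element order.  */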
8123 /* Emit a little-endian store to vector memory location DEST from VSX
8124 register SOURCE in mode MODE. The store is done with two permuting
8125 insns that represent an xxpermdi and an stxvd2x.
8126 void
8127 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
8129 rtx tmp, permute_src, permute_tmp;
8131 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8132 V1TImode). */
8133 if (mode == TImode || mode == V1TImode)
8135 mode = V2DImode;
8136 dest = adjust_address (dest, V2DImode, 0);
8137 source = gen_lowpart (V2DImode, source);
8140 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8141 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8142 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8143 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
8144 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
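/* The mirror image of the load case: the xxpermdi swaps the
   doublewords into a scratch register, and the stxvd2x swaps them
   again on the way to memory, so memory receives the correct
   little-endian image.  */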
8147 /* Emit a sequence representing a little-endian VSX load or store,
8148 moving data from SOURCE to DEST in mode MODE. This is done
8149 separately from rs6000_emit_move to ensure it is called only
8150 during expand. LE VSX loads and stores introduced later are
8151 handled with a split. The expand-time RTL generation allows
8152 us to optimize away redundant pairs of register-permutes. */
8153 void
8154 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
8156 gcc_assert (!BYTES_BIG_ENDIAN
8157 && VECTOR_MEM_VSX_P (mode)
8158 && !gpr_or_gpr_p (dest, source)
8159 && (MEM_P (source) ^ MEM_P (dest)));
8161 if (MEM_P (source))
8163 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8164 rs6000_emit_le_vsx_load (dest, source, mode);
8166 else
8168 if (!REG_P (source))
8169 source = force_reg (mode, source);
8170 rs6000_emit_le_vsx_store (dest, source, mode);
8174 /* Emit a move from SOURCE to DEST in mode MODE. */
8175 void
8176 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
8178 rtx operands[2];
8179 operands[0] = dest;
8180 operands[1] = source;
8182 if (TARGET_DEBUG_ADDR)
8184 fprintf (stderr,
8185 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8186 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8187 GET_MODE_NAME (mode),
8188 reload_in_progress,
8189 reload_completed,
8190 can_create_pseudo_p ());
8191 debug_rtx (dest);
8192 fprintf (stderr, "source:\n");
8193 debug_rtx (source);
8196 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
8197 if (CONST_WIDE_INT_P (operands[1])
8198 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8200 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8201 gcc_unreachable ();
8204 /* Check if GCC is setting up a block move that will end up using FP
8205 registers as temporaries. We must make sure this is acceptable. */
8206 if (GET_CODE (operands[0]) == MEM
8207 && GET_CODE (operands[1]) == MEM
8208 && mode == DImode
8209 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8210 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8211 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8212 ? 32 : MEM_ALIGN (operands[0])))
8213 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8214 ? 32
8215 : MEM_ALIGN (operands[1]))))
8216 && ! MEM_VOLATILE_P (operands [0])
8217 && ! MEM_VOLATILE_P (operands [1]))
8219 emit_move_insn (adjust_address (operands[0], SImode, 0),
8220 adjust_address (operands[1], SImode, 0));
8221 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8222 adjust_address (copy_rtx (operands[1]), SImode, 4));
8223 return;
8226 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8227 && !gpc_reg_operand (operands[1], mode))
8228 operands[1] = force_reg (mode, operands[1]);
8230 /* Recognize the case where operand[1] is a reference to thread-local
8231 data and load its address to a register. */
8232 if (tls_referenced_p (operands[1]))
8234 enum tls_model model;
8235 rtx tmp = operands[1];
8236 rtx addend = NULL;
8238 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8240 addend = XEXP (XEXP (tmp, 0), 1);
8241 tmp = XEXP (XEXP (tmp, 0), 0);
8244 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8245 model = SYMBOL_REF_TLS_MODEL (tmp);
8246 gcc_assert (model != 0);
8248 tmp = rs6000_legitimize_tls_address (tmp, model);
8249 if (addend)
8251 tmp = gen_rtx_PLUS (mode, tmp, addend);
8252 tmp = force_operand (tmp, operands[0]);
8254 operands[1] = tmp;
8257 /* Handle the case where reload calls us with an invalid address. */
8258 if (reload_in_progress && mode == Pmode
8259 && (! general_operand (operands[1], mode)
8260 || ! nonimmediate_operand (operands[0], mode)))
8261 goto emit_set;
8263 /* 128-bit constant floating-point values on Darwin should really be
8264 loaded as two parts. */
8265 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8266 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8268 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8269 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8270 DFmode);
8271 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8272 GET_MODE_SIZE (DFmode)),
8273 simplify_gen_subreg (DFmode, operands[1], mode,
8274 GET_MODE_SIZE (DFmode)),
8275 DFmode);
8276 return;
8279 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8280 cfun->machine->sdmode_stack_slot =
8281 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8284 if (lra_in_progress
8285 && mode == SDmode
8286 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8287 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8288 && (REG_P (operands[1])
8289 || (GET_CODE (operands[1]) == SUBREG
8290 && REG_P (SUBREG_REG (operands[1])))))
8292 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8293 ? SUBREG_REG (operands[1]) : operands[1]);
8294 enum reg_class cl;
8296 if (regno >= FIRST_PSEUDO_REGISTER)
8298 cl = reg_preferred_class (regno);
8299 gcc_assert (cl != NO_REGS);
8300 regno = ira_class_hard_regs[cl][0];
8302 if (FP_REGNO_P (regno))
8304 if (GET_MODE (operands[0]) != DDmode)
8305 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8306 emit_insn (gen_movsd_store (operands[0], operands[1]));
8308 else if (INT_REGNO_P (regno))
8309 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8310 else
8311 gcc_unreachable();
8312 return;
8314 if (lra_in_progress
8315 && mode == SDmode
8316 && (REG_P (operands[0])
8317 || (GET_CODE (operands[0]) == SUBREG
8318 && REG_P (SUBREG_REG (operands[0]))))
8319 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8320 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8322 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8323 ? SUBREG_REG (operands[0]) : operands[0]);
8324 enum reg_class cl;
8326 if (regno >= FIRST_PSEUDO_REGISTER)
8328 cl = reg_preferred_class (regno);
8329 gcc_assert (cl != NO_REGS);
8330 regno = ira_class_hard_regs[cl][0];
8332 if (FP_REGNO_P (regno))
8334 if (GET_MODE (operands[1]) != DDmode)
8335 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8336 emit_insn (gen_movsd_load (operands[0], operands[1]));
8338 else if (INT_REGNO_P (regno))
8339 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8340 else
8341 gcc_unreachable();
8342 return;
8345 if (reload_in_progress
8346 && mode == SDmode
8347 && cfun->machine->sdmode_stack_slot != NULL_RTX
8348 && MEM_P (operands[0])
8349 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8350 && REG_P (operands[1]))
8352 if (FP_REGNO_P (REGNO (operands[1])))
8354 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8355 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8356 emit_insn (gen_movsd_store (mem, operands[1]));
8358 else if (INT_REGNO_P (REGNO (operands[1])))
8360 rtx mem = operands[0];
8361 if (BYTES_BIG_ENDIAN)
8362 mem = adjust_address_nv (mem, mode, 4);
8363 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8364 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8366 else
8367 gcc_unreachable();
8368 return;
8370 if (reload_in_progress
8371 && mode == SDmode
8372 && REG_P (operands[0])
8373 && MEM_P (operands[1])
8374 && cfun->machine->sdmode_stack_slot != NULL_RTX
8375 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8377 if (FP_REGNO_P (REGNO (operands[0])))
8379 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8380 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8381 emit_insn (gen_movsd_load (operands[0], mem));
8383 else if (INT_REGNO_P (REGNO (operands[0])))
8385 rtx mem = operands[1];
8386 if (BYTES_BIG_ENDIAN)
8387 mem = adjust_address_nv (mem, mode, 4);
8388 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8389 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8391 else
8392 gcc_unreachable();
8393 return;
8396 /* FIXME: In the long term, this switch statement should go away
8397 and be replaced by a sequence of tests based on things like
8398 mode == Pmode. */
8399 switch (mode)
8401 case HImode:
8402 case QImode:
8403 if (CONSTANT_P (operands[1])
8404 && GET_CODE (operands[1]) != CONST_INT)
8405 operands[1] = force_const_mem (mode, operands[1]);
8406 break;
8408 case TFmode:
8409 case TDmode:
8410 rs6000_eliminate_indexed_memrefs (operands);
8411 /* fall through */
8413 case DFmode:
8414 case DDmode:
8415 case SFmode:
8416 case SDmode:
8417 if (CONSTANT_P (operands[1])
8418 && ! easy_fp_constant (operands[1], mode))
8419 operands[1] = force_const_mem (mode, operands[1]);
8420 break;
8422 case V16QImode:
8423 case V8HImode:
8424 case V4SFmode:
8425 case V4SImode:
8426 case V4HImode:
8427 case V2SFmode:
8428 case V2SImode:
8429 case V1DImode:
8430 case V2DFmode:
8431 case V2DImode:
8432 case V1TImode:
8433 if (CONSTANT_P (operands[1])
8434 && !easy_vector_constant (operands[1], mode))
8435 operands[1] = force_const_mem (mode, operands[1]);
8436 break;
8438 case SImode:
8439 case DImode:
8440 /* Use default pattern for address of ELF small data.  */
8441 if (TARGET_ELF
8442 && mode == Pmode
8443 && DEFAULT_ABI == ABI_V4
8444 && (GET_CODE (operands[1]) == SYMBOL_REF
8445 || GET_CODE (operands[1]) == CONST)
8446 && small_data_operand (operands[1], mode))
8448 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8449 return;
8452 if (DEFAULT_ABI == ABI_V4
8453 && mode == Pmode && mode == SImode
8454 && flag_pic == 1 && got_operand (operands[1], mode))
8456 emit_insn (gen_movsi_got (operands[0], operands[1]));
8457 return;
8460 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8461 && TARGET_NO_TOC
8462 && ! flag_pic
8463 && mode == Pmode
8464 && CONSTANT_P (operands[1])
8465 && GET_CODE (operands[1]) != HIGH
8466 && GET_CODE (operands[1]) != CONST_INT)
8468 rtx target = (!can_create_pseudo_p ()
8469 ? operands[0]
8470 : gen_reg_rtx (mode));
8472 /* If this is a function address on -mcall-aixdesc,
8473 convert it to the address of the descriptor. */
8474 if (DEFAULT_ABI == ABI_AIX
8475 && GET_CODE (operands[1]) == SYMBOL_REF
8476 && XSTR (operands[1], 0)[0] == '.')
8478 const char *name = XSTR (operands[1], 0);
8479 rtx new_ref;
8480 while (*name == '.')
8481 name++;
8482 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8483 CONSTANT_POOL_ADDRESS_P (new_ref)
8484 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8485 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8486 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8487 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8488 operands[1] = new_ref;
8491 if (DEFAULT_ABI == ABI_DARWIN)
8493 #if TARGET_MACHO
8494 if (MACHO_DYNAMIC_NO_PIC_P)
8496 /* Take care of any required data indirection. */
8497 operands[1] = rs6000_machopic_legitimize_pic_address (
8498 operands[1], mode, operands[0]);
8499 if (operands[0] != operands[1])
8500 emit_insn (gen_rtx_SET (VOIDmode,
8501 operands[0], operands[1]));
8502 return;
8504 #endif
8505 emit_insn (gen_macho_high (target, operands[1]));
8506 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8507 return;
8510 emit_insn (gen_elf_high (target, operands[1]));
8511 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8512 return;
8515 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8516 and we have put it in the TOC, we just need to make a TOC-relative
8517 reference to it. */
8518 if (TARGET_TOC
8519 && GET_CODE (operands[1]) == SYMBOL_REF
8520 && use_toc_relative_ref (operands[1]))
8521 operands[1] = create_TOC_reference (operands[1], operands[0]);
8522 else if (mode == Pmode
8523 && CONSTANT_P (operands[1])
8524 && GET_CODE (operands[1]) != HIGH
8525 && ((GET_CODE (operands[1]) != CONST_INT
8526 && ! easy_fp_constant (operands[1], mode))
8527 || (GET_CODE (operands[1]) == CONST_INT
8528 && (num_insns_constant (operands[1], mode)
8529 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8530 || (GET_CODE (operands[0]) == REG
8531 && FP_REGNO_P (REGNO (operands[0]))))
8532 && !toc_relative_expr_p (operands[1], false)
8533 && (TARGET_CMODEL == CMODEL_SMALL
8534 || can_create_pseudo_p ()
8535 || (REG_P (operands[0])
8536 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8539 #if TARGET_MACHO
8540 /* Darwin uses a special PIC legitimizer. */
8541 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8543 operands[1] =
8544 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8545 operands[0]);
8546 if (operands[0] != operands[1])
8547 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8548 return;
8550 #endif
8552 /* If we are to limit the number of things we put in the TOC and
8553 this is a symbol plus a constant we can add in one insn,
8554 just put the symbol in the TOC and add the constant. Don't do
8555 this if reload is in progress. */
8556 if (GET_CODE (operands[1]) == CONST
8557 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8558 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8559 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8560 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8561 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8562 && ! side_effects_p (operands[0]))
8564 rtx sym =
8565 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8566 rtx other = XEXP (XEXP (operands[1], 0), 1);
8568 sym = force_reg (mode, sym);
8569 emit_insn (gen_add3_insn (operands[0], sym, other));
8570 return;
8573 operands[1] = force_const_mem (mode, operands[1]);
8575 if (TARGET_TOC
8576 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8577 && constant_pool_expr_p (XEXP (operands[1], 0))
8578 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8579 get_pool_constant (XEXP (operands[1], 0)),
8580 get_pool_mode (XEXP (operands[1], 0))))
8582 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8583 operands[0]);
8584 operands[1] = gen_const_mem (mode, tocref);
8585 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8588 break;
8590 case TImode:
8591 if (!VECTOR_MEM_VSX_P (TImode))
8592 rs6000_eliminate_indexed_memrefs (operands);
8593 break;
8595 case PTImode:
8596 rs6000_eliminate_indexed_memrefs (operands);
8597 break;
8599 default:
8600 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8603 /* Above, we may have called force_const_mem which may have returned
8604 an invalid address. If we can, fix this up; otherwise, reload will
8605 have to deal with it. */
8606 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8607 operands[1] = validize_mem (operands[1]);
8609 emit_set:
8610 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8613 /* Return true if a structure, union or array containing FIELD should be
8614 accessed using `BLKmode'.
8616 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8617 entire thing in a DI and use subregs to access the internals.
8618 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8619 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8620 best thing to do is set structs to BLKmode and avoid Severe Tire
8621 Damage.
8623 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
8624 fit into 1, whereas DI still needs two. */
8626 static bool
8627 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8629 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8630 || (TARGET_E500_DOUBLE && mode == DFmode));
8633 /* Nonzero if we can use a floating-point register to pass this arg. */
8634 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8635 (SCALAR_FLOAT_MODE_P (MODE) \
8636 && (CUM)->fregno <= FP_ARG_MAX_REG \
8637 && TARGET_HARD_FLOAT && TARGET_FPRS)
8639 /* Nonzero if we can use an AltiVec register to pass this arg. */
8640 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8641 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8642 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8643 && TARGET_ALTIVEC_ABI \
8644 && (NAMED))
8646 /* Walk down the type tree of TYPE counting consecutive base elements.
8647 If *MODEP is VOIDmode, then set it to the first valid floating point
8648 or vector type. If a non-floating point or vector type is found, or
8649 if a floating point or vector type that doesn't match a non-VOIDmode
8650 *MODEP is found, then return -1, otherwise return the count in the
8651 sub-tree. */
8653 static int
8654 rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
8656 enum machine_mode mode;
8657 HOST_WIDE_INT size;
8659 switch (TREE_CODE (type))
8661 case REAL_TYPE:
8662 mode = TYPE_MODE (type);
8663 if (!SCALAR_FLOAT_MODE_P (mode))
8664 return -1;
8666 if (*modep == VOIDmode)
8667 *modep = mode;
8669 if (*modep == mode)
8670 return 1;
8672 break;
8674 case COMPLEX_TYPE:
8675 mode = TYPE_MODE (TREE_TYPE (type));
8676 if (!SCALAR_FLOAT_MODE_P (mode))
8677 return -1;
8679 if (*modep == VOIDmode)
8680 *modep = mode;
8682 if (*modep == mode)
8683 return 2;
8685 break;
8687 case VECTOR_TYPE:
8688 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
8689 return -1;
8691 /* Use V4SImode as representative of all 128-bit vector types. */
8692 size = int_size_in_bytes (type);
8693 switch (size)
8695 case 16:
8696 mode = V4SImode;
8697 break;
8698 default:
8699 return -1;
8702 if (*modep == VOIDmode)
8703 *modep = mode;
8705 /* Vector modes are considered to be opaque: two vectors are
8706 equivalent for the purposes of being homogeneous aggregates
8707 if they are the same size. */
8708 if (*modep == mode)
8709 return 1;
8711 break;
8713 case ARRAY_TYPE:
8715 int count;
8716 tree index = TYPE_DOMAIN (type);
8718 /* Can't handle incomplete types nor sizes that are not
8719 fixed. */
8720 if (!COMPLETE_TYPE_P (type)
8721 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8722 return -1;
8724 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
8725 if (count == -1
8726 || !index
8727 || !TYPE_MAX_VALUE (index)
8728 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
8729 || !TYPE_MIN_VALUE (index)
8730 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
8731 || count < 0)
8732 return -1;
8734 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
8735 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
8737 /* There must be no padding. */
8738 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8739 return -1;
8741 return count;
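/* Example: for double[4] the element walk returns 1 with *modep set
   to DFmode, the index domain [0, 3] scales the count to 4, and
   4 * 64 bits matches TYPE_SIZE, so the array counts as four DFmode
   elements.  */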
8744 case RECORD_TYPE:
8746 int count = 0;
8747 int sub_count;
8748 tree field;
8750 /* Can't handle incomplete types nor sizes that are not
8751 fixed. */
8752 if (!COMPLETE_TYPE_P (type)
8753 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8754 return -1;
8756 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8758 if (TREE_CODE (field) != FIELD_DECL)
8759 continue;
8761 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8762 if (sub_count < 0)
8763 return -1;
8764 count += sub_count;
8767 /* There must be no padding. */
8768 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8769 return -1;
8771 return count;
8774 case UNION_TYPE:
8775 case QUAL_UNION_TYPE:
8777 /* These aren't very interesting except in a degenerate case. */
8778 int count = 0;
8779 int sub_count;
8780 tree field;
8782 /* Can't handle incomplete types nor sizes that are not
8783 fixed. */
8784 if (!COMPLETE_TYPE_P (type)
8785 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8786 return -1;
8788 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8790 if (TREE_CODE (field) != FIELD_DECL)
8791 continue;
8793 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8794 if (sub_count < 0)
8795 return -1;
8796 count = count > sub_count ? count : sub_count;
8799 /* There must be no padding. */
8800 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8801 return -1;
8803 return count;
8806 default:
8807 break;
8810 return -1;
8813 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
8814 float or vector aggregate that shall be passed in FP/vector registers
8815 according to the ELFv2 ABI, return the homogeneous element mode in
8816 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
8818 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
8820 static bool
8821 rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
8822 enum machine_mode *elt_mode,
8823 int *n_elts)
8825 /* Note that we do not accept complex types at the top level as
8826 homogeneous aggregates; these types are handled via the
8827 targetm.calls.split_complex_arg mechanism. Complex types
8828 can be elements of homogeneous aggregates, however. */
8829 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
8831 enum machine_mode field_mode = VOIDmode;
8832 int field_count = rs6000_aggregate_candidate (type, &field_mode);
8834 if (field_count > 0)
8836 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
8837 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
8839 /* The ELFv2 ABI allows homogeneous aggregates to occupy
8840 up to AGGR_ARG_NUM_REG registers. */
8841 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
8843 if (elt_mode)
8844 *elt_mode = field_mode;
8845 if (n_elts)
8846 *n_elts = field_count;
8847 return true;
8852 if (elt_mode)
8853 *elt_mode = mode;
8854 if (n_elts)
8855 *n_elts = 1;
8856 return false;
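/* For example, under ELFv2 a struct { double x, y, z; } is discovered
   as a homogeneous aggregate with *elt_mode = DFmode and *n_elts = 3:
   each DFmode field needs a single register and 3 does not exceed
   AGGR_ARG_NUM_REG.  */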
8859 /* Return a nonzero value to say to return the function value in
8860 memory, just as large structures are always returned. TYPE will be
8861 the data type of the value, and FNTYPE will be the type of the
8862 function doing the returning, or @code{NULL} for libcalls.
8864 The AIX ABI for the RS/6000 specifies that all structures are
8865 returned in memory. The Darwin ABI does the same.
8867 For the Darwin 64 Bit ABI, a function result can be returned in
8868 registers or in memory, depending on the size of the return data
8869 type. If it is returned in registers, the value occupies the same
8870 registers as it would if it were the first and only function
8871 argument. Otherwise, the function places its result in memory at
8872 the location pointed to by GPR3.
8874 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8875 but a draft put them in memory, and GCC used to implement the draft
8876 instead of the final standard. Therefore, aix_struct_return
8877 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8878 compatibility can change DRAFT_V4_STRUCT_RET to override the
8879 default, and -m switches get the final word. See
8880 rs6000_option_override_internal for more details.
8882 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8883 long double support is enabled. These values are returned in memory.
8885 int_size_in_bytes returns -1 for variable size objects, which go in
8886 memory always. The cast to unsigned makes -1 > 8. */
8888 static bool
8889 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8891 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8892 if (TARGET_MACHO
8893 && rs6000_darwin64_abi
8894 && TREE_CODE (type) == RECORD_TYPE
8895 && int_size_in_bytes (type) > 0)
8897 CUMULATIVE_ARGS valcum;
8898 rtx valret;
8900 valcum.words = 0;
8901 valcum.fregno = FP_ARG_MIN_REG;
8902 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8903 /* Do a trial code generation as if this were going to be passed
8904 as an argument; if any part goes in memory, we return NULL. */
8905 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8906 if (valret)
8907 return false;
8908 /* Otherwise fall through to more conventional ABI rules. */
8911 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
8912 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
8913 NULL, NULL))
8914 return false;
8916 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
8917 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
8918 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
8919 return false;
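/* So under ELFv2, e.g., struct { double x, y; } is returned in two
   FPRs via the homogeneous-aggregate path, and a struct of two ints
   is returned in a GPR via the 16-byte rule; neither needs a hidden
   return pointer.  */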
8921 if (AGGREGATE_TYPE_P (type)
8922 && (aix_struct_return
8923 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
8924 return true;
8926 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8927 modes only exist for GCC vector types if -maltivec. */
8928 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
8929 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8930 return false;
8932 /* Return synthetic vectors in memory. */
8933 if (TREE_CODE (type) == VECTOR_TYPE
8934 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8936 static bool warned_for_return_big_vectors = false;
8937 if (!warned_for_return_big_vectors)
8939 warning (0, "GCC vector returned by reference: "
8940 "non-standard ABI extension with no compatibility guarantee");
8941 warned_for_return_big_vectors = true;
8943 return true;
8946 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
8947 return true;
8949 return false;
8952 /* Specify whether values returned in registers should be at the most
8953 significant end of a register. We want aggregates returned by
8954 value to match the way aggregates are passed to functions. */
8956 static bool
8957 rs6000_return_in_msb (const_tree valtype)
8959 return (DEFAULT_ABI == ABI_ELFv2
8960 && BYTES_BIG_ENDIAN
8961 && AGGREGATE_TYPE_P (valtype)
8962 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
8965 #ifdef HAVE_AS_GNU_ATTRIBUTE
8966 /* Return TRUE if a call to function FNDECL may be one that
8967 potentially affects the function calling ABI of the object file. */
8969 static bool
8970 call_ABI_of_interest (tree fndecl)
8972 if (cgraph_state == CGRAPH_STATE_EXPANSION)
8974 struct cgraph_node *c_node;
8976 /* Libcalls are always interesting. */
8977 if (fndecl == NULL_TREE)
8978 return true;
8980 /* Any call to an external function is interesting. */
8981 if (DECL_EXTERNAL (fndecl))
8982 return true;
8984 /* Interesting functions that we are emitting in this object file. */
8985 c_node = cgraph_node::get (fndecl);
8986 c_node = c_node->ultimate_alias_target ();
8987 return !c_node->only_called_directly_p ();
8989 return false;
8991 #endif
8993 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8994 for a call to a function whose data type is FNTYPE.
8995 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
8997 For incoming args we set the number of arguments in the prototype large
8998 so we never return a PARALLEL. */
9000 void
9001 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
9002 rtx libname ATTRIBUTE_UNUSED, int incoming,
9003 int libcall, int n_named_args,
9004 tree fndecl ATTRIBUTE_UNUSED,
9005 enum machine_mode return_mode ATTRIBUTE_UNUSED)
9007 static CUMULATIVE_ARGS zero_cumulative;
9009 *cum = zero_cumulative;
9010 cum->words = 0;
9011 cum->fregno = FP_ARG_MIN_REG;
9012 cum->vregno = ALTIVEC_ARG_MIN_REG;
9013 cum->prototype = (fntype && prototype_p (fntype));
9014 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
9015 ? CALL_LIBCALL : CALL_NORMAL);
9016 cum->sysv_gregno = GP_ARG_MIN_REG;
9017 cum->stdarg = stdarg_p (fntype);
9019 cum->nargs_prototype = 0;
9020 if (incoming || cum->prototype)
9021 cum->nargs_prototype = n_named_args;
9023 /* Check for a longcall attribute. */
9024 if ((!fntype && rs6000_default_long_calls)
9025 || (fntype
9026 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
9027 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
9028 cum->call_cookie |= CALL_LONG;
9030 if (TARGET_DEBUG_ARG)
9032 fprintf (stderr, "\ninit_cumulative_args:");
9033 if (fntype)
9035 tree ret_type = TREE_TYPE (fntype);
9036 fprintf (stderr, " ret code = %s,",
9037 get_tree_code_name (TREE_CODE (ret_type)));
9040 if (cum->call_cookie & CALL_LONG)
9041 fprintf (stderr, " longcall,");
9043 fprintf (stderr, " proto = %d, nargs = %d\n",
9044 cum->prototype, cum->nargs_prototype);
9047 #ifdef HAVE_AS_GNU_ATTRIBUTE
9048 if (DEFAULT_ABI == ABI_V4)
9050 cum->escapes = call_ABI_of_interest (fndecl);
9051 if (cum->escapes)
9053 tree return_type;
9055 if (fntype)
9057 return_type = TREE_TYPE (fntype);
9058 return_mode = TYPE_MODE (return_type);
9060 else
9061 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9063 if (return_type != NULL)
9065 if (TREE_CODE (return_type) == RECORD_TYPE
9066 && TYPE_TRANSPARENT_AGGR (return_type))
9068 return_type = TREE_TYPE (first_field (return_type));
9069 return_mode = TYPE_MODE (return_type);
9071 if (AGGREGATE_TYPE_P (return_type)
9072 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9073 <= 8))
9074 rs6000_returns_struct = true;
9076 if (SCALAR_FLOAT_MODE_P (return_mode))
9077 rs6000_passes_float = true;
9078 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9079 || SPE_VECTOR_MODE (return_mode))
9080 rs6000_passes_vector = true;
9083 #endif
9085 if (fntype
9086 && !TARGET_ALTIVEC
9087 && TARGET_ALTIVEC_ABI
9088 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9090 error ("cannot return value in vector register because"
9091 " altivec instructions are disabled, use -maltivec"
9092 " to enable them");
9096 /* Return true if TYPE must be passed on the stack and not in registers. */
9098 static bool
9099 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
9101 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9102 return must_pass_in_stack_var_size (mode, type);
9103 else
9104 return must_pass_in_stack_var_size_or_pad (mode, type);
9107 /* If defined, a C expression which determines whether, and in which
9108 direction, to pad out an argument with extra space. The value
9109 should be of type `enum direction': either `upward' to pad above
9110 the argument, `downward' to pad below, or `none' to inhibit
9111 padding.
9113 For the AIX ABI structs are always stored left shifted in their
9114 argument slot. */
9116 enum direction
9117 function_arg_padding (enum machine_mode mode, const_tree type)
9119 #ifndef AGGREGATE_PADDING_FIXED
9120 #define AGGREGATE_PADDING_FIXED 0
9121 #endif
9122 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9123 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9124 #endif
9126 if (!AGGREGATE_PADDING_FIXED)
9128 /* GCC used to pass structures of the same size as integer types as
9129 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
9130 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9131 passed padded downward, except that -mstrict-align further
9132 muddied the water in that multi-component structures of 2 and 4
9133 bytes in size were passed padded upward.
9135 The following arranges for best compatibility with previous
9136 versions of gcc, but removes the -mstrict-align dependency. */
9137 if (BYTES_BIG_ENDIAN)
9139 HOST_WIDE_INT size = 0;
9141 if (mode == BLKmode)
9143 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9144 size = int_size_in_bytes (type);
9146 else
9147 size = GET_MODE_SIZE (mode);
9149 if (size == 1 || size == 2 || size == 4)
9150 return downward;
9152 return upward;
9155 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9157 if (type != 0 && AGGREGATE_TYPE_P (type))
9158 return upward;
9161 /* Fall back to the default. */
9162 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
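/* Editor's sketch, not part of GCC: the big-endian branch above in
   isolation (assuming AGGREGATE_PADDING_FIXED is unset).  It returns
   true exactly when the function above would answer `downward', i.e.
   for arguments of 1, 2 or 4 bytes.  */
static int
example_pads_downward_p (long size_in_bytes)
{
  return size_in_bytes == 1 || size_in_bytes == 2 || size_in_bytes == 4;
}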
9165 /* If defined, a C expression that gives the alignment boundary, in bits,
9166 of an argument with the specified mode and type. If it is not defined,
9167 PARM_BOUNDARY is used for all arguments.
9169 V.4 wants long longs and doubles to be double word aligned. Just
9170 testing the mode size is a boneheaded way to do this as it means
9171 that other types such as complex int are also double word aligned.
9172 However, we're stuck with this because changing the ABI might break
9173 existing library interfaces.
9175 Doubleword align SPE vectors.
9176 Quadword align Altivec/VSX vectors.
9177 Quadword align large synthetic vector types. */
9179 static unsigned int
9180 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
9182 enum machine_mode elt_mode;
9183 int n_elts;
9185 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9187 if (DEFAULT_ABI == ABI_V4
9188 && (GET_MODE_SIZE (mode) == 8
9189 || (TARGET_HARD_FLOAT
9190 && TARGET_FPRS
9191 && (mode == TFmode || mode == TDmode))))
9192 return 64;
9193 else if (SPE_VECTOR_MODE (mode)
9194 || (type && TREE_CODE (type) == VECTOR_TYPE
9195 && int_size_in_bytes (type) >= 8
9196 && int_size_in_bytes (type) < 16))
9197 return 64;
9198 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9199 || (type && TREE_CODE (type) == VECTOR_TYPE
9200 && int_size_in_bytes (type) >= 16))
9201 return 128;
9203 /* Aggregate types that need > 8 byte alignment are quadword-aligned
9204 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
9205 -mcompat-align-parm is used. */
9206 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
9207 || DEFAULT_ABI == ABI_ELFv2)
9208 && type && TYPE_ALIGN (type) > 64)
9210 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
9211 or homogeneous float/vector aggregates here. We already handled
9212 vector aggregates above, but still need to check for float here. */
9213 bool aggregate_p = (AGGREGATE_TYPE_P (type)
9214 && !SCALAR_FLOAT_MODE_P (elt_mode));
9216 /* We used to check for BLKmode instead of the above aggregate type
9217 check. Warn when this results in any difference to the ABI. */
9218 if (aggregate_p != (mode == BLKmode))
9220 static bool warned;
9221 if (!warned && warn_psabi)
9223 warned = true;
9224 inform (input_location,
9225 "the ABI of passing aggregates with %d-byte alignment"
9226 " has changed in GCC 4.10",
9227 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
9231 if (aggregate_p)
9232 return 128;
9235 /* Similar for the Darwin64 ABI. Note that for historical reasons we
9236 implement the "aggregate type" check as a BLKmode check here; this
9237 means certain aggregate types are in fact not aligned. */
9238 if (TARGET_MACHO && rs6000_darwin64_abi
9239 && mode == BLKmode
9240 && type && TYPE_ALIGN (type) > 64)
9241 return 128;
9243 return PARM_BOUNDARY;
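/* Editor's sketch, not part of GCC: the main boundaries above as plain
   numbers, assuming a 32-bit V.4 target with hard float, where
   PARM_BOUNDARY is 32.  */
static unsigned int
example_v4_arg_boundary (int size_in_bytes, int is_vector)
{
  if (is_vector && size_in_bytes == 16)
    return 128;			/* AltiVec/VSX vectors: quadword.  */
  if (size_in_bytes == 8)
    return 64;			/* long long, double: doubleword.  */
  return 32;			/* Everything else: PARM_BOUNDARY.  */
}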
9246 /* The offset in words to the start of the parameter save area. */
9248 static unsigned int
9249 rs6000_parm_offset (void)
9251 return (DEFAULT_ABI == ABI_V4 ? 2
9252 : DEFAULT_ABI == ABI_ELFv2 ? 4
9253 : 6);
9256 /* For a function parm of MODE and TYPE, return the starting word in
9257 the parameter area. NWORDS of the parameter area are already used. */
9259 static unsigned int
9260 rs6000_parm_start (enum machine_mode mode, const_tree type,
9261 unsigned int nwords)
9263 unsigned int align;
9265 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9266 return nwords + (-(rs6000_parm_offset () + nwords) & align);
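/* Editor's sketch, not part of GCC: the formula above on concrete
   numbers.  Assume the ELFv2 offset of 4 words and 64-bit words, so a
   quadword-aligned argument has the mask ALIGN == 1.  With 3 words
   already used: 3 + (-(4 + 3) & 1) == 4, i.e. one word of padding is
   skipped so the argument starts on an even word.  */
static unsigned int
example_parm_start (unsigned int offset, unsigned int nwords,
		    unsigned int align_mask)
{
  return nwords + (-(offset + nwords) & align_mask);
}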
9269 /* Compute the size (in words) of a function argument. */
9271 static unsigned long
9272 rs6000_arg_size (enum machine_mode mode, const_tree type)
9274 unsigned long size;
9276 if (mode != BLKmode)
9277 size = GET_MODE_SIZE (mode);
9278 else
9279 size = int_size_in_bytes (type);
9281 if (TARGET_32BIT)
9282 return (size + 3) >> 2;
9283 else
9284 return (size + 7) >> 3;
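/* Editor's sketch, not part of GCC: the rounding above on a concrete
   size.  A 10-byte argument takes (10 + 3) >> 2 == 3 words on 32-bit
   and (10 + 7) >> 3 == 2 doublewords on 64-bit.  */
static unsigned long
example_arg_size (unsigned long size_in_bytes, int is_64bit)
{
  return is_64bit ? (size_in_bytes + 7) >> 3 : (size_in_bytes + 3) >> 2;
}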
9287 /* Use this to flush pending int fields. */
9289 static void
9290 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9291 HOST_WIDE_INT bitpos, int final)
9293 unsigned int startbit, endbit;
9294 int intregs, intoffset;
9295 enum machine_mode mode;
9297 /* Handle the situations where a float is taking up the first half
9298 of the GPR, and the other half is empty (typically due to
9299 alignment restrictions). We can detect this by an 8-byte-aligned
9300 int field, or by seeing that this is the final flush for this
9301 argument. Count the word and continue on. */
9302 if (cum->floats_in_gpr == 1
9303 && (cum->intoffset % 64 == 0
9304 || (cum->intoffset == -1 && final)))
9306 cum->words++;
9307 cum->floats_in_gpr = 0;
9310 if (cum->intoffset == -1)
9311 return;
9313 intoffset = cum->intoffset;
9314 cum->intoffset = -1;
9315 cum->floats_in_gpr = 0;
9317 if (intoffset % BITS_PER_WORD != 0)
9319 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9320 MODE_INT, 0);
9321 if (mode == BLKmode)
9323 /* We couldn't find an appropriate mode, which happens,
9324 e.g., in packed structs when there are 3 bytes to load.
9325 Move intoffset back to the beginning of the word in this
9326 case. */
9327 intoffset = intoffset & -BITS_PER_WORD;
9331 startbit = intoffset & -BITS_PER_WORD;
9332 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9333 intregs = (endbit - startbit) / BITS_PER_WORD;
9334 cum->words += intregs;
9335 /* words should be unsigned. */
9336 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
9338 int pad = (endbit/BITS_PER_WORD) - cum->words;
9339 cum->words += pad;
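/* Editor's sketch, not part of GCC: the startbit/endbit arithmetic
   above, assuming BITS_PER_WORD == 64.  The start of the pending int
   fields is rounded down to a word boundary and the end is rounded up,
   so a run from bit 96 to bit 200 occupies (256 - 64) / 64 == 3 GPRs.  */
static int
example_int_gprs (int intoffset, int bitpos)
{
  int startbit = intoffset & -64;
  int endbit = (bitpos + 64 - 1) & -64;
  return (endbit - startbit) / 64;
}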
9343 /* The darwin64 ABI calls for us to recurse down through structs,
9344 looking for elements passed in registers. Unfortunately, we have
9345 to track int register count here also because of misalignments
9346 in powerpc alignment mode. */
9348 static void
9349 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9350 const_tree type,
9351 HOST_WIDE_INT startbitpos)
9353 tree f;
9355 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9356 if (TREE_CODE (f) == FIELD_DECL)
9358 HOST_WIDE_INT bitpos = startbitpos;
9359 tree ftype = TREE_TYPE (f);
9360 enum machine_mode mode;
9361 if (ftype == error_mark_node)
9362 continue;
9363 mode = TYPE_MODE (ftype);
9365 if (DECL_SIZE (f) != 0
9366 && tree_fits_uhwi_p (bit_position (f)))
9367 bitpos += int_bit_position (f);
9369 /* ??? FIXME: else assume zero offset. */
9371 if (TREE_CODE (ftype) == RECORD_TYPE)
9372 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9373 else if (USE_FP_FOR_ARG_P (cum, mode))
9375 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9376 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9377 cum->fregno += n_fpregs;
9378 /* Single-precision floats present a special problem for
9379 us, because they are smaller than an 8-byte GPR, and so
9380 the structure-packing rules combined with the standard
9381 varargs behavior mean that we want to pack float/float
9382 and float/int combinations into a single register's
9383 space. This is complicated by the arg advance flushing,
9384 which works on arbitrarily large groups of int-type
9385 fields. */
9386 if (mode == SFmode)
9388 if (cum->floats_in_gpr == 1)
9390 /* Two floats in a word; count the word and reset
9391 the float count. */
9392 cum->words++;
9393 cum->floats_in_gpr = 0;
9395 else if (bitpos % 64 == 0)
9397 /* A float at the beginning of an 8-byte word;
9398 count it and put off adjusting cum->words until
9399 we see if an arg advance flush is going to do it
9400 for us. */
9401 cum->floats_in_gpr++;
9403 else
9405 /* The float is at the end of a word, preceded
9406 by integer fields, so the arg advance flush
9407 just above has already set cum->words and
9408 everything is taken care of. */
9411 else
9412 cum->words += n_fpregs;
9414 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9416 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9417 cum->vregno++;
9418 cum->words += 2;
9420 else if (cum->intoffset == -1)
9421 cum->intoffset = bitpos;
9425 /* Check for an item that needs to be considered specially under the Darwin
9426 64-bit ABI. These are record types where the mode is BLK or the structure is
9427 8 bytes in size. */
9428 static int
9429 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
9431 return rs6000_darwin64_abi
9432 && ((mode == BLKmode
9433 && TREE_CODE (type) == RECORD_TYPE
9434 && int_size_in_bytes (type) > 0)
9435 || (type && TREE_CODE (type) == RECORD_TYPE
9436 && int_size_in_bytes (type) == 8)) ? 1 : 0;
9439 /* Update the data in CUM to advance over an argument
9440 of mode MODE and data type TYPE.
9441 (TYPE is null for libcalls where that information may not be available.)
9443 Note that for args passed by reference, function_arg will be called
9444 with MODE and TYPE set to that of the pointer to the arg, not the arg
9445 itself. */
9447 static void
9448 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9449 const_tree type, bool named, int depth)
9451 enum machine_mode elt_mode;
9452 int n_elts;
9454 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9456 /* Only tick off an argument if we're not recursing. */
9457 if (depth == 0)
9458 cum->nargs_prototype--;
9460 #ifdef HAVE_AS_GNU_ATTRIBUTE
9461 if (DEFAULT_ABI == ABI_V4
9462 && cum->escapes)
9464 if (SCALAR_FLOAT_MODE_P (mode))
9465 rs6000_passes_float = true;
9466 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9467 rs6000_passes_vector = true;
9468 else if (SPE_VECTOR_MODE (mode)
9469 && !cum->stdarg
9470 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9471 rs6000_passes_vector = true;
9473 #endif
9475 if (TARGET_ALTIVEC_ABI
9476 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9477 || (type && TREE_CODE (type) == VECTOR_TYPE
9478 && int_size_in_bytes (type) == 16)))
9480 bool stack = false;
9482 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9484 cum->vregno += n_elts;
9486 if (!TARGET_ALTIVEC)
9487 error ("cannot pass argument in vector register because"
9488 " altivec instructions are disabled, use -maltivec"
9489 " to enable them");
9491 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9492 even if it is going to be passed in a vector register.
9493 Darwin does the same for variable-argument functions. */
9494 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9495 && TARGET_64BIT)
9496 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9497 stack = true;
9499 else
9500 stack = true;
9502 if (stack)
9504 int align;
9506 /* Vector parameters must be 16-byte aligned. In 32-bit
9507 mode this means we need to take into account the offset
9508 to the parameter save area. In 64-bit mode, they just
9509 have to start on an even word, since the parameter save
9510 area is 16-byte aligned. */
9511 if (TARGET_32BIT)
9512 align = -(rs6000_parm_offset () + cum->words) & 3;
9513 else
9514 align = cum->words & 1;
9515 cum->words += align + rs6000_arg_size (mode, type);
9517 if (TARGET_DEBUG_ARG)
9519 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9520 cum->words, align);
9521 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9522 cum->nargs_prototype, cum->prototype,
9523 GET_MODE_NAME (mode));
9527 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9528 && !cum->stdarg
9529 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9530 cum->sysv_gregno++;
9532 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9534 int size = int_size_in_bytes (type);
9535 /* Variable sized types have size == -1 and are
9536 treated as if consisting entirely of ints.
9537 Pad to 16 byte boundary if needed. */
9538 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9539 && (cum->words % 2) != 0)
9540 cum->words++;
9541 /* For varargs, we can just go up by the size of the struct. */
9542 if (!named)
9543 cum->words += (size + 7) / 8;
9544 else
9546 /* It is tempting to say int register count just goes up by
9547 sizeof(type)/8, but this is wrong in a case such as
9548 { int; double; int; } [powerpc alignment]. We have to
9549 grovel through the fields for these too. */
9550 cum->intoffset = 0;
9551 cum->floats_in_gpr = 0;
9552 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9553 rs6000_darwin64_record_arg_advance_flush (cum,
9554 size * BITS_PER_UNIT, 1);
9556 if (TARGET_DEBUG_ARG)
9558 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
9559 cum->words, TYPE_ALIGN (type), size);
9560 fprintf (stderr,
9561 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9562 cum->nargs_prototype, cum->prototype,
9563 GET_MODE_NAME (mode));
9566 else if (DEFAULT_ABI == ABI_V4)
9568 if (TARGET_HARD_FLOAT && TARGET_FPRS
9569 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9570 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9571 || (mode == TFmode && !TARGET_IEEEQUAD)
9572 || mode == SDmode || mode == DDmode || mode == TDmode))
9574 /* _Decimal128 must use an even/odd register pair. This assumes
9575 that the register number is odd when fregno is odd. */
9576 if (mode == TDmode && (cum->fregno % 2) == 1)
9577 cum->fregno++;
9579 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9580 <= FP_ARG_V4_MAX_REG)
9581 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9582 else
9584 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9585 if (mode == DFmode || mode == TFmode
9586 || mode == DDmode || mode == TDmode)
9587 cum->words += cum->words & 1;
9588 cum->words += rs6000_arg_size (mode, type);
9591 else
9593 int n_words = rs6000_arg_size (mode, type);
9594 int gregno = cum->sysv_gregno;
9596 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9597 (r7,r8) or (r9,r10), as is any other 2-word item such
9598 as complex int, due to a historical mistake. */
9599 if (n_words == 2)
9600 gregno += (1 - gregno) & 1;
9602 /* Multi-reg args are not split between registers and stack. */
9603 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9605 /* Long long and SPE vectors are aligned on the stack.
9606 So are other 2-word items such as complex int due to
9607 a historical mistake. */
9608 if (n_words == 2)
9609 cum->words += cum->words & 1;
9610 cum->words += n_words;
9613 /* Note: we keep accumulating gregno even after arguments begin
9614 spilling to the stack; the out-of-range value tells
9615 expand_builtin_saveregs that spilling has started. */
9616 cum->sysv_gregno = gregno + n_words;
9619 if (TARGET_DEBUG_ARG)
9621 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9622 cum->words, cum->fregno);
9623 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9624 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9625 fprintf (stderr, "mode = %4s, named = %d\n",
9626 GET_MODE_NAME (mode), named);
9629 else
9631 int n_words = rs6000_arg_size (mode, type);
9632 int start_words = cum->words;
9633 int align_words = rs6000_parm_start (mode, type, start_words);
9635 cum->words = align_words + n_words;
9637 if (SCALAR_FLOAT_MODE_P (elt_mode)
9638 && TARGET_HARD_FLOAT && TARGET_FPRS)
9640 /* _Decimal128 must be passed in an even/odd float register pair.
9641 This assumes that the register number is odd when fregno is
9642 odd. */
9643 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9644 cum->fregno++;
9645 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9648 if (TARGET_DEBUG_ARG)
9650 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9651 cum->words, cum->fregno);
9652 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9653 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9654 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9655 named, align_words - start_words, depth);
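/* Editor's sketch, not part of GCC: the V.4 pairing rule used above.
   A 2-word argument must start in an odd-numbered GPR so that it lands
   in (r3,r4), (r5,r6), (r7,r8) or (r9,r10): gregno 4 is bumped to 5,
   while 3 and 5 stay put.  */
static int
example_pair_start (int gregno)
{
  return gregno + ((1 - gregno) & 1);
}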
9660 static void
9661 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
9662 const_tree type, bool named)
9664 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
9668 static rtx
9669 spe_build_register_parallel (enum machine_mode mode, int gregno)
9671 rtx r1, r3, r5, r7;
9673 switch (mode)
9675 case DFmode:
9676 r1 = gen_rtx_REG (DImode, gregno);
9677 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9678 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
9680 case DCmode:
9681 case TFmode:
9682 r1 = gen_rtx_REG (DImode, gregno);
9683 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9684 r3 = gen_rtx_REG (DImode, gregno + 2);
9685 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9686 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
9688 case TCmode:
9689 r1 = gen_rtx_REG (DImode, gregno);
9690 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9691 r3 = gen_rtx_REG (DImode, gregno + 2);
9692 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9693 r5 = gen_rtx_REG (DImode, gregno + 4);
9694 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
9695 r7 = gen_rtx_REG (DImode, gregno + 6);
9696 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
9697 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
9699 default:
9700 gcc_unreachable ();
9704 /* Determine where to put a SIMD argument on the SPE. */
9705 static rtx
9706 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
9707 const_tree type)
9709 int gregno = cum->sysv_gregno;
9711 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9712 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
9713 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
9714 || mode == DCmode || mode == TCmode))
9716 int n_words = rs6000_arg_size (mode, type);
9718 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9719 if (mode == DFmode)
9720 gregno += (1 - gregno) & 1;
9722 /* Multi-reg args are not split between registers and stack. */
9723 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9724 return NULL_RTX;
9726 return spe_build_register_parallel (mode, gregno);
9728 if (cum->stdarg)
9730 int n_words = rs6000_arg_size (mode, type);
9732 /* SPE vectors are put in odd registers. */
9733 if (n_words == 2 && (gregno & 1) == 0)
9734 gregno += 1;
9736 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
9738 rtx r1, r2;
9739 enum machine_mode m = SImode;
9741 r1 = gen_rtx_REG (m, gregno);
9742 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
9743 r2 = gen_rtx_REG (m, gregno + 1);
9744 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
9745 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
9747 else
9748 return NULL_RTX;
9750 else
9752 if (gregno <= GP_ARG_MAX_REG)
9753 return gen_rtx_REG (mode, gregno);
9754 else
9755 return NULL_RTX;
9759 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9760 structure between cum->intoffset and bitpos to integer registers. */
9762 static void
9763 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9764 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9766 enum machine_mode mode;
9767 unsigned int regno;
9768 unsigned int startbit, endbit;
9769 int this_regno, intregs, intoffset;
9770 rtx reg;
9772 if (cum->intoffset == -1)
9773 return;
9775 intoffset = cum->intoffset;
9776 cum->intoffset = -1;
9778 /* If this is the trailing part of a word, try to load only that
9779 much into the register. Otherwise load the whole register. Note
9780 that in the latter case we may pick up unwanted bits. It's not a
9781 problem at the moment, but we may wish to revisit this. */
9783 if (intoffset % BITS_PER_WORD != 0)
9785 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9786 MODE_INT, 0);
9787 if (mode == BLKmode)
9789 /* We couldn't find an appropriate mode, which happens,
9790 e.g., in packed structs when there are 3 bytes to load.
9791 Move intoffset back to the beginning of the word in this
9792 case. */
9793 intoffset = intoffset & -BITS_PER_WORD;
9794 mode = word_mode;
9797 else
9798 mode = word_mode;
9800 startbit = intoffset & -BITS_PER_WORD;
9801 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9802 intregs = (endbit - startbit) / BITS_PER_WORD;
9803 this_regno = cum->words + intoffset / BITS_PER_WORD;
9805 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9806 cum->use_stack = 1;
9808 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9809 if (intregs <= 0)
9810 return;
9812 intoffset /= BITS_PER_UNIT;
9815 regno = GP_ARG_MIN_REG + this_regno;
9816 reg = gen_rtx_REG (mode, regno);
9817 rvec[(*k)++] =
9818 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9820 this_regno += 1;
9821 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9822 mode = word_mode;
9823 intregs -= 1;
9825 while (intregs > 0);
9828 /* Recursive workhorse for the following. */
9830 static void
9831 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9832 HOST_WIDE_INT startbitpos, rtx rvec[],
9833 int *k)
9835 tree f;
9837 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9838 if (TREE_CODE (f) == FIELD_DECL)
9840 HOST_WIDE_INT bitpos = startbitpos;
9841 tree ftype = TREE_TYPE (f);
9842 enum machine_mode mode;
9843 if (ftype == error_mark_node)
9844 continue;
9845 mode = TYPE_MODE (ftype);
9847 if (DECL_SIZE (f) != 0
9848 && tree_fits_uhwi_p (bit_position (f)))
9849 bitpos += int_bit_position (f);
9851 /* ??? FIXME: else assume zero offset. */
9853 if (TREE_CODE (ftype) == RECORD_TYPE)
9854 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9855 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
9857 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9858 #if 0
9859 switch (mode)
9861 case SCmode: mode = SFmode; break;
9862 case DCmode: mode = DFmode; break;
9863 case TCmode: mode = TFmode; break;
9864 default: break;
9866 #endif
9867 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9868 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9870 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9871 && (mode == TFmode || mode == TDmode));
9872 /* Long double or _Decimal128 split over regs and memory. */
9873 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9874 cum->use_stack = 1;
9876 rvec[(*k)++]
9877 = gen_rtx_EXPR_LIST (VOIDmode,
9878 gen_rtx_REG (mode, cum->fregno++),
9879 GEN_INT (bitpos / BITS_PER_UNIT));
9880 if (mode == TFmode || mode == TDmode)
9881 cum->fregno++;
9883 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9885 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9886 rvec[(*k)++]
9887 = gen_rtx_EXPR_LIST (VOIDmode,
9888 gen_rtx_REG (mode, cum->vregno++),
9889 GEN_INT (bitpos / BITS_PER_UNIT));
9891 else if (cum->intoffset == -1)
9892 cum->intoffset = bitpos;
9896 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9897 the register(s) to be used for each field and subfield of a struct
9898 being passed by value, along with the offset of where the
9899 register's value may be found in the block. FP fields go in FP
9900 registers, vector fields go in vector registers, and everything
9901 else goes in int registers, packed as in memory.
9903 This code is also used for function return values. RETVAL indicates
9904 whether this is the case.
9906 Much of this is taken from the SPARC V9 port, which has a similar
9907 calling convention. */
9909 static rtx
9910 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
9911 bool named, bool retval)
9913 rtx rvec[FIRST_PSEUDO_REGISTER];
9914 int k = 1, kbase = 1;
9915 HOST_WIDE_INT typesize = int_size_in_bytes (type);
9916 /* This is a copy; modifications are not visible to our caller. */
9917 CUMULATIVE_ARGS copy_cum = *orig_cum;
9918 CUMULATIVE_ARGS *cum = &copy_cum;
9920 /* Pad to 16 byte boundary if needed. */
9921 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9922 && (cum->words % 2) != 0)
9923 cum->words++;
9925 cum->intoffset = 0;
9926 cum->use_stack = 0;
9927 cum->named = named;
9929 /* Put entries into rvec[] for individual FP and vector fields, and
9930 for the chunks of memory that go in int regs. Note we start at
9931 element 1; 0 is reserved for an indication of using memory, and
9932 may or may not be filled in below. */
9933 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
9934 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
9936 /* If any part of the struct went on the stack put all of it there.
9937 This hack is because the generic code for
9938 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
9939 parts of the struct are not at the beginning. */
9940 if (cum->use_stack)
9942 if (retval)
9943 return NULL_RTX; /* doesn't go in registers at all */
9944 kbase = 0;
9945 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9947 if (k > 1 || cum->use_stack)
9948 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
9949 else
9950 return NULL_RTX;
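/* Editor's sketch, not part of GCC: a struct that exercises the
   Darwin64 code above.  The double goes to an FPR via the recurse
   routine, while the two ints are flushed into GPR-sized chunks, so
   the resulting PARALLEL mixes register classes.  */
struct example_darwin64_mixed
{
  int i;			/* flushed into a GPR chunk */
  double d;			/* assigned to an FPR */
  int j;			/* another GPR chunk */
};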
9953 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9955 static rtx
9956 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
9957 int align_words)
9959 int n_units;
9960 int i, k;
9961 rtx rvec[GP_ARG_NUM_REG + 1];
9963 if (align_words >= GP_ARG_NUM_REG)
9964 return NULL_RTX;
9966 n_units = rs6000_arg_size (mode, type);
9968 /* Optimize the simple case where the arg fits in one gpr, except in
9969 the case of BLKmode due to assign_parms assuming that registers are
9970 BITS_PER_WORD wide. */
9971 if (n_units == 0
9972 || (n_units == 1 && mode != BLKmode))
9973 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9975 k = 0;
9976 if (align_words + n_units > GP_ARG_NUM_REG)
9977 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9978 using a magic NULL_RTX component.
9979 This is not strictly correct. Only some of the arg belongs in
9980 memory, not all of it. However, the normal scheme using
9981 function_arg_partial_nregs can result in unusual subregs, eg.
9982 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9983 store the whole arg to memory is often more efficient than code
9984 to store pieces, and we know that space is available in the right
9985 place for the whole arg. */
9986 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9988 i = 0;
9991 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
9992 rtx off = GEN_INT (i++ * 4);
9993 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9995 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
9997 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10000 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
10001 but must also be copied into the parameter save area starting at
10002 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
10003 to the GPRs and/or memory. Return the number of elements used. */
10005 static int
10006 rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
10007 int align_words, rtx *rvec)
10009 int k = 0;
10011 if (align_words < GP_ARG_NUM_REG)
10013 int n_words = rs6000_arg_size (mode, type);
10015 if (align_words + n_words > GP_ARG_NUM_REG
10016 || mode == BLKmode
10017 || (TARGET_32BIT && TARGET_POWERPC64))
10019 /* If this is partially on the stack, then we only
10020 include the portion actually in registers here. */
10021 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10022 int i = 0;
10024 if (align_words + n_words > GP_ARG_NUM_REG)
10026 /* Not all of the arg fits in gprs. Say that it goes in memory
10027 too, using a magic NULL_RTX component. Also see comment in
10028 rs6000_mixed_function_arg for why the normal
10029 function_arg_partial_nregs scheme doesn't work in this case. */
10030 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10035 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10036 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
10037 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10039 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10041 else
10043 /* The whole arg fits in gprs. */
10044 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10045 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
10048 else
10050 /* It's entirely in memory. */
10051 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10054 return k;
10057 /* RVEC is a vector of K components of an argument of mode MODE.
10058 Construct the final function_arg return value from it. */
10060 static rtx
10061 rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
10063 gcc_assert (k >= 1);
10065 /* Avoid returning a PARALLEL in the trivial cases. */
10066 if (k == 1)
10068 if (XEXP (rvec[0], 0) == NULL_RTX)
10069 return NULL_RTX;
10071 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
10072 return XEXP (rvec[0], 0);
10075 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10078 /* Determine where to put an argument to a function.
10079 Value is zero to push the argument on the stack,
10080 or a hard register in which to store the argument.
10082 MODE is the argument's machine mode.
10083 TYPE is the data type of the argument (as a tree).
10084 This is null for libcalls where that information may
10085 not be available.
10086 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10087 the preceding args and about the function being called. It is
10088 not modified in this routine.
10089 NAMED is nonzero if this argument is a named parameter
10090 (otherwise it is an extra parameter matching an ellipsis).
10092 On RS/6000 the first eight words of non-FP are normally in registers
10093 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10094 Under V.4, the first 8 FP args are in registers.
10096 If this is floating-point and no prototype is specified, we use
10097 both an FP and integer register (or possibly FP reg and stack). Library
10098 functions (when CALL_LIBCALL is set) always have the proper types for args,
10099 so we can pass the FP value just in one register. emit_library_function
10100 doesn't support PARALLEL anyway.
10102 Note that for args passed by reference, function_arg will be called
10103 with MODE and TYPE set to that of the pointer to the arg, not the arg
10104 itself. */
10106 static rtx
10107 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
10108 const_tree type, bool named)
10110 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10111 enum rs6000_abi abi = DEFAULT_ABI;
10112 enum machine_mode elt_mode;
10113 int n_elts;
10115 /* Return a marker to indicate whether CR1 needs to set or clear the
10116 bit that V.4 uses to say fp args were passed in registers.
10117 Assume that we don't need the marker for software floating point,
10118 or compiler generated library calls. */
10119 if (mode == VOIDmode)
10121 if (abi == ABI_V4
10122 && (cum->call_cookie & CALL_LIBCALL) == 0
10123 && (cum->stdarg
10124 || (cum->nargs_prototype < 0
10125 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10127 /* For the SPE, we need to crxor CR6 always. */
10128 if (TARGET_SPE_ABI)
10129 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10130 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10131 return GEN_INT (cum->call_cookie
10132 | ((cum->fregno == FP_ARG_MIN_REG)
10133 ? CALL_V4_SET_FP_ARGS
10134 : CALL_V4_CLEAR_FP_ARGS));
10137 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10140 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10142 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10144 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
10145 if (rslt != NULL_RTX)
10146 return rslt;
10147 /* Else fall through to usual handling. */
10150 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10152 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10153 rtx r, off;
10154 int i, k = 0;
10156 /* Do we also need to pass this argument in the parameter
10157 save area? */
10158 if (TARGET_64BIT && ! cum->prototype)
10160 int align_words = (cum->words + 1) & ~1;
10161 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10164 /* Describe where this argument goes in the vector registers. */
10165 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10167 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10168 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10169 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10172 return rs6000_finish_function_arg (mode, rvec, k);
10174 else if (TARGET_ALTIVEC_ABI
10175 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10176 || (type && TREE_CODE (type) == VECTOR_TYPE
10177 && int_size_in_bytes (type) == 16)))
10179 if (named || abi == ABI_V4)
10180 return NULL_RTX;
10181 else
10183 /* Vector parameters to varargs functions under AIX or Darwin
10184 get passed in memory and possibly also in GPRs. */
10185 int align, align_words, n_words;
10186 enum machine_mode part_mode;
10188 /* Vector parameters must be 16-byte aligned. In 32-bit
10189 mode this means we need to take into account the offset
10190 to the parameter save area. In 64-bit mode, they just
10191 have to start on an even word, since the parameter save
10192 area is 16-byte aligned. */
10193 if (TARGET_32BIT)
10194 align = -(rs6000_parm_offset () + cum->words) & 3;
10195 else
10196 align = cum->words & 1;
10197 align_words = cum->words + align;
10199 /* Out of registers? Memory, then. */
10200 if (align_words >= GP_ARG_NUM_REG)
10201 return NULL_RTX;
10203 if (TARGET_32BIT && TARGET_POWERPC64)
10204 return rs6000_mixed_function_arg (mode, type, align_words);
10206 /* The vector value goes in GPRs. Only the part of the
10207 value in GPRs is reported here. */
10208 part_mode = mode;
10209 n_words = rs6000_arg_size (mode, type);
10210 if (align_words + n_words > GP_ARG_NUM_REG)
10211 /* Fortunately, there are only two possibilities: the value
10212 is either wholly in GPRs or half in GPRs and half not. */
10213 part_mode = DImode;
10215 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10218 else if (TARGET_SPE_ABI && TARGET_SPE
10219 && (SPE_VECTOR_MODE (mode)
10220 || (TARGET_E500_DOUBLE && (mode == DFmode
10221 || mode == DCmode
10222 || mode == TFmode
10223 || mode == TCmode))))
10224 return rs6000_spe_function_arg (cum, mode, type);
10226 else if (abi == ABI_V4)
10228 if (TARGET_HARD_FLOAT && TARGET_FPRS
10229 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10230 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10231 || (mode == TFmode && !TARGET_IEEEQUAD)
10232 || mode == SDmode || mode == DDmode || mode == TDmode))
10234 /* _Decimal128 must use an even/odd register pair. This assumes
10235 that the register number is odd when fregno is odd. */
10236 if (mode == TDmode && (cum->fregno % 2) == 1)
10237 cum->fregno++;
10239 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10240 <= FP_ARG_V4_MAX_REG)
10241 return gen_rtx_REG (mode, cum->fregno);
10242 else
10243 return NULL_RTX;
10245 else
10247 int n_words = rs6000_arg_size (mode, type);
10248 int gregno = cum->sysv_gregno;
10250 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10251 (r7,r8) or (r9,r10), as is any other 2-word item such
10252 as complex int, due to a historical mistake. */
10253 if (n_words == 2)
10254 gregno += (1 - gregno) & 1;
10256 /* Multi-reg args are not split between registers and stack. */
10257 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10258 return NULL_RTX;
10260 if (TARGET_32BIT && TARGET_POWERPC64)
10261 return rs6000_mixed_function_arg (mode, type,
10262 gregno - GP_ARG_MIN_REG);
10263 return gen_rtx_REG (mode, gregno);
10266 else
10268 int align_words = rs6000_parm_start (mode, type, cum->words);
10270 /* _Decimal128 must be passed in an even/odd float register pair.
10271 This assumes that the register number is odd when fregno is odd. */
10272 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10273 cum->fregno++;
10275 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10277 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10278 rtx r, off;
10279 int i, k = 0;
10280 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10281 int fpr_words;
10283 /* Do we also need to pass this argument in the parameter
10284 save area? */
10285 if (type && (cum->nargs_prototype <= 0
10286 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10287 && TARGET_XL_COMPAT
10288 && align_words >= GP_ARG_NUM_REG)))
10289 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10291 /* Describe where this argument goes in the fprs. */
10292 for (i = 0; i < n_elts
10293 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10295 /* Check if the argument is split over registers and memory.
10296 This can only ever happen for long double or _Decimal128;
10297 complex types are handled via split_complex_arg. */
10298 enum machine_mode fmode = elt_mode;
10299 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10301 gcc_assert (fmode == TFmode || fmode == TDmode);
10302 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10305 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10306 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10307 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10310 /* If there were not enough FPRs to hold the argument, the rest
10311 usually goes into memory. However, if the current position
10312 is still within the register parameter area, a portion may
10313 actually have to go into GPRs.
10315 Note that it may happen that the portion of the argument
10316 passed in the first "half" of the first GPR was already
10317 passed in the last FPR as well.
10319 For unnamed arguments, we already set up GPRs to cover the
10320 whole argument in rs6000_psave_function_arg, so there is
10321 nothing further to do at this point. */
10322 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
10323 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
10324 && cum->nargs_prototype > 0)
10326 static bool warned;
10328 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10329 int n_words = rs6000_arg_size (mode, type);
10331 align_words += fpr_words;
10332 n_words -= fpr_words;
10336 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10337 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
10338 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10340 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10342 if (!warned && warn_psabi)
10344 warned = true;
10345 inform (input_location,
10346 "the ABI of passing homogeneous float aggregates"
10347 " has changed in GCC 4.10");
10351 return rs6000_finish_function_arg (mode, rvec, k);
10353 else if (align_words < GP_ARG_NUM_REG)
10355 if (TARGET_32BIT && TARGET_POWERPC64)
10356 return rs6000_mixed_function_arg (mode, type, align_words);
10358 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10360 else
10361 return NULL_RTX;
10365 /* For an arg passed partly in registers and partly in memory, this is
10366 the number of bytes passed in registers. For args passed entirely in
10367 registers or entirely in memory, zero. When an arg is described by a
10368 PARALLEL, perhaps using more than one register type, this function
10369 returns the number of bytes used by the first element of the PARALLEL. */
10371 static int
10372 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
10373 tree type, bool named)
10375 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10376 bool passed_in_gprs = true;
10377 int ret = 0;
10378 int align_words;
10379 enum machine_mode elt_mode;
10380 int n_elts;
10382 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10384 if (DEFAULT_ABI == ABI_V4)
10385 return 0;
10387 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10389 /* If we are passing this arg in the fixed parameter save area
10390 (gprs or memory) as well as VRs, we do not use the partial
10391 bytes mechanism; instead, rs6000_function_arg will return a
10392 PARALLEL including a memory element as necessary. */
10393 if (TARGET_64BIT && ! cum->prototype)
10394 return 0;
10396 /* Otherwise, we pass in VRs only. Check for partial copies. */
10397 passed_in_gprs = false;
10398 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10399 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10402 /* In this complicated case we just disable the partial_nregs code. */
10403 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10404 return 0;
10406 align_words = rs6000_parm_start (mode, type, cum->words);
10408 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10410 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10412 /* If we are passing this arg in the fixed parameter save area
10413 (gprs or memory) as well as FPRs, we do not use the partial
10414 bytes mechanism; instead, rs6000_function_arg will return a
10415 PARALLEL including a memory element as necessary. */
10416 if (type
10417 && (cum->nargs_prototype <= 0
10418 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10419 && TARGET_XL_COMPAT
10420 && align_words >= GP_ARG_NUM_REG)))
10421 return 0;
10423 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10424 passed_in_gprs = false;
10425 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10427 /* Compute number of bytes / words passed in FPRs. If there
10428 is still space available in the register parameter area
10429 *after* that amount, a part of the argument will be passed
10430 in GPRs. In that case, the total amount passed in any
10431 registers is equal to the amount that would have been passed
10432 in GPRs if everything were passed there, so we fall back to
10433 the GPR code below to compute the appropriate value. */
10434 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10435 * MIN (8, GET_MODE_SIZE (elt_mode)));
10436 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
10438 if (align_words + fpr_words < GP_ARG_NUM_REG)
10439 passed_in_gprs = true;
10440 else
10441 ret = fpr;
10445 if (passed_in_gprs
10446 && align_words < GP_ARG_NUM_REG
10447 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10448 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10450 if (ret != 0 && TARGET_DEBUG_ARG)
10451 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10453 return ret;
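/* Editor's sketch, not part of GCC: the final GPR computation above on
   concrete numbers, assuming 32-bit words and the usual 8 argument
   GPRs (r3..r10).  A 5-word argument starting at word 6 fits 2 words
   in registers, so (8 - 6) * 4 == 8 bytes are reported.  */
static int
example_partial_bytes (int align_words, int n_words)
{
  const int n_arg_gprs = 8;	/* assumption: GP_ARG_NUM_REG */
  if (align_words < n_arg_gprs && n_arg_gprs < align_words + n_words)
    return (n_arg_gprs - align_words) * 4;
  return 0;
}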
10456 /* A C expression that indicates when an argument must be passed by
10457 reference. If nonzero for an argument, a copy of that argument is
10458 made in memory and a pointer to the argument is passed instead of
10459 the argument itself. The pointer is passed in whatever way is
10460 appropriate for passing a pointer to that type.
10462 Under V.4, aggregates and long double are passed by reference.
10464 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10465 reference unless the AltiVec vector extension ABI is in force.
10467 As an extension to all ABIs, variable sized types are passed by
10468 reference. */
10470 static bool
10471 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10472 enum machine_mode mode, const_tree type,
10473 bool named ATTRIBUTE_UNUSED)
10475 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10477 if (TARGET_DEBUG_ARG)
10478 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10479 return 1;
10482 if (!type)
10483 return 0;
10485 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10487 if (TARGET_DEBUG_ARG)
10488 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10489 return 1;
10492 if (int_size_in_bytes (type) < 0)
10494 if (TARGET_DEBUG_ARG)
10495 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10496 return 1;
10499 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10500 modes only exist for GCC vector types if -maltivec. */
10501 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10503 if (TARGET_DEBUG_ARG)
10504 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10505 return 1;
10508 /* Pass synthetic vectors in memory. */
10509 if (TREE_CODE (type) == VECTOR_TYPE
10510 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10512 static bool warned_for_pass_big_vectors = false;
10513 if (TARGET_DEBUG_ARG)
10514 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10515 if (!warned_for_pass_big_vectors)
10517 warning (0, "GCC vector passed by reference: "
10518 "non-standard ABI extension with no compatibility guarantee");
10519 warned_for_pass_big_vectors = true;
10521 return 1;
10524 return 0;
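/* Editor's sketch, not part of GCC: what the V.4 aggregate rule above
   means for user code.  The parameter below is an aggregate, so the
   caller makes a copy in memory and passes its address instead.  */
struct example_agg { int a[4]; };
extern void example_callee (struct example_agg);
static void
example_caller (struct example_agg x)
{
  example_callee (x);	/* V.4: really passes the address of a copy of X.  */
}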
10527 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
10528 already processed. Return true if the parameter must be passed
10529 (fully or partially) on the stack. */
10531 static bool
10532 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10534 enum machine_mode mode;
10535 int unsignedp;
10536 rtx entry_parm;
10538 /* Catch errors. */
10539 if (type == NULL || type == error_mark_node)
10540 return true;
10542 /* Handle types with no storage requirement. */
10543 if (TYPE_MODE (type) == VOIDmode)
10544 return false;
10546 /* Handle complex types. */
10547 if (TREE_CODE (type) == COMPLEX_TYPE)
10548 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10549 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10551 /* Handle transparent aggregates. */
10552 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10553 && TYPE_TRANSPARENT_AGGR (type))
10554 type = TREE_TYPE (first_field (type));
10556 /* See if this arg was passed by invisible reference. */
10557 if (pass_by_reference (get_cumulative_args (args_so_far),
10558 TYPE_MODE (type), type, true))
10559 type = build_pointer_type (type);
10561 /* Find mode as it is passed by the ABI. */
10562 unsignedp = TYPE_UNSIGNED (type);
10563 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10565 /* If it must be passed on the stack, we need stack space. */
10566 if (rs6000_must_pass_in_stack (mode, type))
10567 return true;
10569 /* If there is no incoming register, we need a stack. */
10570 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10571 if (entry_parm == NULL)
10572 return true;
10574 /* Likewise if we need to pass both in registers and on the stack. */
10575 if (GET_CODE (entry_parm) == PARALLEL
10576 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10577 return true;
10579 /* Also true if we're partially in registers and partially not. */
10580 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10581 return true;
10583 /* Update info on where next arg arrives in registers. */
10584 rs6000_function_arg_advance (args_so_far, mode, type, true);
10585 return false;
10588 /* Return true if FUN has no prototype, has a variable argument
10589 list, or passes any parameter in memory. */
10591 static bool
10592 rs6000_function_parms_need_stack (tree fun, bool incoming)
10594 tree fntype, result;
10595 CUMULATIVE_ARGS args_so_far_v;
10596 cumulative_args_t args_so_far;
10598 if (!fun)
10599 /* Must be a libcall, all of which only use reg parms. */
10600 return false;
10602 fntype = fun;
10603 if (!TYPE_P (fun))
10604 fntype = TREE_TYPE (fun);
10606 /* Varargs functions need the parameter save area. */
10607 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
10608 return true;
10610 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
10611 args_so_far = pack_cumulative_args (&args_so_far_v);
10613 /* When incoming, we will have been passed the function decl.
10614 It is necessary to use the decl to handle K&R style functions,
10615 where TYPE_ARG_TYPES may not be available. */
10616 if (incoming)
10618 gcc_assert (DECL_P (fun));
10619 result = DECL_RESULT (fun);
10621 else
10622 result = TREE_TYPE (fntype);
10624 if (result && aggregate_value_p (result, fntype))
10626 if (!TYPE_P (result))
10627 result = TREE_TYPE (result);
10628 result = build_pointer_type (result);
10629 rs6000_parm_needs_stack (args_so_far, result);
10632 if (incoming)
10634 tree parm;
10636 for (parm = DECL_ARGUMENTS (fun);
10637 parm && parm != void_list_node;
10638 parm = TREE_CHAIN (parm))
10639 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
10640 return true;
10642 else
10644 function_args_iterator args_iter;
10645 tree arg_type;
10647 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
10648 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10649 return true;
10652 return false;
10655 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10656 usually a constant depending on the ABI. However, in the ELFv2 ABI
10657 the register parameter area is optional when calling a function that
10658 has a prototype in scope, has no variable argument list, and passes
10659 all parameters in registers. */
int
10662 rs6000_reg_parm_stack_space (tree fun, bool incoming)
10664 int reg_parm_stack_space;
10666 switch (DEFAULT_ABI)
10668 default:
10669 reg_parm_stack_space = 0;
10670 break;
10672 case ABI_AIX:
10673 case ABI_DARWIN:
10674 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10675 break;
10677 case ABI_ELFv2:
10678 /* ??? Recomputing this every time is a bit expensive. Is there
10679 a place to cache this information? */
10680 if (rs6000_function_parms_need_stack (fun, incoming))
10681 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10682 else
10683 reg_parm_stack_space = 0;
10684 break;
10687 return reg_parm_stack_space;
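/* Editor's sketch, not part of GCC: the sizes the switch above yields.
   AIX and Darwin always reserve one save slot per argument GPR, so the
   area is 8 * 8 == 64 bytes on 64-bit and 8 * 4 == 32 bytes on 32-bit;
   V.4 reserves none, and ELFv2 reserves the area only on demand.  */
static int
example_aix_reg_parm_bytes (int is_64bit)
{
  return 8 * (is_64bit ? 8 : 4);
}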
10690 static void
10691 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
10693 int i;
10694 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
10696 if (nregs == 0)
10697 return;
10699 for (i = 0; i < nregs; i++)
10701 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
10702 if (reload_completed)
10704 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
10705 tem = NULL_RTX;
10706 else
10707 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
10708 i * GET_MODE_SIZE (reg_mode));
10710 else
10711 tem = replace_equiv_address (tem, XEXP (tem, 0));
10713 gcc_assert (tem);
10715 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
10719 /* Perform any actions needed for a function that is receiving a
10720 variable number of arguments.
10722 CUM is as above.
10724 MODE and TYPE are the mode and type of the current parameter.
10726 PRETEND_SIZE is a variable that should be set to the amount of stack
10727 that must be pushed by the prolog to pretend that our caller pushed it.
10730 Normally, this macro will push all remaining incoming registers on the
10731 stack and set PRETEND_SIZE to the length of the registers pushed. */
10733 static void
10734 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
10735 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10736 int no_rtl)
10738 CUMULATIVE_ARGS next_cum;
10739 int reg_size = TARGET_32BIT ? 4 : 8;
10740 rtx save_area = NULL_RTX, mem;
10741 int first_reg_offset;
10742 alias_set_type set;
10744 /* Skip the last named argument. */
10745 next_cum = *get_cumulative_args (cum);
10746 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
10748 if (DEFAULT_ABI == ABI_V4)
10750 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
10752 if (! no_rtl)
10754 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
10755 HOST_WIDE_INT offset = 0;
10757 /* Try to optimize the size of the varargs save area.
10758 The ABI requires that ap.reg_save_area is doubleword
10759 aligned, but we don't need to allocate space for all
10760 the bytes, only those to which we actually will save
10761 anything. */
10762 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
10763 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
10764 if (TARGET_HARD_FLOAT && TARGET_FPRS
10765 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10766 && cfun->va_list_fpr_size)
10768 if (gpr_reg_num)
10769 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
10770 * UNITS_PER_FP_WORD;
10771 if (cfun->va_list_fpr_size
10772 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10773 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
10774 else
10775 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10776 * UNITS_PER_FP_WORD;
10778 if (gpr_reg_num)
10780 offset = -((first_reg_offset * reg_size) & ~7);
10781 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
10783 gpr_reg_num = cfun->va_list_gpr_size;
10784 if (reg_size == 4 && (first_reg_offset & 1))
10785 gpr_reg_num++;
10787 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
10789 else if (fpr_size)
10790 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
10791 * UNITS_PER_FP_WORD
10792 - (int) (GP_ARG_NUM_REG * reg_size);
10794 if (gpr_size + fpr_size)
10796 rtx reg_save_area
10797 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
10798 gcc_assert (GET_CODE (reg_save_area) == MEM);
10799 reg_save_area = XEXP (reg_save_area, 0);
10800 if (GET_CODE (reg_save_area) == PLUS)
10802 gcc_assert (XEXP (reg_save_area, 0)
10803 == virtual_stack_vars_rtx);
10804 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
10805 offset += INTVAL (XEXP (reg_save_area, 1));
10807 else
10808 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
10811 cfun->machine->varargs_save_offset = offset;
10812 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
10815 else
10817 first_reg_offset = next_cum.words;
10818 save_area = virtual_incoming_args_rtx;
10820 if (targetm.calls.must_pass_in_stack (mode, type))
10821 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
10824 set = get_varargs_alias_set ();
10825 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
10826 && cfun->va_list_gpr_size)
10828 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
10830 if (va_list_gpr_counter_field)
10831 /* V4 va_list_gpr_size counts number of registers needed. */
10832 n_gpr = cfun->va_list_gpr_size;
10833 else
10834 /* char * va_list instead counts number of bytes needed. */
10835 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
10837 if (nregs > n_gpr)
10838 nregs = n_gpr;
10840 mem = gen_rtx_MEM (BLKmode,
10841 plus_constant (Pmode, save_area,
10842 first_reg_offset * reg_size));
10843 MEM_NOTRAP_P (mem) = 1;
10844 set_mem_alias_set (mem, set);
10845 set_mem_align (mem, BITS_PER_WORD);
10847 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
10848 nregs);
10851 /* Save FP registers if needed. */
10852 if (DEFAULT_ABI == ABI_V4
10853 && TARGET_HARD_FLOAT && TARGET_FPRS
10854 && ! no_rtl
10855 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10856 && cfun->va_list_fpr_size)
10858 int fregno = next_cum.fregno, nregs;
10859 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
10860 rtx lab = gen_label_rtx ();
10861 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
10862 * UNITS_PER_FP_WORD);
10864 emit_jump_insn
10865 (gen_rtx_SET (VOIDmode,
10866 pc_rtx,
10867 gen_rtx_IF_THEN_ELSE (VOIDmode,
10868 gen_rtx_NE (VOIDmode, cr1,
10869 const0_rtx),
10870 gen_rtx_LABEL_REF (VOIDmode, lab),
10871 pc_rtx)));
10873 for (nregs = 0;
10874 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
10875 fregno++, off += UNITS_PER_FP_WORD, nregs++)
10877 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10878 ? DFmode : SFmode,
10879 plus_constant (Pmode, save_area, off));
10880 MEM_NOTRAP_P (mem) = 1;
10881 set_mem_alias_set (mem, set);
10882 set_mem_align (mem, GET_MODE_ALIGNMENT (
10883 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10884 ? DFmode : SFmode));
10885 emit_move_insn (mem, gen_rtx_REG (
10886 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10887 ? DFmode : SFmode, fregno));
10890 emit_label (lab);
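/* Editor's sketch, not part of GCC: the OFF computation above,
   assuming a 32-bit V.4 target (8 argument GPRs of 4 bytes each, FP
   registers saved as 8-byte doubles).  The save area holds the GPR
   block first, so FP argument register N is saved at byte 32 + N * 8.  */
static int
example_fpr_save_offset (int fpr_index)
{
  return 8 * 4 + fpr_index * 8;
}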
10894 /* Create the va_list data type. */
10896 static tree
10897 rs6000_build_builtin_va_list (void)
10899 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
10901 /* For AIX, prefer 'char *' because that's what the system
10902 header files like. */
10903 if (DEFAULT_ABI != ABI_V4)
10904 return build_pointer_type (char_type_node);
10906 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
10907 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
10908 get_identifier ("__va_list_tag"), record);
10910 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
10911 unsigned_char_type_node);
10912 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
10913 unsigned_char_type_node);
10914 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
10915 every user file. */
10916 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10917 get_identifier ("reserved"), short_unsigned_type_node);
10918 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10919 get_identifier ("overflow_arg_area"),
10920 ptr_type_node);
10921 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10922 get_identifier ("reg_save_area"),
10923 ptr_type_node);
10925 va_list_gpr_counter_field = f_gpr;
10926 va_list_fpr_counter_field = f_fpr;
10928 DECL_FIELD_CONTEXT (f_gpr) = record;
10929 DECL_FIELD_CONTEXT (f_fpr) = record;
10930 DECL_FIELD_CONTEXT (f_res) = record;
10931 DECL_FIELD_CONTEXT (f_ovf) = record;
10932 DECL_FIELD_CONTEXT (f_sav) = record;
10934 TYPE_STUB_DECL (record) = type_decl;
10935 TYPE_NAME (record) = type_decl;
10936 TYPE_FIELDS (record) = f_gpr;
10937 DECL_CHAIN (f_gpr) = f_fpr;
10938 DECL_CHAIN (f_fpr) = f_res;
10939 DECL_CHAIN (f_res) = f_ovf;
10940 DECL_CHAIN (f_ovf) = f_sav;
10942 layout_type (record);
10944 /* The correct type is an array type of one element. */
10945 return build_array_type (record, build_index_type (size_zero_node));
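/* For illustration only: the record built above corresponds to the
   familiar SVR4 PowerPC declaration (sketch; the trailing notes reflect
   how this file uses each field):

       typedef struct __va_list_tag {
         unsigned char gpr;            -- next available GP arg register
         unsigned char fpr;            -- next available FP arg register
         unsigned short reserved;      -- padding, named for -Wpadded
         void *overflow_arg_area;      -- arguments passed on the stack
         void *reg_save_area;          -- registers saved by the prologue
       } __builtin_va_list[1];  */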
10948 /* Implement va_start. */
10950 static void
10951 rs6000_va_start (tree valist, rtx nextarg)
10953 HOST_WIDE_INT words, n_gpr, n_fpr;
10954 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10955 tree gpr, fpr, ovf, sav, t;
10957 /* Only SVR4 needs something special. */
10958 if (DEFAULT_ABI != ABI_V4)
10960 std_expand_builtin_va_start (valist, nextarg);
10961 return;
10964 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10965 f_fpr = DECL_CHAIN (f_gpr);
10966 f_res = DECL_CHAIN (f_fpr);
10967 f_ovf = DECL_CHAIN (f_res);
10968 f_sav = DECL_CHAIN (f_ovf);
10970 valist = build_simple_mem_ref (valist);
10971 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
10972 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
10973 f_fpr, NULL_TREE);
10974 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
10975 f_ovf, NULL_TREE);
10976 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
10977 f_sav, NULL_TREE);
10979 /* Count number of gp and fp argument registers used. */
10980 words = crtl->args.info.words;
10981 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
10982 GP_ARG_NUM_REG);
10983 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
10984 FP_ARG_NUM_REG);
10986 if (TARGET_DEBUG_ARG)
10987 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
10988 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
10989 words, n_gpr, n_fpr);
10991 if (cfun->va_list_gpr_size)
10993 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
10994 build_int_cst (NULL_TREE, n_gpr));
10995 TREE_SIDE_EFFECTS (t) = 1;
10996 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10999 if (cfun->va_list_fpr_size)
11001 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11002 build_int_cst (NULL_TREE, n_fpr));
11003 TREE_SIDE_EFFECTS (t) = 1;
11004 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11006 #ifdef HAVE_AS_GNU_ATTRIBUTE
11007 if (call_ABI_of_interest (cfun->decl))
11008 rs6000_passes_float = true;
11009 #endif
11012 /* Find the overflow area. */
11013 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11014 if (words != 0)
11015 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
11016 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11017 TREE_SIDE_EFFECTS (t) = 1;
11018 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11020 /* If there were no va_arg invocations, don't set up the register
11021 save area. */
11022 if (!cfun->va_list_gpr_size
11023 && !cfun->va_list_fpr_size
11024 && n_gpr < GP_ARG_NUM_REG
11025 && n_fpr < FP_ARG_V4_MAX_REG)
11026 return;
11028 /* Find the register save area. */
11029 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
11030 if (cfun->machine->varargs_save_offset)
11031 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
11032 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11033 TREE_SIDE_EFFECTS (t) = 1;
11034 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
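/* Net effect of the expansion above for the V4 case, as pseudo-C
   (sketch only; "ap" stands for the user's va_list):

       ap->gpr = n_gpr;     -- GP argument registers already consumed
       ap->fpr = n_fpr;     -- FP argument registers already consumed
       ap->overflow_arg_area = incoming_args + words * UNITS_PER_WORD;
       ap->reg_save_area = frame + cfun->machine->varargs_save_offset;  */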
11037 /* Implement va_arg. */
11039 static tree
11040 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11041 gimple_seq *post_p)
11043 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11044 tree gpr, fpr, ovf, sav, reg, t, u;
11045 int size, rsize, n_reg, sav_ofs, sav_scale;
11046 tree lab_false, lab_over, addr;
11047 int align;
11048 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
11049 int regalign = 0;
11050 gimple stmt;
11052 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11054 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
11055 return build_va_arg_indirect_ref (t);
11058 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
11059 earlier version of gcc, with the property that it always applied alignment
11060 adjustments to the va-args (even for zero-sized types). The cheapest way
11061 to deal with this is to replicate the effect of the part of
11062 std_gimplify_va_arg_expr that carries out the align adjustment, for the
11063 case of relevance.
11064 We don't need to check for pass-by-reference because of the test above.
11065 We can return a simplified answer, since we know there's no offset to add. */
11067 if (((TARGET_MACHO
11068 && rs6000_darwin64_abi)
11069 || DEFAULT_ABI == ABI_ELFv2
11070 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
11071 && integer_zerop (TYPE_SIZE (type)))
11073 unsigned HOST_WIDE_INT align, boundary;
11074 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
11075 align = PARM_BOUNDARY / BITS_PER_UNIT;
11076 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
11077 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
11078 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
11079 boundary /= BITS_PER_UNIT;
11080 if (boundary > align)
11082 tree t;
11083 /* This updates arg ptr by the amount that would be necessary
11084 to align the zero-sized (but not zero-alignment) item. */
11085 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11086 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
11087 gimplify_and_add (t, pre_p);
11089 t = fold_convert (sizetype, valist_tmp);
11090 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11091 fold_convert (TREE_TYPE (valist),
11092 fold_build2 (BIT_AND_EXPR, sizetype, t,
11093 size_int (-boundary))));
11094 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
11095 gimplify_and_add (t, pre_p);
11097 /* Since it is zero-sized there's no increment for the item itself. */
11098 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
11099 return build_va_arg_indirect_ref (valist_tmp);
11102 if (DEFAULT_ABI != ABI_V4)
11104 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
11106 tree elem_type = TREE_TYPE (type);
11107 enum machine_mode elem_mode = TYPE_MODE (elem_type);
11108 int elem_size = GET_MODE_SIZE (elem_mode);
11110 if (elem_size < UNITS_PER_WORD)
11112 tree real_part, imag_part;
11113 gimple_seq post = NULL;
11115 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11116 &post);
11117 /* Copy the value into a temporary, lest the formal temporary
11118 be reused out from under us. */
11119 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
11120 gimple_seq_add_seq (pre_p, post);
11122 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11123 post_p);
11125 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
11129 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
11132 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11133 f_fpr = DECL_CHAIN (f_gpr);
11134 f_res = DECL_CHAIN (f_fpr);
11135 f_ovf = DECL_CHAIN (f_res);
11136 f_sav = DECL_CHAIN (f_ovf);
11138 valist = build_va_arg_indirect_ref (valist);
11139 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11140 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11141 f_fpr, NULL_TREE);
11142 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11143 f_ovf, NULL_TREE);
11144 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11145 f_sav, NULL_TREE);
11147 size = int_size_in_bytes (type);
11148 rsize = (size + 3) / 4;
11149 align = 1;
11151 if (TARGET_HARD_FLOAT && TARGET_FPRS
11152 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
11153 || (TARGET_DOUBLE_FLOAT
11154 && (TYPE_MODE (type) == DFmode
11155 || TYPE_MODE (type) == TFmode
11156 || TYPE_MODE (type) == SDmode
11157 || TYPE_MODE (type) == DDmode
11158 || TYPE_MODE (type) == TDmode))))
11160 /* FP args go in FP registers, if present. */
11161 reg = fpr;
11162 n_reg = (size + 7) / 8;
11163 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
11164 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
11165 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
11166 align = 8;
11168 else
11170 /* Otherwise into GP registers. */
11171 reg = gpr;
11172 n_reg = rsize;
11173 sav_ofs = 0;
11174 sav_scale = 4;
11175 if (n_reg == 2)
11176 align = 8;
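/* Two worked examples of the classification above, for a 32-bit V4
   target with hard float and doubles enabled (sketch):

       double:     size = 8, reg = fpr, n_reg = 1, sav_ofs = 32,
                   sav_scale = 8, align = 8
                   -- fetched from sav + 32 + fpr * 8
       long long:  size = 8, reg = gpr, n_reg = rsize = 2, sav_ofs = 0,
                   sav_scale = 4, align = 8
                   -- fetched from sav + gpr * 4, with gpr kept even  */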
11179 /* Pull the value out of the saved registers.... */
11181 lab_over = NULL;
11182 addr = create_tmp_var (ptr_type_node, "addr");
11184 /* AltiVec vectors never go in registers when -mabi=altivec. */
11185 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11186 align = 16;
11187 else
11189 lab_false = create_artificial_label (input_location);
11190 lab_over = create_artificial_label (input_location);
11192 /* Long long and SPE vectors are aligned in the registers.
11193 As are any other 2 gpr item such as complex int due to a
11194 historical mistake. */
11195 u = reg;
11196 if (n_reg == 2 && reg == gpr)
11198 regalign = 1;
11199 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11200 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11201 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11202 unshare_expr (reg), u);
11204 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11205 reg number is 0 for f1, so we want to make it odd. */
11206 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11208 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11209 build_int_cst (TREE_TYPE (reg), 1));
11210 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11213 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11214 t = build2 (GE_EXPR, boolean_type_node, u, t);
11215 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11216 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11217 gimplify_and_add (t, pre_p);
11219 t = sav;
11220 if (sav_ofs)
11221 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11223 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11224 build_int_cst (TREE_TYPE (reg), n_reg));
11225 u = fold_convert (sizetype, u);
11226 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11227 t = fold_build_pointer_plus (t, u);
11229 /* _Decimal32 varargs are located in the second word of the 64-bit
11230 FP register for 32-bit binaries. */
11231 if (!TARGET_POWERPC64
11232 && TARGET_HARD_FLOAT && TARGET_FPRS
11233 && TYPE_MODE (type) == SDmode)
11234 t = fold_build_pointer_plus_hwi (t, size);
11236 gimplify_assign (addr, t, pre_p);
11238 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11240 stmt = gimple_build_label (lab_false);
11241 gimple_seq_add_stmt (pre_p, stmt);
11243 if ((n_reg == 2 && !regalign) || n_reg > 2)
11245 /* Ensure that we don't find any more args in regs.
11246 Alignment has taken care of the special cases. */
11247 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11251 /* ... otherwise out of the overflow area. */
11253 /* Care for on-stack alignment if needed. */
11254 t = ovf;
11255 if (align != 1)
11257 t = fold_build_pointer_plus_hwi (t, align - 1);
11258 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11259 build_int_cst (TREE_TYPE (t), -align));
11261 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11263 gimplify_assign (unshare_expr (addr), t, pre_p);
11265 t = fold_build_pointer_plus_hwi (t, size);
11266 gimplify_assign (unshare_expr (ovf), t, pre_p);
11268 if (lab_over)
11270 stmt = gimple_build_label (lab_over);
11271 gimple_seq_add_stmt (pre_p, stmt);
11274 if (STRICT_ALIGNMENT
11275 && (TYPE_ALIGN (type)
11276 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11278 /* The value (of type complex double, for example) may not be
11279 aligned in memory in the saved registers, so copy via a
11280 temporary. (This is the same code as used for SPARC.) */
11281 tree tmp = create_tmp_var (type, "va_arg_tmp");
11282 tree dest_addr = build_fold_addr_expr (tmp);
11284 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11285 3, dest_addr, addr, size_int (rsize * 4));
11287 gimplify_and_add (copy, pre_p);
11288 addr = dest_addr;
11291 addr = fold_convert (ptrtype, addr);
11292 return build_va_arg_indirect_ref (addr);
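/* Summary of the register/overflow split gimplified above, as pseudo-C
   (sketch; the counter-alignment and TDmode adjustments are omitted):

       if (reg >= 8 - n_reg + 1) goto mem;      -- not enough arg regs left
       addr = sav + sav_ofs + reg * sav_scale;  -- fetch from save area
       reg += n_reg;
       goto done;
     mem:
       reg = 8;                                 -- stop looking in registers
       addr = (ovf + align - 1) & -align;       -- aligned overflow slot
       ovf = addr + size;
     done:
       return *(type *) addr;  */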
11295 /* Builtins. */
11297 static void
11298 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11300 tree t;
11301 unsigned classify = rs6000_builtin_info[(int)code].attr;
11302 const char *attr_string = "";
11304 gcc_assert (name != NULL);
11305 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11307 if (rs6000_builtin_decls[(int)code])
11308 fatal_error ("internal error: builtin function %s already processed", name);
11310 rs6000_builtin_decls[(int)code] = t =
11311 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11313 /* Set any special attributes. */
11314 if ((classify & RS6000_BTC_CONST) != 0)
11316 /* const function, function only depends on the inputs. */
11317 TREE_READONLY (t) = 1;
11318 TREE_NOTHROW (t) = 1;
11319 attr_string = ", const";
11321 else if ((classify & RS6000_BTC_PURE) != 0)
11323 /* pure function, function can read global memory, but does not set any
11324 external state. */
11325 DECL_PURE_P (t) = 1;
11326 TREE_NOTHROW (t) = 1;
11327 attr_string = ", pure";
11329 else if ((classify & RS6000_BTC_FP) != 0)
11331 /* Function is a math function. If rounding mode is on, then treat the
11332 function as not reading global memory, but it can have arbitrary side
11333 effects. If it is off, then assume the function is a const function.
11334 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11335 builtin-attrs.def that is used for the math functions. */
11336 TREE_NOTHROW (t) = 1;
11337 if (flag_rounding_math)
11339 DECL_PURE_P (t) = 1;
11340 DECL_IS_NOVOPS (t) = 1;
11341 attr_string = ", fp, pure";
11343 else
11345 TREE_READONLY (t) = 1;
11346 attr_string = ", fp, const";
11349 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11350 gcc_unreachable ();
11352 if (TARGET_DEBUG_BUILTIN)
11353 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11354 (int)code, name, attr_string);
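/* Typical use, from the *_init_builtins routines (the type-node name
   shown here is illustrative):

       def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                    ALTIVEC_BUILTIN_MTVSCR);

   which registers the function with the middle end and caches its decl
   in rs6000_builtin_decls for expansion later.  */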
11357 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11359 #undef RS6000_BUILTIN_1
11360 #undef RS6000_BUILTIN_2
11361 #undef RS6000_BUILTIN_3
11362 #undef RS6000_BUILTIN_A
11363 #undef RS6000_BUILTIN_D
11364 #undef RS6000_BUILTIN_E
11365 #undef RS6000_BUILTIN_H
11366 #undef RS6000_BUILTIN_P
11367 #undef RS6000_BUILTIN_Q
11368 #undef RS6000_BUILTIN_S
11369 #undef RS6000_BUILTIN_X
11371 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11372 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11373 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11374 { MASK, ICODE, NAME, ENUM },
11376 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11377 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11378 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11379 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11380 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11381 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11382 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11383 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11385 static const struct builtin_description bdesc_3arg[] =
11387 #include "rs6000-builtin.def"
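/* Illustration of the X-macro scheme used for this and the following
   tables (the entry shown is hypothetical): a line in rs6000-builtin.def
   such as

       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                         RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
                         CODE_FOR_altivec_vmaddfp)

   expands here to the initializer

       { RS6000_BTM_ALTIVEC, CODE_FOR_altivec_vmaddfp,
         "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while every other RS6000_BUILTIN_* line expands to nothing, so each
   bdesc_* table extracts exactly one class of builtins from the same
   .def file.  */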
11390 /* DST operations: void foo (void *, const int, const char). */
11392 #undef RS6000_BUILTIN_1
11393 #undef RS6000_BUILTIN_2
11394 #undef RS6000_BUILTIN_3
11395 #undef RS6000_BUILTIN_A
11396 #undef RS6000_BUILTIN_D
11397 #undef RS6000_BUILTIN_E
11398 #undef RS6000_BUILTIN_H
11399 #undef RS6000_BUILTIN_P
11400 #undef RS6000_BUILTIN_Q
11401 #undef RS6000_BUILTIN_S
11402 #undef RS6000_BUILTIN_X
11404 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11405 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11406 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11407 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11408 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11409 { MASK, ICODE, NAME, ENUM },
11411 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11412 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11413 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11414 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11415 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11416 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11418 static const struct builtin_description bdesc_dst[] =
11420 #include "rs6000-builtin.def"
11423 /* Simple binary operations: VECc = foo (VECa, VECb). */
11425 #undef RS6000_BUILTIN_1
11426 #undef RS6000_BUILTIN_2
11427 #undef RS6000_BUILTIN_3
11428 #undef RS6000_BUILTIN_A
11429 #undef RS6000_BUILTIN_D
11430 #undef RS6000_BUILTIN_E
11431 #undef RS6000_BUILTIN_H
11432 #undef RS6000_BUILTIN_P
11433 #undef RS6000_BUILTIN_Q
11434 #undef RS6000_BUILTIN_S
11435 #undef RS6000_BUILTIN_X
11437 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11438 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11439 { MASK, ICODE, NAME, ENUM },
11441 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11442 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11443 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11444 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11445 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11446 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11447 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11448 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11449 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11451 static const struct builtin_description bdesc_2arg[] =
11453 #include "rs6000-builtin.def"
11456 #undef RS6000_BUILTIN_1
11457 #undef RS6000_BUILTIN_2
11458 #undef RS6000_BUILTIN_3
11459 #undef RS6000_BUILTIN_A
11460 #undef RS6000_BUILTIN_D
11461 #undef RS6000_BUILTIN_E
11462 #undef RS6000_BUILTIN_H
11463 #undef RS6000_BUILTIN_P
11464 #undef RS6000_BUILTIN_Q
11465 #undef RS6000_BUILTIN_S
11466 #undef RS6000_BUILTIN_X
11468 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11469 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11470 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11471 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11472 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11473 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11474 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11475 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11476 { MASK, ICODE, NAME, ENUM },
11478 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11479 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11480 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11482 /* AltiVec predicates. */
11484 static const struct builtin_description bdesc_altivec_preds[] =
11486 #include "rs6000-builtin.def"
11489 /* SPE predicates. */
11490 #undef RS6000_BUILTIN_1
11491 #undef RS6000_BUILTIN_2
11492 #undef RS6000_BUILTIN_3
11493 #undef RS6000_BUILTIN_A
11494 #undef RS6000_BUILTIN_D
11495 #undef RS6000_BUILTIN_E
11496 #undef RS6000_BUILTIN_H
11497 #undef RS6000_BUILTIN_P
11498 #undef RS6000_BUILTIN_Q
11499 #undef RS6000_BUILTIN_S
11500 #undef RS6000_BUILTIN_X
11502 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11503 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11504 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11505 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11506 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11507 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11508 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11509 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11510 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11511 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11512 { MASK, ICODE, NAME, ENUM },
11514 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11516 static const struct builtin_description bdesc_spe_predicates[] =
11518 #include "rs6000-builtin.def"
11521 /* SPE evsel predicates. */
11522 #undef RS6000_BUILTIN_1
11523 #undef RS6000_BUILTIN_2
11524 #undef RS6000_BUILTIN_3
11525 #undef RS6000_BUILTIN_A
11526 #undef RS6000_BUILTIN_D
11527 #undef RS6000_BUILTIN_E
11528 #undef RS6000_BUILTIN_H
11529 #undef RS6000_BUILTIN_P
11530 #undef RS6000_BUILTIN_Q
11531 #undef RS6000_BUILTIN_S
11532 #undef RS6000_BUILTIN_X
11534 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11535 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11536 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11537 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11538 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11539 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11540 { MASK, ICODE, NAME, ENUM },
11542 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11543 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11544 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11545 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11546 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11548 static const struct builtin_description bdesc_spe_evsel[] =
11550 #include "rs6000-builtin.def"
11553 /* PAIRED predicates. */
11554 #undef RS6000_BUILTIN_1
11555 #undef RS6000_BUILTIN_2
11556 #undef RS6000_BUILTIN_3
11557 #undef RS6000_BUILTIN_A
11558 #undef RS6000_BUILTIN_D
11559 #undef RS6000_BUILTIN_E
11560 #undef RS6000_BUILTIN_H
11561 #undef RS6000_BUILTIN_P
11562 #undef RS6000_BUILTIN_Q
11563 #undef RS6000_BUILTIN_S
11564 #undef RS6000_BUILTIN_X
11566 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11567 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11568 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11569 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11570 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11571 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11572 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11573 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11574 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11575 { MASK, ICODE, NAME, ENUM },
11577 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11578 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11580 static const struct builtin_description bdesc_paired_preds[] =
11582 #include "rs6000-builtin.def"
11585 /* ABS* operations. */
11587 #undef RS6000_BUILTIN_1
11588 #undef RS6000_BUILTIN_2
11589 #undef RS6000_BUILTIN_3
11590 #undef RS6000_BUILTIN_A
11591 #undef RS6000_BUILTIN_D
11592 #undef RS6000_BUILTIN_E
11593 #undef RS6000_BUILTIN_H
11594 #undef RS6000_BUILTIN_P
11595 #undef RS6000_BUILTIN_Q
11596 #undef RS6000_BUILTIN_S
11597 #undef RS6000_BUILTIN_X
11599 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11600 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11601 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11602 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11603 { MASK, ICODE, NAME, ENUM },
11605 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11606 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11607 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11608 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11609 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11610 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11611 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11613 static const struct builtin_description bdesc_abs[] =
11615 #include "rs6000-builtin.def"
11618 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11619 foo (VECa). */
11621 #undef RS6000_BUILTIN_1
11622 #undef RS6000_BUILTIN_2
11623 #undef RS6000_BUILTIN_3
11624 #undef RS6000_BUILTIN_A
11625 #undef RS6000_BUILTIN_D
11626 #undef RS6000_BUILTIN_E
11627 #undef RS6000_BUILTIN_H
11628 #undef RS6000_BUILTIN_P
11629 #undef RS6000_BUILTIN_Q
11630 #undef RS6000_BUILTIN_S
11631 #undef RS6000_BUILTIN_X
11633 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11634 { MASK, ICODE, NAME, ENUM },
11636 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11637 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11638 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11639 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11640 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11641 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11642 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11643 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11644 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11645 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11647 static const struct builtin_description bdesc_1arg[] =
11649 #include "rs6000-builtin.def"
11652 /* HTM builtins. */
11653 #undef RS6000_BUILTIN_1
11654 #undef RS6000_BUILTIN_2
11655 #undef RS6000_BUILTIN_3
11656 #undef RS6000_BUILTIN_A
11657 #undef RS6000_BUILTIN_D
11658 #undef RS6000_BUILTIN_E
11659 #undef RS6000_BUILTIN_H
11660 #undef RS6000_BUILTIN_P
11661 #undef RS6000_BUILTIN_Q
11662 #undef RS6000_BUILTIN_S
11663 #undef RS6000_BUILTIN_X
11665 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11666 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11667 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11668 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11669 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11670 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11671 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11672 { MASK, ICODE, NAME, ENUM },
11674 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11675 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11676 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11677 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11679 static const struct builtin_description bdesc_htm[] =
11681 #include "rs6000-builtin.def"
11684 #undef RS6000_BUILTIN_1
11685 #undef RS6000_BUILTIN_2
11686 #undef RS6000_BUILTIN_3
11687 #undef RS6000_BUILTIN_A
11688 #undef RS6000_BUILTIN_D
11689 #undef RS6000_BUILTIN_E
11690 #undef RS6000_BUILTIN_H
11691 #undef RS6000_BUILTIN_P
11692 #undef RS6000_BUILTIN_Q
11693 #undef RS6000_BUILTIN_S
11694 #undef RS6000_BUILTIN_X
11695 /* Return true if a builtin function is overloaded. */
11696 bool
11697 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
11699 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
11702 /* Expand an expression EXP that calls a builtin without arguments. */
11703 static rtx
11704 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
11706 rtx pat;
11707 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11709 if (icode == CODE_FOR_nothing)
11710 /* Builtin not supported on this processor. */
11711 return 0;
11713 if (target == 0
11714 || GET_MODE (target) != tmode
11715 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11716 target = gen_reg_rtx (tmode);
11718 pat = GEN_FCN (icode) (target);
11719 if (! pat)
11720 return 0;
11721 emit_insn (pat);
11723 return target;
11727 static rtx
11728 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
11730 rtx pat;
11731 tree arg0 = CALL_EXPR_ARG (exp, 0);
11732 tree arg1 = CALL_EXPR_ARG (exp, 1);
11733 rtx op0 = expand_normal (arg0);
11734 rtx op1 = expand_normal (arg1);
11735 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11736 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11738 if (icode == CODE_FOR_nothing)
11739 /* Builtin not supported on this processor. */
11740 return 0;
11742 /* If we got invalid arguments bail out before generating bad rtl. */
11743 if (arg0 == error_mark_node || arg1 == error_mark_node)
11744 return const0_rtx;
11746 if (GET_CODE (op0) != CONST_INT
11747 || INTVAL (op0) > 255
11748 || INTVAL (op0) < 0)
11750 error ("argument 1 must be an 8-bit field value");
11751 return const0_rtx;
11754 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11755 op0 = copy_to_mode_reg (mode0, op0);
11757 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11758 op1 = copy_to_mode_reg (mode1, op1);
11760 pat = GEN_FCN (icode) (op0, op1);
11761 if (! pat)
11762 return const0_rtx;
11763 emit_insn (pat);
11765 return NULL_RTX;
11769 static rtx
11770 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
11772 rtx pat;
11773 tree arg0 = CALL_EXPR_ARG (exp, 0);
11774 rtx op0 = expand_normal (arg0);
11775 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11776 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11778 if (icode == CODE_FOR_nothing)
11779 /* Builtin not supported on this processor. */
11780 return 0;
11782 /* If we got invalid arguments bail out before generating bad rtl. */
11783 if (arg0 == error_mark_node)
11784 return const0_rtx;
11786 if (icode == CODE_FOR_altivec_vspltisb
11787 || icode == CODE_FOR_altivec_vspltish
11788 || icode == CODE_FOR_altivec_vspltisw
11789 || icode == CODE_FOR_spe_evsplatfi
11790 || icode == CODE_FOR_spe_evsplati)
11792 /* Only allow 5-bit *signed* literals. */
11793 if (GET_CODE (op0) != CONST_INT
11794 || INTVAL (op0) > 15
11795 || INTVAL (op0) < -16)
11797 error ("argument 1 must be a 5-bit signed literal");
11798 return const0_rtx;
11802 if (target == 0
11803 || GET_MODE (target) != tmode
11804 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11805 target = gen_reg_rtx (tmode);
11807 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11808 op0 = copy_to_mode_reg (mode0, op0);
11810 pat = GEN_FCN (icode) (target, op0);
11811 if (! pat)
11812 return 0;
11813 emit_insn (pat);
11815 return target;
11818 static rtx
11819 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
11821 rtx pat, scratch1, scratch2;
11822 tree arg0 = CALL_EXPR_ARG (exp, 0);
11823 rtx op0 = expand_normal (arg0);
11824 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11825 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11827 /* If we have invalid arguments, bail out before generating bad rtl. */
11828 if (arg0 == error_mark_node)
11829 return const0_rtx;
11831 if (target == 0
11832 || GET_MODE (target) != tmode
11833 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11834 target = gen_reg_rtx (tmode);
11836 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11837 op0 = copy_to_mode_reg (mode0, op0);
11839 scratch1 = gen_reg_rtx (mode0);
11840 scratch2 = gen_reg_rtx (mode0);
11842 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
11843 if (! pat)
11844 return 0;
11845 emit_insn (pat);
11847 return target;
11850 static rtx
11851 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
11853 rtx pat;
11854 tree arg0 = CALL_EXPR_ARG (exp, 0);
11855 tree arg1 = CALL_EXPR_ARG (exp, 1);
11856 rtx op0 = expand_normal (arg0);
11857 rtx op1 = expand_normal (arg1);
11858 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11859 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11860 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11862 if (icode == CODE_FOR_nothing)
11863 /* Builtin not supported on this processor. */
11864 return 0;
11866 /* If we got invalid arguments bail out before generating bad rtl. */
11867 if (arg0 == error_mark_node || arg1 == error_mark_node)
11868 return const0_rtx;
11870 if (icode == CODE_FOR_altivec_vcfux
11871 || icode == CODE_FOR_altivec_vcfsx
11872 || icode == CODE_FOR_altivec_vctsxs
11873 || icode == CODE_FOR_altivec_vctuxs
11874 || icode == CODE_FOR_altivec_vspltb
11875 || icode == CODE_FOR_altivec_vsplth
11876 || icode == CODE_FOR_altivec_vspltw
11877 || icode == CODE_FOR_spe_evaddiw
11878 || icode == CODE_FOR_spe_evldd
11879 || icode == CODE_FOR_spe_evldh
11880 || icode == CODE_FOR_spe_evldw
11881 || icode == CODE_FOR_spe_evlhhesplat
11882 || icode == CODE_FOR_spe_evlhhossplat
11883 || icode == CODE_FOR_spe_evlhhousplat
11884 || icode == CODE_FOR_spe_evlwhe
11885 || icode == CODE_FOR_spe_evlwhos
11886 || icode == CODE_FOR_spe_evlwhou
11887 || icode == CODE_FOR_spe_evlwhsplat
11888 || icode == CODE_FOR_spe_evlwwsplat
11889 || icode == CODE_FOR_spe_evrlwi
11890 || icode == CODE_FOR_spe_evslwi
11891 || icode == CODE_FOR_spe_evsrwis
11892 || icode == CODE_FOR_spe_evsubifw
11893 || icode == CODE_FOR_spe_evsrwiu)
11895 /* Only allow 5-bit unsigned literals. */
11896 STRIP_NOPS (arg1);
11897 if (TREE_CODE (arg1) != INTEGER_CST
11898 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11900 error ("argument 2 must be a 5-bit unsigned literal");
11901 return const0_rtx;
11905 if (target == 0
11906 || GET_MODE (target) != tmode
11907 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11908 target = gen_reg_rtx (tmode);
11910 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11911 op0 = copy_to_mode_reg (mode0, op0);
11912 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11913 op1 = copy_to_mode_reg (mode1, op1);
11915 pat = GEN_FCN (icode) (target, op0, op1);
11916 if (! pat)
11917 return 0;
11918 emit_insn (pat);
11920 return target;
11923 static rtx
11924 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11926 rtx pat, scratch;
11927 tree cr6_form = CALL_EXPR_ARG (exp, 0);
11928 tree arg0 = CALL_EXPR_ARG (exp, 1);
11929 tree arg1 = CALL_EXPR_ARG (exp, 2);
11930 rtx op0 = expand_normal (arg0);
11931 rtx op1 = expand_normal (arg1);
11932 enum machine_mode tmode = SImode;
11933 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11934 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11935 int cr6_form_int;
11937 if (TREE_CODE (cr6_form) != INTEGER_CST)
11939 error ("argument 1 of __builtin_altivec_predicate must be a constant");
11940 return const0_rtx;
11942 else
11943 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
11945 gcc_assert (mode0 == mode1);
11947 /* If we have invalid arguments, bail out before generating bad rtl. */
11948 if (arg0 == error_mark_node || arg1 == error_mark_node)
11949 return const0_rtx;
11951 if (target == 0
11952 || GET_MODE (target) != tmode
11953 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11954 target = gen_reg_rtx (tmode);
11956 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11957 op0 = copy_to_mode_reg (mode0, op0);
11958 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11959 op1 = copy_to_mode_reg (mode1, op1);
11961 scratch = gen_reg_rtx (mode0);
11963 pat = GEN_FCN (icode) (scratch, op0, op1);
11964 if (! pat)
11965 return 0;
11966 emit_insn (pat);
11968 /* The vec_any* and vec_all* predicates use the same opcodes for two
11969 different operations, but the bits in CR6 will be different
11970 depending on what information we want. So we have to play tricks
11971 with CR6 to get the right bits out.
11973 If you think this is disgusting, look at the specs for the
11974 AltiVec predicates. */
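/* Concretely (constants as defined in altivec.h): __CR6_EQ == 0,
   __CR6_EQ_REV == 1, __CR6_LT == 2, __CR6_LT_REV == 3, so for example
   vec_all_eq (a, b) reaches this point with cr6_form == __CR6_LT while
   vec_any_eq (a, b) arrives with __CR6_EQ_REV.  */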
11976 switch (cr6_form_int)
11978 case 0:
11979 emit_insn (gen_cr6_test_for_zero (target));
11980 break;
11981 case 1:
11982 emit_insn (gen_cr6_test_for_zero_reverse (target));
11983 break;
11984 case 2:
11985 emit_insn (gen_cr6_test_for_lt (target));
11986 break;
11987 case 3:
11988 emit_insn (gen_cr6_test_for_lt_reverse (target));
11989 break;
11990 default:
11991 error ("argument 1 of __builtin_altivec_predicate is out of range");
11992 break;
11995 return target;
11998 static rtx
11999 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
12001 rtx pat, addr;
12002 tree arg0 = CALL_EXPR_ARG (exp, 0);
12003 tree arg1 = CALL_EXPR_ARG (exp, 1);
12004 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12005 enum machine_mode mode0 = Pmode;
12006 enum machine_mode mode1 = Pmode;
12007 rtx op0 = expand_normal (arg0);
12008 rtx op1 = expand_normal (arg1);
12010 if (icode == CODE_FOR_nothing)
12011 /* Builtin not supported on this processor. */
12012 return 0;
12014 /* If we got invalid arguments bail out before generating bad rtl. */
12015 if (arg0 == error_mark_node || arg1 == error_mark_node)
12016 return const0_rtx;
12018 if (target == 0
12019 || GET_MODE (target) != tmode
12020 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12021 target = gen_reg_rtx (tmode);
12023 op1 = copy_to_mode_reg (mode1, op1);
12025 if (op0 == const0_rtx)
12027 addr = gen_rtx_MEM (tmode, op1);
12029 else
12031 op0 = copy_to_mode_reg (mode0, op0);
12032 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
12035 pat = GEN_FCN (icode) (target, addr);
12037 if (! pat)
12038 return 0;
12039 emit_insn (pat);
12041 return target;
12044 /* Return a constant vector for use as a little-endian permute control vector
12045 to reverse the order of elements of the given vector mode. */
12046 static rtx
12047 swap_selector_for_mode (enum machine_mode mode)
12049 /* These are little endian vectors, so their elements are reversed
12050 from what you would normally expect for a permute control vector. */
12051 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
12052 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
12053 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
12054 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
12055 unsigned int *swaparray, i;
12056 rtx perm[16];
12058 switch (mode)
12060 case V2DFmode:
12061 case V2DImode:
12062 swaparray = swap2;
12063 break;
12064 case V4SFmode:
12065 case V4SImode:
12066 swaparray = swap4;
12067 break;
12068 case V8HImode:
12069 swaparray = swap8;
12070 break;
12071 case V16QImode:
12072 swaparray = swap16;
12073 break;
12074 default:
12075 gcc_unreachable ();
12078 for (i = 0; i < 16; ++i)
12079 perm[i] = GEN_INT (swaparray[i]);
12081 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
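/* Example: for V4SImode the selector is built from swap4; combined with
   the inverted little-endian element numbering noted above, vperm with
   this selector maps the word elements {w0,w1,w2,w3} to {w3,w2,w1,w0},
   a full element reversal rather than a byte swap within elements.  */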
12084 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
12085 with -maltivec=be specified. Issue the load followed by an element-reversing
12086 permute. */
12087 void
12088 altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12090 rtx tmp = gen_reg_rtx (mode);
12091 rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
12092 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12093 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
12094 rtx sel = swap_selector_for_mode (mode);
12095 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
12097 gcc_assert (REG_P (op0));
12098 emit_insn (par);
12099 emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
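/* Schematically, the sequence emitted above is:

       tmp = lvx/lve*x [addr]         -- the raw AltiVec load
       op0 = vperm (tmp, tmp, sel)    -- element-reversing permute

   where the PARALLEL pairs the load with an UNSPEC identifying which
   AltiVec load instruction is intended.  */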
12102 /* Generate code for a "stvx" or "stvxl" built-in for a little endian target
12103 with -maltivec=be specified. Issue the store preceded by an element-reversing
12104 permute. */
12105 void
12106 altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12108 rtx tmp = gen_reg_rtx (mode);
12109 rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
12110 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12111 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
12112 rtx sel = swap_selector_for_mode (mode);
12113 rtx vperm;
12115 gcc_assert (REG_P (op1));
12116 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12117 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12118 emit_insn (par);
12121 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
12122 specified. Issue the store preceded by an element-reversing permute. */
12123 void
12124 altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12126 enum machine_mode inner_mode = GET_MODE_INNER (mode);
12127 rtx tmp = gen_reg_rtx (mode);
12128 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
12129 rtx sel = swap_selector_for_mode (mode);
12130 rtx vperm;
12132 gcc_assert (REG_P (op1));
12133 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12134 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12135 emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
12138 static rtx
12139 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
12141 rtx pat, addr;
12142 tree arg0 = CALL_EXPR_ARG (exp, 0);
12143 tree arg1 = CALL_EXPR_ARG (exp, 1);
12144 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12145 enum machine_mode mode0 = Pmode;
12146 enum machine_mode mode1 = Pmode;
12147 rtx op0 = expand_normal (arg0);
12148 rtx op1 = expand_normal (arg1);
12150 if (icode == CODE_FOR_nothing)
12151 /* Builtin not supported on this processor. */
12152 return 0;
12154 /* If we got invalid arguments bail out before generating bad rtl. */
12155 if (arg0 == error_mark_node || arg1 == error_mark_node)
12156 return const0_rtx;
12158 if (target == 0
12159 || GET_MODE (target) != tmode
12160 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12161 target = gen_reg_rtx (tmode);
12163 op1 = copy_to_mode_reg (mode1, op1);
12165 if (op0 == const0_rtx)
12167 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
12169 else
12171 op0 = copy_to_mode_reg (mode0, op0);
12172 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
12175 pat = GEN_FCN (icode) (target, addr);
12177 if (! pat)
12178 return 0;
12179 emit_insn (pat);
12181 return target;
12184 static rtx
12185 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12187 tree arg0 = CALL_EXPR_ARG (exp, 0);
12188 tree arg1 = CALL_EXPR_ARG (exp, 1);
12189 tree arg2 = CALL_EXPR_ARG (exp, 2);
12190 rtx op0 = expand_normal (arg0);
12191 rtx op1 = expand_normal (arg1);
12192 rtx op2 = expand_normal (arg2);
12193 rtx pat;
12194 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12195 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12196 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
12198 /* Invalid arguments. Bail before doing anything stoopid! */
12199 if (arg0 == error_mark_node
12200 || arg1 == error_mark_node
12201 || arg2 == error_mark_node)
12202 return const0_rtx;
12204 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12205 op0 = copy_to_mode_reg (mode2, op0);
12206 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12207 op1 = copy_to_mode_reg (mode0, op1);
12208 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12209 op2 = copy_to_mode_reg (mode1, op2);
12211 pat = GEN_FCN (icode) (op1, op2, op0);
12212 if (pat)
12213 emit_insn (pat);
12214 return NULL_RTX;
12217 static rtx
12218 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12220 tree arg0 = CALL_EXPR_ARG (exp, 0);
12221 tree arg1 = CALL_EXPR_ARG (exp, 1);
12222 tree arg2 = CALL_EXPR_ARG (exp, 2);
12223 rtx op0 = expand_normal (arg0);
12224 rtx op1 = expand_normal (arg1);
12225 rtx op2 = expand_normal (arg2);
12226 rtx pat, addr;
12227 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12228 enum machine_mode mode1 = Pmode;
12229 enum machine_mode mode2 = Pmode;
12231 /* Invalid arguments. Bail before doing anything stoopid! */
12232 if (arg0 == error_mark_node
12233 || arg1 == error_mark_node
12234 || arg2 == error_mark_node)
12235 return const0_rtx;
12237 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12238 op0 = copy_to_mode_reg (tmode, op0);
12240 op2 = copy_to_mode_reg (mode2, op2);
12242 if (op1 == const0_rtx)
12244 addr = gen_rtx_MEM (tmode, op2);
12246 else
12248 op1 = copy_to_mode_reg (mode1, op1);
12249 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12252 pat = GEN_FCN (icode) (addr, op0);
12253 if (pat)
12254 emit_insn (pat);
12255 return NULL_RTX;
12258 static rtx
12259 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12261 tree arg0 = CALL_EXPR_ARG (exp, 0);
12262 tree arg1 = CALL_EXPR_ARG (exp, 1);
12263 tree arg2 = CALL_EXPR_ARG (exp, 2);
12264 rtx op0 = expand_normal (arg0);
12265 rtx op1 = expand_normal (arg1);
12266 rtx op2 = expand_normal (arg2);
12267 rtx pat, addr;
12268 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12269 enum machine_mode smode = insn_data[icode].operand[1].mode;
12270 enum machine_mode mode1 = Pmode;
12271 enum machine_mode mode2 = Pmode;
12273 /* Invalid arguments. Bail before doing anything stoopid! */
12274 if (arg0 == error_mark_node
12275 || arg1 == error_mark_node
12276 || arg2 == error_mark_node)
12277 return const0_rtx;
12279 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12280 op0 = copy_to_mode_reg (smode, op0);
12282 op2 = copy_to_mode_reg (mode2, op2);
12284 if (op1 == const0_rtx)
12286 addr = gen_rtx_MEM (tmode, op2);
12288 else
12290 op1 = copy_to_mode_reg (mode1, op1);
12291 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12294 pat = GEN_FCN (icode) (addr, op0);
12295 if (pat)
12296 emit_insn (pat);
12297 return NULL_RTX;
12300 /* Return the appropriate SPR number associated with the given builtin. */
12301 static inline HOST_WIDE_INT
12302 htm_spr_num (enum rs6000_builtins code)
12304 if (code == HTM_BUILTIN_GET_TFHAR
12305 || code == HTM_BUILTIN_SET_TFHAR)
12306 return TFHAR_SPR;
12307 else if (code == HTM_BUILTIN_GET_TFIAR
12308 || code == HTM_BUILTIN_SET_TFIAR)
12309 return TFIAR_SPR;
12310 else if (code == HTM_BUILTIN_GET_TEXASR
12311 || code == HTM_BUILTIN_SET_TEXASR)
12312 return TEXASR_SPR;
12313 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12314 || code == HTM_BUILTIN_SET_TEXASRU);
12315 return TEXASRU_SPR;
12318 /* Return the appropriate SPR regno associated with the given builtin. */
12319 static inline HOST_WIDE_INT
12320 htm_spr_regno (enum rs6000_builtins code)
12322 if (code == HTM_BUILTIN_GET_TFHAR
12323 || code == HTM_BUILTIN_SET_TFHAR)
12324 return TFHAR_REGNO;
12325 else if (code == HTM_BUILTIN_GET_TFIAR
12326 || code == HTM_BUILTIN_SET_TFIAR)
12327 return TFIAR_REGNO;
12328 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12329 || code == HTM_BUILTIN_SET_TEXASR
12330 || code == HTM_BUILTIN_GET_TEXASRU
12331 || code == HTM_BUILTIN_SET_TEXASRU);
12332 return TEXASR_REGNO;
12335 /* Return the correct ICODE value depending on whether we are
12336 setting or reading the HTM SPRs. */
12337 static inline enum insn_code
12338 rs6000_htm_spr_icode (bool nonvoid)
12340 if (nonvoid)
12341 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12342 else
12343 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
12346 /* Expand the HTM builtin in EXP and store the result in TARGET.
12347 Store true in *EXPANDEDP if we found a builtin to expand. */
12348 static rtx
12349 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12351 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12352 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12353 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12354 const struct builtin_description *d;
12355 size_t i;
12357 *expandedp = false;
12359 /* Expand the HTM builtins. */
12360 d = bdesc_htm;
12361 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12362 if (d->code == fcode)
12364 rtx op[MAX_HTM_OPERANDS], pat;
12365 int nopnds = 0;
12366 tree arg;
12367 call_expr_arg_iterator iter;
12368 unsigned attr = rs6000_builtin_info[fcode].attr;
12369 enum insn_code icode = d->icode;
12371 if (attr & RS6000_BTC_SPR)
12372 icode = rs6000_htm_spr_icode (nonvoid);
12374 if (nonvoid)
12376 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12377 if (!target
12378 || GET_MODE (target) != tmode
12379 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
12380 target = gen_reg_rtx (tmode);
12381 op[nopnds++] = target;
12384 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12386 const struct insn_operand_data *insn_op;
12388 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12389 return NULL_RTX;
12391 insn_op = &insn_data[icode].operand[nopnds];
12393 op[nopnds] = expand_normal (arg);
12395 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12397 if (!strcmp (insn_op->constraint, "n"))
12399 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12400 if (!CONST_INT_P (op[nopnds]))
12401 error ("argument %d must be an unsigned literal", arg_num);
12402 else
12403 error ("argument %d is an unsigned literal that is "
12404 "out of range", arg_num);
12405 return const0_rtx;
12407 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12410 nopnds++;
12413 /* Handle the builtins for extended mnemonics. These accept
12414 no arguments, but map to builtins that take arguments. */
12415 switch (fcode)
12417 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12418 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12419 op[nopnds++] = GEN_INT (1);
12420 #ifdef ENABLE_CHECKING
12421 attr |= RS6000_BTC_UNARY;
12422 #endif
12423 break;
12424 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12425 op[nopnds++] = GEN_INT (0);
12426 #ifdef ENABLE_CHECKING
12427 attr |= RS6000_BTC_UNARY;
12428 #endif
12429 break;
12430 default:
12431 break;
12434 /* If this builtin accesses SPRs, then pass in the appropriate
12435 SPR number and SPR regno as the last two operands. */
12436 if (attr & RS6000_BTC_SPR)
12438 op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
12439 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
12442 #ifdef ENABLE_CHECKING
12443 int expected_nopnds = 0;
12444 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12445 expected_nopnds = 1;
12446 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12447 expected_nopnds = 2;
12448 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12449 expected_nopnds = 3;
12450 if (!(attr & RS6000_BTC_VOID))
12451 expected_nopnds += 1;
12452 if (attr & RS6000_BTC_SPR)
12453 expected_nopnds += 2;
12455 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12456 #endif
12458 switch (nopnds)
12460 case 1:
12461 pat = GEN_FCN (icode) (op[0]);
12462 break;
12463 case 2:
12464 pat = GEN_FCN (icode) (op[0], op[1]);
12465 break;
12466 case 3:
12467 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12468 break;
12469 case 4:
12470 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12471 break;
12472 default:
12473 gcc_unreachable ();
12475 if (!pat)
12476 return NULL_RTX;
12477 emit_insn (pat);
12479 *expandedp = true;
12480 if (nonvoid)
12481 return target;
12482 return const0_rtx;
12485 return NULL_RTX;
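/* Example walk-through (sketch): __builtin_set_texasr (x) on a 64-bit
   target returns void, so the argument loop collects op[] = { x }; the
   RS6000_BTC_SPR clause then appends TEXASR_SPR and the TEXASR register,
   and the three operands are emitted as a CODE_FOR_htm_mtspr_di insn,
   i.e. a mtspr targeting TEXASR.  */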
12488 static rtx
12489 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12491 rtx pat;
12492 tree arg0 = CALL_EXPR_ARG (exp, 0);
12493 tree arg1 = CALL_EXPR_ARG (exp, 1);
12494 tree arg2 = CALL_EXPR_ARG (exp, 2);
12495 rtx op0 = expand_normal (arg0);
12496 rtx op1 = expand_normal (arg1);
12497 rtx op2 = expand_normal (arg2);
12498 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12499 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12500 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12501 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
12503 if (icode == CODE_FOR_nothing)
12504 /* Builtin not supported on this processor. */
12505 return 0;
12507 /* If we got invalid arguments bail out before generating bad rtl. */
12508 if (arg0 == error_mark_node
12509 || arg1 == error_mark_node
12510 || arg2 == error_mark_node)
12511 return const0_rtx;
12513 /* Check and prepare argument depending on the instruction code.
12515 Note that a switch statement instead of the sequence of tests
12516 would be invalid, as many of the CODE_FOR values could be
12517 CODE_FOR_nothing, which would yield multiple case labels with
12518 identical values. The if chain is harmless, since an icode of
12519 CODE_FOR_nothing never reaches this point at runtime. */
12520 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12521 || icode == CODE_FOR_altivec_vsldoi_v4si
12522 || icode == CODE_FOR_altivec_vsldoi_v8hi
12523 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12525 /* Only allow 4-bit unsigned literals. */
12526 STRIP_NOPS (arg2);
12527 if (TREE_CODE (arg2) != INTEGER_CST
12528 || TREE_INT_CST_LOW (arg2) & ~0xf)
12530 error ("argument 3 must be a 4-bit unsigned literal");
12531 return const0_rtx;
12534 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12535 || icode == CODE_FOR_vsx_xxpermdi_v2di
12536 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12537 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12538 || icode == CODE_FOR_vsx_xxsldwi_v4si
12539 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12540 || icode == CODE_FOR_vsx_xxsldwi_v2di
12541 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12543 /* Only allow 2-bit unsigned literals. */
12544 STRIP_NOPS (arg2);
12545 if (TREE_CODE (arg2) != INTEGER_CST
12546 || TREE_INT_CST_LOW (arg2) & ~0x3)
12548 error ("argument 3 must be a 2-bit unsigned literal");
12549 return const0_rtx;
12552 else if (icode == CODE_FOR_vsx_set_v2df
12553 || icode == CODE_FOR_vsx_set_v2di
12554 || icode == CODE_FOR_bcdadd
12555 || icode == CODE_FOR_bcdadd_lt
12556 || icode == CODE_FOR_bcdadd_eq
12557 || icode == CODE_FOR_bcdadd_gt
12558 || icode == CODE_FOR_bcdsub
12559 || icode == CODE_FOR_bcdsub_lt
12560 || icode == CODE_FOR_bcdsub_eq
12561 || icode == CODE_FOR_bcdsub_gt)
12563 /* Only allow 1-bit unsigned literals. */
12564 STRIP_NOPS (arg2);
12565 if (TREE_CODE (arg2) != INTEGER_CST
12566 || TREE_INT_CST_LOW (arg2) & ~0x1)
12568 error ("argument 3 must be a 1-bit unsigned literal");
12569 return const0_rtx;
12572 else if (icode == CODE_FOR_dfp_ddedpd_dd
12573 || icode == CODE_FOR_dfp_ddedpd_td)
12575 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
12576 STRIP_NOPS (arg0);
12577 if (TREE_CODE (arg0) != INTEGER_CST
12578 || TREE_INT_CST_LOW (arg0) & ~0x3)
12580 error ("argument 1 must be 0 or 2");
12581 return const0_rtx;
12584 else if (icode == CODE_FOR_dfp_denbcd_dd
12585 || icode == CODE_FOR_dfp_denbcd_td)
12587 /* Only allow 1-bit unsigned literals. */
12588 STRIP_NOPS (arg0);
12589 if (TREE_CODE (arg0) != INTEGER_CST
12590 || TREE_INT_CST_LOW (arg0) & ~0x1)
12592 error ("argument 1 must be a 1-bit unsigned literal");
12593 return const0_rtx;
12596 else if (icode == CODE_FOR_dfp_dscli_dd
12597 || icode == CODE_FOR_dfp_dscli_td
12598 || icode == CODE_FOR_dfp_dscri_dd
12599 || icode == CODE_FOR_dfp_dscri_td)
12601 /* Only allow 6-bit unsigned literals. */
12602 STRIP_NOPS (arg1);
12603 if (TREE_CODE (arg1) != INTEGER_CST
12604 || TREE_INT_CST_LOW (arg1) & ~0x3f)
12606 error ("argument 2 must be a 6-bit unsigned literal");
12607 return const0_rtx;
12610 else if (icode == CODE_FOR_crypto_vshasigmaw
12611 || icode == CODE_FOR_crypto_vshasigmad)
12613 /* Check whether the 2nd and 3rd arguments are integer constants and in
12614 range and prepare arguments. */
12615 STRIP_NOPS (arg1);
12616 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
12618 error ("argument 2 must be 0 or 1");
12619 return const0_rtx;
12622 STRIP_NOPS (arg2);
12623 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
12625 error ("argument 3 must be in the range 0..15");
12626 return const0_rtx;
12630 if (target == 0
12631 || GET_MODE (target) != tmode
12632 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12633 target = gen_reg_rtx (tmode);
12635 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12636 op0 = copy_to_mode_reg (mode0, op0);
12637 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12638 op1 = copy_to_mode_reg (mode1, op1);
12639 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12640 op2 = copy_to_mode_reg (mode2, op2);
12642 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
12643 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
12644 else
12645 pat = GEN_FCN (icode) (target, op0, op1, op2);
12646 if (! pat)
12647 return 0;
12648 emit_insn (pat);
12650 return target;
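/* For example, __builtin_vsx_xxsldwi (a, b, 3) passes the 2-bit-literal
   check above, while a shift count of 4 is rejected with "argument 3
   must be a 2-bit unsigned literal" before any rtl is generated.  */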
12653 /* Expand the lvx builtins. */
12654 static rtx
12655 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
12657 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12658 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12659 tree arg0;
12660 enum machine_mode tmode, mode0;
12661 rtx pat, op0;
12662 enum insn_code icode;
12664 switch (fcode)
12666 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
12667 icode = CODE_FOR_vector_altivec_load_v16qi;
12668 break;
12669 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
12670 icode = CODE_FOR_vector_altivec_load_v8hi;
12671 break;
12672 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
12673 icode = CODE_FOR_vector_altivec_load_v4si;
12674 break;
12675 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
12676 icode = CODE_FOR_vector_altivec_load_v4sf;
12677 break;
12678 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
12679 icode = CODE_FOR_vector_altivec_load_v2df;
12680 break;
12681 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
12682 icode = CODE_FOR_vector_altivec_load_v2di;
break;
12683 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
12684 icode = CODE_FOR_vector_altivec_load_v1ti;
12685 break;
12686 default:
12687 *expandedp = false;
12688 return NULL_RTX;
12691 *expandedp = true;
12693 arg0 = CALL_EXPR_ARG (exp, 0);
12694 op0 = expand_normal (arg0);
12695 tmode = insn_data[icode].operand[0].mode;
12696 mode0 = insn_data[icode].operand[1].mode;
12698 if (target == 0
12699 || GET_MODE (target) != tmode
12700 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12701 target = gen_reg_rtx (tmode);
12703 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12704 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12706 pat = GEN_FCN (icode) (target, op0);
12707 if (! pat)
12708 return 0;
12709 emit_insn (pat);
12710 return target;
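/* Note on the operand handling above: the builtin's argument is a
   pointer, so when it fails the insn's memory predicate it is forced
   into a Pmode register and dereferenced as a MEM of the load's vector
   mode (e.g. V4SImode for LD_INTERNAL_4si), which the
   vector_altivec_load_* patterns accept.  */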
12713 /* Expand the stvx builtins. */
12714 static rtx
12715 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12716 bool *expandedp)
12718 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12719 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12720 tree arg0, arg1;
12721 enum machine_mode mode0, mode1;
12722 rtx pat, op0, op1;
12723 enum insn_code icode;
12725 switch (fcode)
12727 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
12728 icode = CODE_FOR_vector_altivec_store_v16qi;
12729 break;
12730 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
12731 icode = CODE_FOR_vector_altivec_store_v8hi;
12732 break;
12733 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
12734 icode = CODE_FOR_vector_altivec_store_v4si;
12735 break;
12736 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
12737 icode = CODE_FOR_vector_altivec_store_v4sf;
12738 break;
12739 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
12740 icode = CODE_FOR_vector_altivec_store_v2df;
12741 break;
12742 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
12743 icode = CODE_FOR_vector_altivec_store_v2di;
break;
12744 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
12745 icode = CODE_FOR_vector_altivec_store_v1ti;
12746 break;
12747 default:
12748 *expandedp = false;
12749 return NULL_RTX;
12752 arg0 = CALL_EXPR_ARG (exp, 0);
12753 arg1 = CALL_EXPR_ARG (exp, 1);
12754 op0 = expand_normal (arg0);
12755 op1 = expand_normal (arg1);
12756 mode0 = insn_data[icode].operand[0].mode;
12757 mode1 = insn_data[icode].operand[1].mode;
12759 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12760 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12761 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12762 op1 = copy_to_mode_reg (mode1, op1);
12764 pat = GEN_FCN (icode) (op0, op1);
12765 if (pat)
12766 emit_insn (pat);
12768 *expandedp = true;
12769 return NULL_RTX;
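/* Unlike the load expander, the store expander above runs purely for
   its side effect: success is reported through *EXPANDEDP and the
   return value is always NULL_RTX, since a store produces no result.  */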
12772 /* Expand the dst builtins. */
12773 static rtx
12774 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12775 bool *expandedp)
12777 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12778 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12779 tree arg0, arg1, arg2;
12780 enum machine_mode mode0, mode1;
12781 rtx pat, op0, op1, op2;
12782 const struct builtin_description *d;
12783 size_t i;
12785 *expandedp = false;
12787 /* Handle DST variants. */
12788 d = bdesc_dst;
12789 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12790 if (d->code == fcode)
12792 arg0 = CALL_EXPR_ARG (exp, 0);
12793 arg1 = CALL_EXPR_ARG (exp, 1);
12794 arg2 = CALL_EXPR_ARG (exp, 2);
12795 op0 = expand_normal (arg0);
12796 op1 = expand_normal (arg1);
12797 op2 = expand_normal (arg2);
12798 mode0 = insn_data[d->icode].operand[0].mode;
12799 mode1 = insn_data[d->icode].operand[1].mode;
12801 /* Invalid arguments, bail out before generating bad rtl. */
12802 if (arg0 == error_mark_node
12803 || arg1 == error_mark_node
12804 || arg2 == error_mark_node)
12805 return const0_rtx;
12807 *expandedp = true;
12808 STRIP_NOPS (arg2);
12809 if (TREE_CODE (arg2) != INTEGER_CST
12810 || TREE_INT_CST_LOW (arg2) & ~0x3)
12812 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
12813 return const0_rtx;
12816 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
12817 op0 = copy_to_mode_reg (Pmode, op0);
12818 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
12819 op1 = copy_to_mode_reg (mode1, op1);
12821 pat = GEN_FCN (d->icode) (op0, op1, op2);
12822 if (pat != 0)
12823 emit_insn (pat);
12825 return NULL_RTX;
12828 return NULL_RTX;
12831 /* Expand vec_init builtin. */
12832 static rtx
12833 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
12835 enum machine_mode tmode = TYPE_MODE (type);
12836 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
12837 int i, n_elt = GET_MODE_NUNITS (tmode);
12839 gcc_assert (VECTOR_MODE_P (tmode));
12840 gcc_assert (n_elt == call_expr_nargs (exp));
12842 if (!target || !register_operand (target, tmode))
12843 target = gen_reg_rtx (tmode);
12845 /* If we have a vector comprising a single element, such as V1TImode, do
12846 the initialization directly. */
12847 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
12849 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
12850 emit_move_insn (target, gen_lowpart (tmode, x));
12852 else
12854 rtvec v = rtvec_alloc (n_elt);
12856 for (i = 0; i < n_elt; ++i)
12858 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
12859 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
12862 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
12865 return target;
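/* For example, a four-element V4SI initializer reaches the else arm
   and is expanded through a PARALLEL of the four lowparts, while a
   V1TI initializer takes the single-element path and becomes one move
   of the TImode value into the vector register.  */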
12868 /* Return the integer constant in ARG. Constrain it to be in the range
12869 of the subparts of VEC_TYPE; issue an error if not. */
12871 static int
12872 get_element_number (tree vec_type, tree arg)
12874 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
12876 if (!tree_fits_uhwi_p (arg)
12877 || (elt = tree_to_uhwi (arg), elt > max))
12879 error ("selector must be an integer constant in the range 0..%wi", max);
12880 return 0;
12883 return elt;
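/* For instance, for a V4SI vector type TYPE_VECTOR_SUBPARTS is 4, so
   MAX is 3: selectors 0..3 are returned unchanged, while 4 reports
   "selector must be an integer constant in the range 0..3" and 0 is
   returned as a safe fallback.  */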
12886 /* Expand vec_set builtin. */
12887 static rtx
12888 altivec_expand_vec_set_builtin (tree exp)
12890 enum machine_mode tmode, mode1;
12891 tree arg0, arg1, arg2;
12892 int elt;
12893 rtx op0, op1;
12895 arg0 = CALL_EXPR_ARG (exp, 0);
12896 arg1 = CALL_EXPR_ARG (exp, 1);
12897 arg2 = CALL_EXPR_ARG (exp, 2);
12899 tmode = TYPE_MODE (TREE_TYPE (arg0));
12900 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12901 gcc_assert (VECTOR_MODE_P (tmode));
12903 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
12904 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
12905 elt = get_element_number (TREE_TYPE (arg0), arg2);
12907 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
12908 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
12910 op0 = force_reg (tmode, op0);
12911 op1 = force_reg (mode1, op1);
12913 rs6000_expand_vector_set (op0, op1, elt);
12915 return op0;
12918 /* Expand vec_ext builtin. */
12919 static rtx
12920 altivec_expand_vec_ext_builtin (tree exp, rtx target)
12922 enum machine_mode tmode, mode0;
12923 tree arg0, arg1;
12924 int elt;
12925 rtx op0;
12927 arg0 = CALL_EXPR_ARG (exp, 0);
12928 arg1 = CALL_EXPR_ARG (exp, 1);
12930 op0 = expand_normal (arg0);
12931 elt = get_element_number (TREE_TYPE (arg0), arg1);
12933 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12934 mode0 = TYPE_MODE (TREE_TYPE (arg0));
12935 gcc_assert (VECTOR_MODE_P (mode0));
12937 op0 = force_reg (mode0, op0);
12939 if (optimize || !target || !register_operand (target, tmode))
12940 target = gen_reg_rtx (tmode);
12942 rs6000_expand_vector_extract (target, op0, elt);
12944 return target;
12947 /* Expand the builtin in EXP and store the result in TARGET. Store
12948 true in *EXPANDEDP if we found a builtin to expand. */
12949 static rtx
12950 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
12952 const struct builtin_description *d;
12953 size_t i;
12954 enum insn_code icode;
12955 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12956 tree arg0;
12957 rtx op0, pat;
12958 enum machine_mode tmode, mode0;
12959 enum rs6000_builtins fcode
12960 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12962 if (rs6000_overloaded_builtin_p (fcode))
12964 *expandedp = true;
12965 error ("unresolved overload for Altivec builtin %qF", fndecl);
12967 /* Given it is invalid, just generate a normal call. */
12968 return expand_call (exp, target, false);
12971 target = altivec_expand_ld_builtin (exp, target, expandedp);
12972 if (*expandedp)
12973 return target;
12975 target = altivec_expand_st_builtin (exp, target, expandedp);
12976 if (*expandedp)
12977 return target;
12979 target = altivec_expand_dst_builtin (exp, target, expandedp);
12980 if (*expandedp)
12981 return target;
12983 *expandedp = true;
12985 switch (fcode)
12987 case ALTIVEC_BUILTIN_STVX_V2DF:
12988 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
12989 case ALTIVEC_BUILTIN_STVX_V2DI:
12990 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
12991 case ALTIVEC_BUILTIN_STVX_V4SF:
12992 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
12993 case ALTIVEC_BUILTIN_STVX:
12994 case ALTIVEC_BUILTIN_STVX_V4SI:
12995 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
12996 case ALTIVEC_BUILTIN_STVX_V8HI:
12997 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
12998 case ALTIVEC_BUILTIN_STVX_V16QI:
12999 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
13000 case ALTIVEC_BUILTIN_STVEBX:
13001 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
13002 case ALTIVEC_BUILTIN_STVEHX:
13003 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
13004 case ALTIVEC_BUILTIN_STVEWX:
13005 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
13006 case ALTIVEC_BUILTIN_STVXL_V2DF:
13007 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
13008 case ALTIVEC_BUILTIN_STVXL_V2DI:
13009 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
13010 case ALTIVEC_BUILTIN_STVXL_V4SF:
13011 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
13012 case ALTIVEC_BUILTIN_STVXL:
13013 case ALTIVEC_BUILTIN_STVXL_V4SI:
13014 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
13015 case ALTIVEC_BUILTIN_STVXL_V8HI:
13016 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
13017 case ALTIVEC_BUILTIN_STVXL_V16QI:
13018 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
13020 case ALTIVEC_BUILTIN_STVLX:
13021 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
13022 case ALTIVEC_BUILTIN_STVLXL:
13023 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
13024 case ALTIVEC_BUILTIN_STVRX:
13025 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
13026 case ALTIVEC_BUILTIN_STVRXL:
13027 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
13029 case VSX_BUILTIN_STXVD2X_V1TI:
13030 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
13031 case VSX_BUILTIN_STXVD2X_V2DF:
13032 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
13033 case VSX_BUILTIN_STXVD2X_V2DI:
13034 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
13035 case VSX_BUILTIN_STXVW4X_V4SF:
13036 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
13037 case VSX_BUILTIN_STXVW4X_V4SI:
13038 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
13039 case VSX_BUILTIN_STXVW4X_V8HI:
13040 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
13041 case VSX_BUILTIN_STXVW4X_V16QI:
13042 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
13044 case ALTIVEC_BUILTIN_MFVSCR:
13045 icode = CODE_FOR_altivec_mfvscr;
13046 tmode = insn_data[icode].operand[0].mode;
13048 if (target == 0
13049 || GET_MODE (target) != tmode
13050 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13051 target = gen_reg_rtx (tmode);
13053 pat = GEN_FCN (icode) (target);
13054 if (! pat)
13055 return 0;
13056 emit_insn (pat);
13057 return target;
13059 case ALTIVEC_BUILTIN_MTVSCR:
13060 icode = CODE_FOR_altivec_mtvscr;
13061 arg0 = CALL_EXPR_ARG (exp, 0);
13062 op0 = expand_normal (arg0);
13063 mode0 = insn_data[icode].operand[0].mode;
13065 /* If we got invalid arguments, bail out before generating bad rtl. */
13066 if (arg0 == error_mark_node)
13067 return const0_rtx;
13069 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13070 op0 = copy_to_mode_reg (mode0, op0);
13072 pat = GEN_FCN (icode) (op0);
13073 if (pat)
13074 emit_insn (pat);
13075 return NULL_RTX;
13077 case ALTIVEC_BUILTIN_DSSALL:
13078 emit_insn (gen_altivec_dssall ());
13079 return NULL_RTX;
13081 case ALTIVEC_BUILTIN_DSS:
13082 icode = CODE_FOR_altivec_dss;
13083 arg0 = CALL_EXPR_ARG (exp, 0);
13084 STRIP_NOPS (arg0);
13085 op0 = expand_normal (arg0);
13086 mode0 = insn_data[icode].operand[0].mode;
13088 /* If we got invalid arguments, bail out before generating bad rtl. */
13089 if (arg0 == error_mark_node)
13090 return const0_rtx;
13092 if (TREE_CODE (arg0) != INTEGER_CST
13093 || TREE_INT_CST_LOW (arg0) & ~0x3)
13095 error ("argument to dss must be a 2-bit unsigned literal");
13096 return const0_rtx;
13099 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13100 op0 = copy_to_mode_reg (mode0, op0);
13102 emit_insn (gen_altivec_dss (op0));
13103 return NULL_RTX;
13105 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
13106 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
13107 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
13108 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
13109 case VSX_BUILTIN_VEC_INIT_V2DF:
13110 case VSX_BUILTIN_VEC_INIT_V2DI:
13111 case VSX_BUILTIN_VEC_INIT_V1TI:
13112 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
13114 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
13115 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
13116 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
13117 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
13118 case VSX_BUILTIN_VEC_SET_V2DF:
13119 case VSX_BUILTIN_VEC_SET_V2DI:
13120 case VSX_BUILTIN_VEC_SET_V1TI:
13121 return altivec_expand_vec_set_builtin (exp);
13123 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
13124 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
13125 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
13126 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
13127 case VSX_BUILTIN_VEC_EXT_V2DF:
13128 case VSX_BUILTIN_VEC_EXT_V2DI:
13129 case VSX_BUILTIN_VEC_EXT_V1TI:
13130 return altivec_expand_vec_ext_builtin (exp, target);
13132 default:
13133 break;
13134 /* Fall through to the table-driven expanders below. */
13137 /* Expand abs* operations. */
13138 d = bdesc_abs;
13139 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13140 if (d->code == fcode)
13141 return altivec_expand_abs_builtin (d->icode, exp, target);
13143 /* Expand the AltiVec predicates. */
13144 d = bdesc_altivec_preds;
13145 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13146 if (d->code == fcode)
13147 return altivec_expand_predicate_builtin (d->icode, exp, target);
13149 /* LV* are funky. We initialized them differently. */
13150 switch (fcode)
13152 case ALTIVEC_BUILTIN_LVSL:
13153 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
13154 exp, target, false);
13155 case ALTIVEC_BUILTIN_LVSR:
13156 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
13157 exp, target, false);
13158 case ALTIVEC_BUILTIN_LVEBX:
13159 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
13160 exp, target, false);
13161 case ALTIVEC_BUILTIN_LVEHX:
13162 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
13163 exp, target, false);
13164 case ALTIVEC_BUILTIN_LVEWX:
13165 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
13166 exp, target, false);
13167 case ALTIVEC_BUILTIN_LVXL_V2DF:
13168 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
13169 exp, target, false);
13170 case ALTIVEC_BUILTIN_LVXL_V2DI:
13171 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
13172 exp, target, false);
13173 case ALTIVEC_BUILTIN_LVXL_V4SF:
13174 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
13175 exp, target, false);
13176 case ALTIVEC_BUILTIN_LVXL:
13177 case ALTIVEC_BUILTIN_LVXL_V4SI:
13178 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
13179 exp, target, false);
13180 case ALTIVEC_BUILTIN_LVXL_V8HI:
13181 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
13182 exp, target, false);
13183 case ALTIVEC_BUILTIN_LVXL_V16QI:
13184 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
13185 exp, target, false);
13186 case ALTIVEC_BUILTIN_LVX_V2DF:
13187 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
13188 exp, target, false);
13189 case ALTIVEC_BUILTIN_LVX_V2DI:
13190 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
13191 exp, target, false);
13192 case ALTIVEC_BUILTIN_LVX_V4SF:
13193 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
13194 exp, target, false);
13195 case ALTIVEC_BUILTIN_LVX:
13196 case ALTIVEC_BUILTIN_LVX_V4SI:
13197 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
13198 exp, target, false);
13199 case ALTIVEC_BUILTIN_LVX_V8HI:
13200 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
13201 exp, target, false);
13202 case ALTIVEC_BUILTIN_LVX_V16QI:
13203 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
13204 exp, target, false);
13205 case ALTIVEC_BUILTIN_LVLX:
13206 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
13207 exp, target, true);
13208 case ALTIVEC_BUILTIN_LVLXL:
13209 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
13210 exp, target, true);
13211 case ALTIVEC_BUILTIN_LVRX:
13212 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
13213 exp, target, true);
13214 case ALTIVEC_BUILTIN_LVRXL:
13215 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
13216 exp, target, true);
13217 case VSX_BUILTIN_LXVD2X_V1TI:
13218 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13219 exp, target, false);
13220 case VSX_BUILTIN_LXVD2X_V2DF:
13221 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13222 exp, target, false);
13223 case VSX_BUILTIN_LXVD2X_V2DI:
13224 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13225 exp, target, false);
13226 case VSX_BUILTIN_LXVW4X_V4SF:
13227 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13228 exp, target, false);
13229 case VSX_BUILTIN_LXVW4X_V4SI:
13230 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13231 exp, target, false);
13232 case VSX_BUILTIN_LXVW4X_V8HI:
13233 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13234 exp, target, false);
13235 case VSX_BUILTIN_LXVW4X_V16QI:
13236 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13237 exp, target, false);
13238 break;
13239 default:
13240 break;
13241 /* Fall through: no LV* match, so report failure below. */
13244 *expandedp = false;
13245 return NULL_RTX;
13248 /* Expand the builtin in EXP and store the result in TARGET. Store
13249 true in *EXPANDEDP if we found a builtin to expand. */
13250 static rtx
13251 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13253 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13254 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13255 const struct builtin_description *d;
13256 size_t i;
13258 *expandedp = true;
13260 switch (fcode)
13262 case PAIRED_BUILTIN_STX:
13263 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13264 case PAIRED_BUILTIN_LX:
13265 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
13266 default:
13267 break;
13268 /* Fall through to the paired predicate table below. */
13271 /* Expand the paired predicates. */
13272 d = bdesc_paired_preds;
13273 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13274 if (d->code == fcode)
13275 return paired_expand_predicate_builtin (d->icode, exp, target);
13277 *expandedp = false;
13278 return NULL_RTX;
13281 /* Binops that need to be initialized manually, but can be expanded
13282 automagically by rs6000_expand_binop_builtin. */
13283 static const struct builtin_description bdesc_2arg_spe[] =
13285 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13286 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13287 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13288 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13289 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13290 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13291 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13292 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13293 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13294 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13295 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13296 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13297 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13298 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13299 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13300 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13301 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13302 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13303 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13304 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13305 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13306 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13309 /* Expand the builtin in EXP and store the result in TARGET. Store
13310 true in *EXPANDEDP if we found a builtin to expand.
13312 This expands the SPE builtins that are not simple unary and binary
13313 operations. */
13314 static rtx
13315 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13317 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13318 tree arg1, arg0;
13319 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13320 enum insn_code icode;
13321 enum machine_mode tmode, mode0;
13322 rtx pat, op0;
13323 const struct builtin_description *d;
13324 size_t i;
13326 *expandedp = true;
13328 /* Syntax check for a 5-bit unsigned immediate. */
13329 switch (fcode)
13331 case SPE_BUILTIN_EVSTDD:
13332 case SPE_BUILTIN_EVSTDH:
13333 case SPE_BUILTIN_EVSTDW:
13334 case SPE_BUILTIN_EVSTWHE:
13335 case SPE_BUILTIN_EVSTWHO:
13336 case SPE_BUILTIN_EVSTWWE:
13337 case SPE_BUILTIN_EVSTWWO:
13338 arg1 = CALL_EXPR_ARG (exp, 2);
13339 if (TREE_CODE (arg1) != INTEGER_CST
13340 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13342 error ("argument 2 must be a 5-bit unsigned literal");
13343 return const0_rtx;
13345 break;
13346 default:
13347 break;
13350 /* The evsplat*i instructions take a small signed immediate rather than a general operand, so route them to their specific patterns here. */
13351 switch (fcode)
13353 case SPE_BUILTIN_EVSPLATFI:
13354 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13355 exp, target);
13356 case SPE_BUILTIN_EVSPLATI:
13357 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13358 exp, target);
13359 default:
13360 break;
13363 d = bdesc_2arg_spe;
13364 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13365 if (d->code == fcode)
13366 return rs6000_expand_binop_builtin (d->icode, exp, target);
13368 d = bdesc_spe_predicates;
13369 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13370 if (d->code == fcode)
13371 return spe_expand_predicate_builtin (d->icode, exp, target);
13373 d = bdesc_spe_evsel;
13374 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13375 if (d->code == fcode)
13376 return spe_expand_evsel_builtin (d->icode, exp, target);
13378 switch (fcode)
13380 case SPE_BUILTIN_EVSTDDX:
13381 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13382 case SPE_BUILTIN_EVSTDHX:
13383 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13384 case SPE_BUILTIN_EVSTDWX:
13385 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13386 case SPE_BUILTIN_EVSTWHEX:
13387 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13388 case SPE_BUILTIN_EVSTWHOX:
13389 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13390 case SPE_BUILTIN_EVSTWWEX:
13391 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13392 case SPE_BUILTIN_EVSTWWOX:
13393 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13394 case SPE_BUILTIN_EVSTDD:
13395 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13396 case SPE_BUILTIN_EVSTDH:
13397 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13398 case SPE_BUILTIN_EVSTDW:
13399 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13400 case SPE_BUILTIN_EVSTWHE:
13401 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13402 case SPE_BUILTIN_EVSTWHO:
13403 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13404 case SPE_BUILTIN_EVSTWWE:
13405 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13406 case SPE_BUILTIN_EVSTWWO:
13407 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13408 case SPE_BUILTIN_MFSPEFSCR:
13409 icode = CODE_FOR_spe_mfspefscr;
13410 tmode = insn_data[icode].operand[0].mode;
13412 if (target == 0
13413 || GET_MODE (target) != tmode
13414 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13415 target = gen_reg_rtx (tmode);
13417 pat = GEN_FCN (icode) (target);
13418 if (! pat)
13419 return 0;
13420 emit_insn (pat);
13421 return target;
13422 case SPE_BUILTIN_MTSPEFSCR:
13423 icode = CODE_FOR_spe_mtspefscr;
13424 arg0 = CALL_EXPR_ARG (exp, 0);
13425 op0 = expand_normal (arg0);
13426 mode0 = insn_data[icode].operand[0].mode;
13428 if (arg0 == error_mark_node)
13429 return const0_rtx;
13431 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13432 op0 = copy_to_mode_reg (mode0, op0);
13434 pat = GEN_FCN (icode) (op0);
13435 if (pat)
13436 emit_insn (pat);
13437 return NULL_RTX;
13438 default:
13439 break;
13442 *expandedp = false;
13443 return NULL_RTX;
13446 static rtx
13447 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13449 rtx pat, scratch, tmp;
13450 tree form = CALL_EXPR_ARG (exp, 0);
13451 tree arg0 = CALL_EXPR_ARG (exp, 1);
13452 tree arg1 = CALL_EXPR_ARG (exp, 2);
13453 rtx op0 = expand_normal (arg0);
13454 rtx op1 = expand_normal (arg1);
13455 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13456 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13457 int form_int;
13458 enum rtx_code code;
13460 if (TREE_CODE (form) != INTEGER_CST)
13462 error ("argument 1 of __builtin_paired_predicate must be a constant");
13463 return const0_rtx;
13465 else
13466 form_int = TREE_INT_CST_LOW (form);
13468 gcc_assert (mode0 == mode1);
13470 if (arg0 == error_mark_node || arg1 == error_mark_node)
13471 return const0_rtx;
13473 if (target == 0
13474 || GET_MODE (target) != SImode
13475 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13476 target = gen_reg_rtx (SImode);
13477 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13478 op0 = copy_to_mode_reg (mode0, op0);
13479 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13480 op1 = copy_to_mode_reg (mode1, op1);
13482 scratch = gen_reg_rtx (CCFPmode);
13484 pat = GEN_FCN (icode) (scratch, op0, op1);
13485 if (!pat)
13486 return const0_rtx;
13488 emit_insn (pat);
13490 switch (form_int)
13492 /* LT bit. */
13493 case 0:
13494 code = LT;
13495 break;
13496 /* GT bit. */
13497 case 1:
13498 code = GT;
13499 break;
13500 /* EQ bit. */
13501 case 2:
13502 code = EQ;
13503 break;
13504 /* UN bit. */
13505 case 3:
13506 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13507 return target;
13508 default:
13509 error ("argument 1 of __builtin_paired_predicate is out of range");
13510 return const0_rtx;
13513 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13514 emit_move_insn (target, tmp);
13515 return target;
13518 static rtx
13519 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13521 rtx pat, scratch, tmp;
13522 tree form = CALL_EXPR_ARG (exp, 0);
13523 tree arg0 = CALL_EXPR_ARG (exp, 1);
13524 tree arg1 = CALL_EXPR_ARG (exp, 2);
13525 rtx op0 = expand_normal (arg0);
13526 rtx op1 = expand_normal (arg1);
13527 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13528 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13529 int form_int;
13530 enum rtx_code code;
13532 if (TREE_CODE (form) != INTEGER_CST)
13534 error ("argument 1 of __builtin_spe_predicate must be a constant");
13535 return const0_rtx;
13537 else
13538 form_int = TREE_INT_CST_LOW (form);
13540 gcc_assert (mode0 == mode1);
13542 if (arg0 == error_mark_node || arg1 == error_mark_node)
13543 return const0_rtx;
13545 if (target == 0
13546 || GET_MODE (target) != SImode
13547 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13548 target = gen_reg_rtx (SImode);
13550 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13551 op0 = copy_to_mode_reg (mode0, op0);
13552 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13553 op1 = copy_to_mode_reg (mode1, op1);
13555 scratch = gen_reg_rtx (CCmode);
13557 pat = GEN_FCN (icode) (scratch, op0, op1);
13558 if (! pat)
13559 return const0_rtx;
13560 emit_insn (pat);
13562 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13563 _lower_. We use one compare, but look in different bits of the
13564 CR for each variant.
13566 There are 2 elements in each SPE simd type (upper/lower). The CR
13567 bits are set as follows:
13569 BIT 0 | BIT 1 | BIT 2 | BIT 3
13570 U | L | (U | L) | (U & L)
13572 So, for an "all" relationship, BIT 3 would be set.
13573 For an "any" relationship, BIT 2 would be set. Etc.
13575 Following traditional nomenclature, these bits map to:
13577 BIT 0 | BIT 1 | BIT 2 | BIT 3
13578 LT | GT | EQ | OV
13580 Later, we will generate rtl to look in the OV/EQ/LT/GT bits (forms 0-3 below).
13583 switch (form_int)
13585 /* All variant. OV bit. */
13586 case 0:
13587 /* We need to get to the OV bit, which is the ORDERED bit. We
13588 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13589 that's ugly and will make validate_condition_mode die.
13590 So let's just use another pattern. */
13591 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13592 return target;
13593 /* Any variant. EQ bit. */
13594 case 1:
13595 code = EQ;
13596 break;
13597 /* Upper variant. LT bit. */
13598 case 2:
13599 code = LT;
13600 break;
13601 /* Lower variant. GT bit. */
13602 case 3:
13603 code = GT;
13604 break;
13605 default:
13606 error ("argument 1 of __builtin_spe_predicate is out of range");
13607 return const0_rtx;
13610 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13611 emit_move_insn (target, tmp);
13613 return target;
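/* Concretely, form 1 ("any") ends up emitting
   (set target (eq:SI scratch (const_int 0))), so TARGET reflects the
   EQ bit of the compare, while form 0 ("all") has no usable rtx code
   and instead reads the OV bit through gen_move_from_CR_ov_bit.  */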
13616 /* The evsel builtins look like this:
13618 e = __builtin_spe_evsel_OP (a, b, c, d);
13620 and work like this:
13622 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
13623 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
13626 static rtx
13627 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
13629 rtx pat, scratch;
13630 tree arg0 = CALL_EXPR_ARG (exp, 0);
13631 tree arg1 = CALL_EXPR_ARG (exp, 1);
13632 tree arg2 = CALL_EXPR_ARG (exp, 2);
13633 tree arg3 = CALL_EXPR_ARG (exp, 3);
13634 rtx op0 = expand_normal (arg0);
13635 rtx op1 = expand_normal (arg1);
13636 rtx op2 = expand_normal (arg2);
13637 rtx op3 = expand_normal (arg3);
13638 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13639 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13641 gcc_assert (mode0 == mode1);
13643 if (arg0 == error_mark_node || arg1 == error_mark_node
13644 || arg2 == error_mark_node || arg3 == error_mark_node)
13645 return const0_rtx;
13647 if (target == 0
13648 || GET_MODE (target) != mode0
13649 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
13650 target = gen_reg_rtx (mode0);
13652 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13653 op0 = copy_to_mode_reg (mode0, op0);
13654 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13655 op1 = copy_to_mode_reg (mode0, op1);
13656 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13657 op2 = copy_to_mode_reg (mode0, op2);
13658 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
13659 op3 = copy_to_mode_reg (mode0, op3);
13661 /* Generate the compare. */
13662 scratch = gen_reg_rtx (CCmode);
13663 pat = GEN_FCN (icode) (scratch, op0, op1);
13664 if (! pat)
13665 return const0_rtx;
13666 emit_insn (pat);
13668 if (mode0 == V2SImode)
13669 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
13670 else
13671 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
13673 return target;
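/* A usage sketch (evsel_gts is one plausible comparison variant served
   by this expander; substitute whichever variant is actually defined):

     __ev64_opaque__ e = __builtin_spe_evsel_gts (a, b, c, d);

   This compares A and B once, then selects C[upper] or D[upper] and
   C[lower] or D[lower] independently with a single evsel.  */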
13676 /* Raise an error message for a builtin function that is called without the
13677 appropriate target options being set. */
13679 static void
13680 rs6000_invalid_builtin (enum rs6000_builtins fncode)
13682 size_t uns_fncode = (size_t)fncode;
13683 const char *name = rs6000_builtin_info[uns_fncode].name;
13684 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
13686 gcc_assert (name != NULL);
13687 if ((fnmask & RS6000_BTM_CELL) != 0)
13688 error ("Builtin function %s is only valid for the cell processor", name);
13689 else if ((fnmask & RS6000_BTM_VSX) != 0)
13690 error ("Builtin function %s requires the -mvsx option", name);
13691 else if ((fnmask & RS6000_BTM_HTM) != 0)
13692 error ("Builtin function %s requires the -mhtm option", name);
13693 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
13694 error ("Builtin function %s requires the -maltivec option", name);
13695 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
13696 error ("Builtin function %s requires the -mpaired option", name);
13697 else if ((fnmask & RS6000_BTM_SPE) != 0)
13698 error ("Builtin function %s requires the -mspe option", name);
13699 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13700 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13701 error ("Builtin function %s requires the -mhard-dfp and"
13702 " -mpower8-vector options", name);
13703 else if ((fnmask & RS6000_BTM_DFP) != 0)
13704 error ("Builtin function %s requires the -mhard-dfp option", name);
13705 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
13706 error ("Builtin function %s requires the -mpower8-vector option", name);
13707 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13708 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13709 error ("Builtin function %s requires the -mhard-float and"
13710 " -mlong-double-128 options", name);
13711 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
13712 error ("Builtin function %s requires the -mhard-float option", name);
13713 else
13714 error ("Builtin function %s is not supported with the current options",
13715 name);
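/* So a call to an AltiVec builtin in a unit compiled without -maltivec
   produces a diagnostic of the form "Builtin function
   __builtin_altivec_vaddubm requires the -maltivec option" (the name
   here is just a representative AltiVec builtin) rather than silently
   expanding to unsupported code.  */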
13718 /* Expand an expression EXP that calls a built-in function,
13719 with result going to TARGET if that's convenient
13720 (and in mode MODE if that's convenient).
13721 SUBTARGET may be used as the target for computing one of EXP's operands.
13722 IGNORE is nonzero if the value is to be ignored. */
13724 static rtx
13725 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13726 enum machine_mode mode ATTRIBUTE_UNUSED,
13727 int ignore ATTRIBUTE_UNUSED)
13729 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13730 enum rs6000_builtins fcode
13731 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
13732 size_t uns_fcode = (size_t)fcode;
13733 const struct builtin_description *d;
13734 size_t i;
13735 rtx ret;
13736 bool success;
13737 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
13738 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
13740 if (TARGET_DEBUG_BUILTIN)
13742 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
13743 const char *name1 = rs6000_builtin_info[uns_fcode].name;
13744 const char *name2 = ((icode != CODE_FOR_nothing)
13745 ? get_insn_name ((int)icode)
13746 : "nothing");
13747 const char *name3;
13749 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
13751 default: name3 = "unknown"; break;
13752 case RS6000_BTC_SPECIAL: name3 = "special"; break;
13753 case RS6000_BTC_UNARY: name3 = "unary"; break;
13754 case RS6000_BTC_BINARY: name3 = "binary"; break;
13755 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
13756 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
13757 case RS6000_BTC_ABS: name3 = "abs"; break;
13758 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
13759 case RS6000_BTC_DST: name3 = "dst"; break;
13763 fprintf (stderr,
13764 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13765 (name1) ? name1 : "---", fcode,
13766 (name2) ? name2 : "---", (int)icode,
13767 name3,
13768 func_valid_p ? "" : ", not valid");
13771 if (!func_valid_p)
13773 rs6000_invalid_builtin (fcode);
13775 /* Given it is invalid, just generate a normal call. */
13776 return expand_call (exp, target, ignore);
13779 switch (fcode)
13781 case RS6000_BUILTIN_RECIP:
13782 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
13784 case RS6000_BUILTIN_RECIPF:
13785 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
13787 case RS6000_BUILTIN_RSQRTF:
13788 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
13790 case RS6000_BUILTIN_RSQRT:
13791 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
13793 case POWER7_BUILTIN_BPERMD:
13794 return rs6000_expand_binop_builtin (((TARGET_64BIT)
13795 ? CODE_FOR_bpermd_di
13796 : CODE_FOR_bpermd_si), exp, target);
13798 case RS6000_BUILTIN_GET_TB:
13799 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
13800 target);
13802 case RS6000_BUILTIN_MFTB:
13803 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
13804 ? CODE_FOR_rs6000_mftb_di
13805 : CODE_FOR_rs6000_mftb_si),
13806 target);
13808 case RS6000_BUILTIN_MFFS:
13809 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
13811 case RS6000_BUILTIN_MTFSF:
13812 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
13814 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
13815 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
13817 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
13818 : (int) CODE_FOR_altivec_lvsl);
13819 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13820 enum machine_mode mode = insn_data[icode].operand[1].mode;
13821 tree arg;
13822 rtx op, addr, pat;
13824 gcc_assert (TARGET_ALTIVEC);
13826 arg = CALL_EXPR_ARG (exp, 0);
13827 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
13828 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
13829 addr = memory_address (mode, op);
13830 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
13831 op = addr;
13832 else
13834 /* For the load case we need to negate the address. */
13835 op = gen_reg_rtx (GET_MODE (addr));
13836 emit_insn (gen_rtx_SET (VOIDmode, op,
13837 gen_rtx_NEG (GET_MODE (addr), addr)));
13839 op = gen_rtx_MEM (mode, op);
13841 if (target == 0
13842 || GET_MODE (target) != tmode
13843 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13844 target = gen_reg_rtx (tmode);
13846 /*pat = gen_altivec_lvsr (target, op);*/
13847 pat = GEN_FCN (icode) (target, op);
13848 if (!pat)
13849 return 0;
13850 emit_insn (pat);
13852 return target;
13855 case ALTIVEC_BUILTIN_VCFUX:
13856 case ALTIVEC_BUILTIN_VCFSX:
13857 case ALTIVEC_BUILTIN_VCTUXS:
13858 case ALTIVEC_BUILTIN_VCTSXS:
13859 /* FIXME: There's got to be a nicer way to handle this case than
13860 constructing a new CALL_EXPR. */
13861 if (call_expr_nargs (exp) == 1)
13863 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
13864 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
13866 break;
13868 default:
13869 break;
13872 if (TARGET_ALTIVEC)
13874 ret = altivec_expand_builtin (exp, target, &success);
13876 if (success)
13877 return ret;
13879 if (TARGET_SPE)
13881 ret = spe_expand_builtin (exp, target, &success);
13883 if (success)
13884 return ret;
13886 if (TARGET_PAIRED_FLOAT)
13888 ret = paired_expand_builtin (exp, target, &success);
13890 if (success)
13891 return ret;
13893 if (TARGET_HTM)
13895 ret = htm_expand_builtin (exp, target, &success);
13897 if (success)
13898 return ret;
13901 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
13902 gcc_assert (attr == RS6000_BTC_UNARY
13903 || attr == RS6000_BTC_BINARY
13904 || attr == RS6000_BTC_TERNARY);
13906 /* Handle simple unary operations. */
13907 d = bdesc_1arg;
13908 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13909 if (d->code == fcode)
13910 return rs6000_expand_unop_builtin (d->icode, exp, target);
13912 /* Handle simple binary operations. */
13913 d = bdesc_2arg;
13914 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13915 if (d->code == fcode)
13916 return rs6000_expand_binop_builtin (d->icode, exp, target);
13918 /* Handle simple ternary operations. */
13919 d = bdesc_3arg;
13920 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13921 if (d->code == fcode)
13922 return rs6000_expand_ternop_builtin (d->icode, exp, target);
13924 gcc_unreachable ();
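/* Dispatch summary for the expander above: the explicitly-coded cases
   are tried first, then the AltiVec, SPE, paired and HTM expanders as
   enabled, and finally the generic unary/binary/ternary tables; a
   builtin that matches none of these indicates an internal
   inconsistency, hence the gcc_unreachable.  */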
13927 static void
13928 rs6000_init_builtins (void)
13930 tree tdecl;
13931 tree ftype;
13932 enum machine_mode mode;
13934 if (TARGET_DEBUG_BUILTIN)
13935 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
13936 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
13937 (TARGET_SPE) ? ", spe" : "",
13938 (TARGET_ALTIVEC) ? ", altivec" : "",
13939 (TARGET_VSX) ? ", vsx" : "");
13941 V2SI_type_node = build_vector_type (intSI_type_node, 2);
13942 V2SF_type_node = build_vector_type (float_type_node, 2);
13943 V2DI_type_node = build_vector_type (intDI_type_node, 2);
13944 V2DF_type_node = build_vector_type (double_type_node, 2);
13945 V4HI_type_node = build_vector_type (intHI_type_node, 4);
13946 V4SI_type_node = build_vector_type (intSI_type_node, 4);
13947 V4SF_type_node = build_vector_type (float_type_node, 4);
13948 V8HI_type_node = build_vector_type (intHI_type_node, 8);
13949 V16QI_type_node = build_vector_type (intQI_type_node, 16);
13951 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
13952 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
13953 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
13954 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
13956 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
13957 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
13958 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
13959 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
13961 /* We use V1TI mode as a special container to hold __int128_t items that
13962 must live in VSX registers. */
13963 if (intTI_type_node)
13965 V1TI_type_node = build_vector_type (intTI_type_node, 1);
13966 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
13969 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
13970 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
13971 'vector unsigned short'. */
13973 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
13974 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
13975 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
13976 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
13977 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
13979 long_integer_type_internal_node = long_integer_type_node;
13980 long_unsigned_type_internal_node = long_unsigned_type_node;
13981 long_long_integer_type_internal_node = long_long_integer_type_node;
13982 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
13983 intQI_type_internal_node = intQI_type_node;
13984 uintQI_type_internal_node = unsigned_intQI_type_node;
13985 intHI_type_internal_node = intHI_type_node;
13986 uintHI_type_internal_node = unsigned_intHI_type_node;
13987 intSI_type_internal_node = intSI_type_node;
13988 uintSI_type_internal_node = unsigned_intSI_type_node;
13989 intDI_type_internal_node = intDI_type_node;
13990 uintDI_type_internal_node = unsigned_intDI_type_node;
13991 intTI_type_internal_node = intTI_type_node;
13992 uintTI_type_internal_node = unsigned_intTI_type_node;
13993 float_type_internal_node = float_type_node;
13994 double_type_internal_node = double_type_node;
13995 long_double_type_internal_node = long_double_type_node;
13996 dfloat64_type_internal_node = dfloat64_type_node;
13997 dfloat128_type_internal_node = dfloat128_type_node;
13998 void_type_internal_node = void_type_node;
14000 /* Initialize the modes for builtin_function_type, mapping a machine mode to
14001 tree type node. */
14002 builtin_mode_to_type[QImode][0] = integer_type_node;
14003 builtin_mode_to_type[HImode][0] = integer_type_node;
14004 builtin_mode_to_type[SImode][0] = intSI_type_node;
14005 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
14006 builtin_mode_to_type[DImode][0] = intDI_type_node;
14007 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
14008 builtin_mode_to_type[TImode][0] = intTI_type_node;
14009 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
14010 builtin_mode_to_type[SFmode][0] = float_type_node;
14011 builtin_mode_to_type[DFmode][0] = double_type_node;
14012 builtin_mode_to_type[TFmode][0] = long_double_type_node;
14013 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
14014 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
14015 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
14016 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
14017 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
14018 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
14019 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
14020 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
14021 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
14022 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
14023 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
14024 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
14025 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
14026 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
14027 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
14028 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
14029 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
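/* This table is what lets builtin_function_type map a machine mode back
   to a tree type: index 0 holds the signed (or only) type and index 1
   the unsigned variant where one exists, so e.g.
   builtin_mode_to_type[V4SImode][1] yields the unsigned V4SI vector
   type and builtin_mode_to_type[DFmode][0] yields double.  */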
14031 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
14032 TYPE_NAME (bool_char_type_node) = tdecl;
14034 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
14035 TYPE_NAME (bool_short_type_node) = tdecl;
14037 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
14038 TYPE_NAME (bool_int_type_node) = tdecl;
14040 tdecl = add_builtin_type ("__pixel", pixel_type_node);
14041 TYPE_NAME (pixel_type_node) = tdecl;
14043 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
14044 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
14045 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
14046 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
14047 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
14049 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
14050 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
14052 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
14053 TYPE_NAME (V16QI_type_node) = tdecl;
14055 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
14056 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
14058 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
14059 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
14061 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
14062 TYPE_NAME (V8HI_type_node) = tdecl;
14064 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
14065 TYPE_NAME (bool_V8HI_type_node) = tdecl;
14067 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
14068 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
14070 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
14071 TYPE_NAME (V4SI_type_node) = tdecl;
14073 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
14074 TYPE_NAME (bool_V4SI_type_node) = tdecl;
14076 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
14077 TYPE_NAME (V4SF_type_node) = tdecl;
14079 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
14080 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
14082 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
14083 TYPE_NAME (V2DF_type_node) = tdecl;
14085 if (TARGET_POWERPC64)
14087 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
14088 TYPE_NAME (V2DI_type_node) = tdecl;
14090 tdecl = add_builtin_type ("__vector unsigned long",
14091 unsigned_V2DI_type_node);
14092 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14094 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
14095 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14097 else
14099 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
14100 TYPE_NAME (V2DI_type_node) = tdecl;
14102 tdecl = add_builtin_type ("__vector unsigned long long",
14103 unsigned_V2DI_type_node);
14104 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14106 tdecl = add_builtin_type ("__vector __bool long long",
14107 bool_V2DI_type_node);
14108 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14111 if (V1TI_type_node)
14113 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
14114 TYPE_NAME (V1TI_type_node) = tdecl;
14116 tdecl = add_builtin_type ("__vector unsigned __int128",
14117 unsigned_V1TI_type_node);
14118 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
14121 /* Paired and SPE builtins are only available if you build a compiler with
14122 the appropriate options, so only create those builtins with the
14123 appropriate compiler option. Create Altivec and VSX builtins on machines
14124 with at least the general purpose extensions (970 and newer) to allow the
14125 use of the target attribute. */
14126 if (TARGET_PAIRED_FLOAT)
14127 paired_init_builtins ();
14128 if (TARGET_SPE)
14129 spe_init_builtins ();
14130 if (TARGET_EXTRA_BUILTINS)
14131 altivec_init_builtins ();
14132 if (TARGET_HTM)
14133 htm_init_builtins ();
14135 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
14136 rs6000_common_init_builtins ();
14138 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
14139 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
14140 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
14142 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
14143 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
14144 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
14146 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
14147 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
14148 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
14150 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
14151 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
14152 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
14154 mode = (TARGET_64BIT) ? DImode : SImode;
14155 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
14156 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
14157 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
14159 ftype = build_function_type_list (unsigned_intDI_type_node,
14160 NULL_TREE);
14161 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
14163 if (TARGET_64BIT)
14164 ftype = build_function_type_list (unsigned_intDI_type_node,
14165 NULL_TREE);
14166 else
14167 ftype = build_function_type_list (unsigned_intSI_type_node,
14168 NULL_TREE);
14169 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
14171 ftype = build_function_type_list (double_type_node, NULL_TREE);
14172 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
14174 ftype = build_function_type_list (void_type_node,
14175 intSI_type_node, double_type_node,
14176 NULL_TREE);
14177 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
14179 #if TARGET_XCOFF
14180 /* AIX libm provides clog as __clog. */
14181 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
14182 set_user_assembler_name (tdecl, "__clog");
14183 #endif
14185 #ifdef SUBTARGET_INIT_BUILTINS
14186 SUBTARGET_INIT_BUILTINS;
14187 #endif
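/* Once initialization is done, the builtins defined above are directly
   callable; for instance (a usage sketch, subject to the per-builtin
   masks checked in rs6000_expand_builtin):

     double q = __builtin_recipdiv (x, y);
     unsigned long long tb = __builtin_ppc_get_timebase ();

   where the first expands through CODE_FOR_recipdf3 and the second
   through CODE_FOR_rs6000_get_timebase.  */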
14190 /* Returns the rs6000 builtin decl for CODE. */
14192 static tree
14193 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
14195 HOST_WIDE_INT fnmask;
14197 if (code >= RS6000_BUILTIN_COUNT)
14198 return error_mark_node;
14200 fnmask = rs6000_builtin_info[code].mask;
14201 if ((fnmask & rs6000_builtin_mask) != fnmask)
14203 rs6000_invalid_builtin ((enum rs6000_builtins)code);
14204 return error_mark_node;
14207 return rs6000_builtin_decls[code];
14210 static void
14211 spe_init_builtins (void)
14213 tree puint_type_node = build_pointer_type (unsigned_type_node);
14214 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
14215 const struct builtin_description *d;
14216 size_t i;
14218 tree v2si_ftype_4_v2si
14219 = build_function_type_list (opaque_V2SI_type_node,
14220 opaque_V2SI_type_node,
14221 opaque_V2SI_type_node,
14222 opaque_V2SI_type_node,
14223 opaque_V2SI_type_node,
14224 NULL_TREE);
14226 tree v2sf_ftype_4_v2sf
14227 = build_function_type_list (opaque_V2SF_type_node,
14228 opaque_V2SF_type_node,
14229 opaque_V2SF_type_node,
14230 opaque_V2SF_type_node,
14231 opaque_V2SF_type_node,
14232 NULL_TREE);
14234 tree int_ftype_int_v2si_v2si
14235 = build_function_type_list (integer_type_node,
14236 integer_type_node,
14237 opaque_V2SI_type_node,
14238 opaque_V2SI_type_node,
14239 NULL_TREE);
14241 tree int_ftype_int_v2sf_v2sf
14242 = build_function_type_list (integer_type_node,
14243 integer_type_node,
14244 opaque_V2SF_type_node,
14245 opaque_V2SF_type_node,
14246 NULL_TREE);
14248 tree void_ftype_v2si_puint_int
14249 = build_function_type_list (void_type_node,
14250 opaque_V2SI_type_node,
14251 puint_type_node,
14252 integer_type_node,
14253 NULL_TREE);
14255 tree void_ftype_v2si_puint_char
14256 = build_function_type_list (void_type_node,
14257 opaque_V2SI_type_node,
14258 puint_type_node,
14259 char_type_node,
14260 NULL_TREE);
14262 tree void_ftype_v2si_pv2si_int
14263 = build_function_type_list (void_type_node,
14264 opaque_V2SI_type_node,
14265 opaque_p_V2SI_type_node,
14266 integer_type_node,
14267 NULL_TREE);
14269 tree void_ftype_v2si_pv2si_char
14270 = build_function_type_list (void_type_node,
14271 opaque_V2SI_type_node,
14272 opaque_p_V2SI_type_node,
14273 char_type_node,
14274 NULL_TREE);
14276 tree void_ftype_int
14277 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14279 tree int_ftype_void
14280 = build_function_type_list (integer_type_node, NULL_TREE);
14282 tree v2si_ftype_pv2si_int
14283 = build_function_type_list (opaque_V2SI_type_node,
14284 opaque_p_V2SI_type_node,
14285 integer_type_node,
14286 NULL_TREE);
14288 tree v2si_ftype_puint_int
14289 = build_function_type_list (opaque_V2SI_type_node,
14290 puint_type_node,
14291 integer_type_node,
14292 NULL_TREE);
14294 tree v2si_ftype_pushort_int
14295 = build_function_type_list (opaque_V2SI_type_node,
14296 pushort_type_node,
14297 integer_type_node,
14298 NULL_TREE);
14300 tree v2si_ftype_signed_char
14301 = build_function_type_list (opaque_V2SI_type_node,
14302 signed_char_type_node,
14303 NULL_TREE);
14305 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
14307 /* Initialize irregular SPE builtins. */
14309 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
14310 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
14311 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
14312 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
14313 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
14314 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
14315 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
14316 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
14317 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
14318 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
14319 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
14320 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
14321 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
14322 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
14323 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
14324 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
14325 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
14326 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
14328 /* Loads. */
14329 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
14330 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
14331 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
14332 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
14333 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
14334 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
14335 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
14336 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
14337 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
14338 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
14339 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
14340 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
14341 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
14342 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
14343 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
14344 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
14345 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
14346 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
14347 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
14348 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
14349 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
14350 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
14352 /* Predicates. */
14353 d = bdesc_spe_predicates;
14354 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
14356 tree type;
14358 switch (insn_data[d->icode].operand[1].mode)
14360 case V2SImode:
14361 type = int_ftype_int_v2si_v2si;
14362 break;
14363 case V2SFmode:
14364 type = int_ftype_int_v2sf_v2sf;
14365 break;
14366 default:
14367 gcc_unreachable ();
14370 def_builtin (d->name, type, d->code);
14373 /* Evsel predicates. */
14374 d = bdesc_spe_evsel;
14375 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
14377 tree type;
14379 switch (insn_data[d->icode].operand[1].mode)
14381 case V2SImode:
14382 type = v2si_ftype_4_v2si;
14383 break;
14384 case V2SFmode:
14385 type = v2sf_ftype_4_v2sf;
14386 break;
14387 default:
14388 gcc_unreachable ();
14391 def_builtin (d->name, type, d->code);
14395 static void
14396 paired_init_builtins (void)
14398 const struct builtin_description *d;
14399 size_t i;
14401 tree int_ftype_int_v2sf_v2sf
14402 = build_function_type_list (integer_type_node,
14403 integer_type_node,
14404 V2SF_type_node,
14405 V2SF_type_node,
14406 NULL_TREE);
14407 tree pcfloat_type_node =
14408 build_pointer_type (build_qualified_type
14409 (float_type_node, TYPE_QUAL_CONST));
14411 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
14412 long_integer_type_node,
14413 pcfloat_type_node,
14414 NULL_TREE);
14415 tree void_ftype_v2sf_long_pcfloat =
14416 build_function_type_list (void_type_node,
14417 V2SF_type_node,
14418 long_integer_type_node,
14419 pcfloat_type_node,
14420 NULL_TREE);
14423 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
14424 PAIRED_BUILTIN_LX);
14427 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
14428 PAIRED_BUILTIN_STX);
14430 /* Predicates. */
14431 d = bdesc_paired_preds;
14432 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
14434 tree type;
14436 if (TARGET_DEBUG_BUILTIN)
14437 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
14438 (int)i, get_insn_name (d->icode), (int)d->icode,
14439 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
14441 switch (insn_data[d->icode].operand[1].mode)
14443 case V2SFmode:
14444 type = int_ftype_int_v2sf_v2sf;
14445 break;
14446 default:
14447 gcc_unreachable ();
14450 def_builtin (d->name, type, d->code);
14454 static void
14455 altivec_init_builtins (void)
14457 const struct builtin_description *d;
14458 size_t i;
14459 tree ftype;
14460 tree decl;
14462 tree pvoid_type_node = build_pointer_type (void_type_node);
14464 tree pcvoid_type_node
14465 = build_pointer_type (build_qualified_type (void_type_node,
14466 TYPE_QUAL_CONST));
14468 tree int_ftype_opaque
14469 = build_function_type_list (integer_type_node,
14470 opaque_V4SI_type_node, NULL_TREE);
14471 tree opaque_ftype_opaque
14472 = build_function_type_list (integer_type_node, NULL_TREE);
14473 tree opaque_ftype_opaque_int
14474 = build_function_type_list (opaque_V4SI_type_node,
14475 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
14476 tree opaque_ftype_opaque_opaque_int
14477 = build_function_type_list (opaque_V4SI_type_node,
14478 opaque_V4SI_type_node, opaque_V4SI_type_node,
14479 integer_type_node, NULL_TREE);
14480 tree int_ftype_int_opaque_opaque
14481 = build_function_type_list (integer_type_node,
14482 integer_type_node, opaque_V4SI_type_node,
14483 opaque_V4SI_type_node, NULL_TREE);
14484 tree int_ftype_int_v4si_v4si
14485 = build_function_type_list (integer_type_node,
14486 integer_type_node, V4SI_type_node,
14487 V4SI_type_node, NULL_TREE);
14488 tree int_ftype_int_v2di_v2di
14489 = build_function_type_list (integer_type_node,
14490 integer_type_node, V2DI_type_node,
14491 V2DI_type_node, NULL_TREE);
14492 tree void_ftype_v4si
14493 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
14494 tree v8hi_ftype_void
14495 = build_function_type_list (V8HI_type_node, NULL_TREE);
14496 tree void_ftype_void
14497 = build_function_type_list (void_type_node, NULL_TREE);
14498 tree void_ftype_int
14499 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14501 tree opaque_ftype_long_pcvoid
14502 = build_function_type_list (opaque_V4SI_type_node,
14503 long_integer_type_node, pcvoid_type_node,
14504 NULL_TREE);
14505 tree v16qi_ftype_long_pcvoid
14506 = build_function_type_list (V16QI_type_node,
14507 long_integer_type_node, pcvoid_type_node,
14508 NULL_TREE);
14509 tree v8hi_ftype_long_pcvoid
14510 = build_function_type_list (V8HI_type_node,
14511 long_integer_type_node, pcvoid_type_node,
14512 NULL_TREE);
14513 tree v4si_ftype_long_pcvoid
14514 = build_function_type_list (V4SI_type_node,
14515 long_integer_type_node, pcvoid_type_node,
14516 NULL_TREE);
14517 tree v4sf_ftype_long_pcvoid
14518 = build_function_type_list (V4SF_type_node,
14519 long_integer_type_node, pcvoid_type_node,
14520 NULL_TREE);
14521 tree v2df_ftype_long_pcvoid
14522 = build_function_type_list (V2DF_type_node,
14523 long_integer_type_node, pcvoid_type_node,
14524 NULL_TREE);
14525 tree v2di_ftype_long_pcvoid
14526 = build_function_type_list (V2DI_type_node,
14527 long_integer_type_node, pcvoid_type_node,
14528 NULL_TREE);
14530 tree void_ftype_opaque_long_pvoid
14531 = build_function_type_list (void_type_node,
14532 opaque_V4SI_type_node, long_integer_type_node,
14533 pvoid_type_node, NULL_TREE);
14534 tree void_ftype_v4si_long_pvoid
14535 = build_function_type_list (void_type_node,
14536 V4SI_type_node, long_integer_type_node,
14537 pvoid_type_node, NULL_TREE);
14538 tree void_ftype_v16qi_long_pvoid
14539 = build_function_type_list (void_type_node,
14540 V16QI_type_node, long_integer_type_node,
14541 pvoid_type_node, NULL_TREE);
14542 tree void_ftype_v8hi_long_pvoid
14543 = build_function_type_list (void_type_node,
14544 V8HI_type_node, long_integer_type_node,
14545 pvoid_type_node, NULL_TREE);
14546 tree void_ftype_v4sf_long_pvoid
14547 = build_function_type_list (void_type_node,
14548 V4SF_type_node, long_integer_type_node,
14549 pvoid_type_node, NULL_TREE);
14550 tree void_ftype_v2df_long_pvoid
14551 = build_function_type_list (void_type_node,
14552 V2DF_type_node, long_integer_type_node,
14553 pvoid_type_node, NULL_TREE);
14554 tree void_ftype_v2di_long_pvoid
14555 = build_function_type_list (void_type_node,
14556 V2DI_type_node, long_integer_type_node,
14557 pvoid_type_node, NULL_TREE);
14558 tree int_ftype_int_v8hi_v8hi
14559 = build_function_type_list (integer_type_node,
14560 integer_type_node, V8HI_type_node,
14561 V8HI_type_node, NULL_TREE);
14562 tree int_ftype_int_v16qi_v16qi
14563 = build_function_type_list (integer_type_node,
14564 integer_type_node, V16QI_type_node,
14565 V16QI_type_node, NULL_TREE);
14566 tree int_ftype_int_v4sf_v4sf
14567 = build_function_type_list (integer_type_node,
14568 integer_type_node, V4SF_type_node,
14569 V4SF_type_node, NULL_TREE);
14570 tree int_ftype_int_v2df_v2df
14571 = build_function_type_list (integer_type_node,
14572 integer_type_node, V2DF_type_node,
14573 V2DF_type_node, NULL_TREE);
14574 tree v2di_ftype_v2di
14575 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
14576 tree v4si_ftype_v4si
14577 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
14578 tree v8hi_ftype_v8hi
14579 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
14580 tree v16qi_ftype_v16qi
14581 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
14582 tree v4sf_ftype_v4sf
14583 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14584 tree v2df_ftype_v2df
14585 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14586 tree void_ftype_pcvoid_int_int
14587 = build_function_type_list (void_type_node,
14588 pcvoid_type_node, integer_type_node,
14589 integer_type_node, NULL_TREE);
14591 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
14592 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
14593 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
14594 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
14595 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
14596 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
14597 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
14598 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
14599 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
14600 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
14601 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
14602 ALTIVEC_BUILTIN_LVXL_V2DF);
14603 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
14604 ALTIVEC_BUILTIN_LVXL_V2DI);
14605 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
14606 ALTIVEC_BUILTIN_LVXL_V4SF);
14607 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
14608 ALTIVEC_BUILTIN_LVXL_V4SI);
14609 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
14610 ALTIVEC_BUILTIN_LVXL_V8HI);
14611 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
14612 ALTIVEC_BUILTIN_LVXL_V16QI);
14613 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
14614 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
14615 ALTIVEC_BUILTIN_LVX_V2DF);
14616 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
14617 ALTIVEC_BUILTIN_LVX_V2DI);
14618 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
14619 ALTIVEC_BUILTIN_LVX_V4SF);
14620 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
14621 ALTIVEC_BUILTIN_LVX_V4SI);
14622 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
14623 ALTIVEC_BUILTIN_LVX_V8HI);
14624 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
14625 ALTIVEC_BUILTIN_LVX_V16QI);
14626 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
14627 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
14628 ALTIVEC_BUILTIN_STVX_V2DF);
14629 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
14630 ALTIVEC_BUILTIN_STVX_V2DI);
14631 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
14632 ALTIVEC_BUILTIN_STVX_V4SF);
14633 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
14634 ALTIVEC_BUILTIN_STVX_V4SI);
14635 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
14636 ALTIVEC_BUILTIN_STVX_V8HI);
14637 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
14638 ALTIVEC_BUILTIN_STVX_V16QI);
14639 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
14640 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
14641 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
14642 ALTIVEC_BUILTIN_STVXL_V2DF);
14643 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
14644 ALTIVEC_BUILTIN_STVXL_V2DI);
14645 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
14646 ALTIVEC_BUILTIN_STVXL_V4SF);
14647 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
14648 ALTIVEC_BUILTIN_STVXL_V4SI);
14649 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
14650 ALTIVEC_BUILTIN_STVXL_V8HI);
14651 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
14652 ALTIVEC_BUILTIN_STVXL_V16QI);
14653 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
14654 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
14655 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
14656 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
14657 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
14658 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
14659 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
14660 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
14661 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
14662 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
14663 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
14664 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
14665 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
14666 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
14667 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
14668 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
14670 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
14671 VSX_BUILTIN_LXVD2X_V2DF);
14672 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
14673 VSX_BUILTIN_LXVD2X_V2DI);
14674 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
14675 VSX_BUILTIN_LXVW4X_V4SF);
14676 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
14677 VSX_BUILTIN_LXVW4X_V4SI);
14678 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
14679 VSX_BUILTIN_LXVW4X_V8HI);
14680 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
14681 VSX_BUILTIN_LXVW4X_V16QI);
14682 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
14683 VSX_BUILTIN_STXVD2X_V2DF);
14684 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
14685 VSX_BUILTIN_STXVD2X_V2DI);
14686 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
14687 VSX_BUILTIN_STXVW4X_V4SF);
14688 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
14689 VSX_BUILTIN_STXVW4X_V4SI);
14690 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
14691 VSX_BUILTIN_STXVW4X_V8HI);
14692 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
14693 VSX_BUILTIN_STXVW4X_V16QI);
14694 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
14695 VSX_BUILTIN_VEC_LD);
14696 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
14697 VSX_BUILTIN_VEC_ST);
14699 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
14700 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
14701 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
14703 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
14704 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
14705 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
14706 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
14707 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
14708 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
14709 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
14710 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
14711 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
14712 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
14713 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
14714 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
14716 /* Cell builtins. */
14717 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
14718 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
14719 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
14720 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
14722 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
14723 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
14724 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
14725 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
14727 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
14728 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
14729 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
14730 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
14732 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
14733 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
14734 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
14735 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
14737 /* Add the DST variants. */
14738 d = bdesc_dst;
14739 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14740 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
14742 /* Initialize the predicates. */
14743 d = bdesc_altivec_preds;
14744 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14746 enum machine_mode mode1;
14747 tree type;
14749 if (rs6000_overloaded_builtin_p (d->code))
14750 mode1 = VOIDmode;
14751 else
14752 mode1 = insn_data[d->icode].operand[1].mode;
14754 switch (mode1)
14756 case VOIDmode:
14757 type = int_ftype_int_opaque_opaque;
14758 break;
14759 case V2DImode:
14760 type = int_ftype_int_v2di_v2di;
14761 break;
14762 case V4SImode:
14763 type = int_ftype_int_v4si_v4si;
14764 break;
14765 case V8HImode:
14766 type = int_ftype_int_v8hi_v8hi;
14767 break;
14768 case V16QImode:
14769 type = int_ftype_int_v16qi_v16qi;
14770 break;
14771 case V4SFmode:
14772 type = int_ftype_int_v4sf_v4sf;
14773 break;
14774 case V2DFmode:
14775 type = int_ftype_int_v2df_v2df;
14776 break;
14777 default:
14778 gcc_unreachable ();
14781 def_builtin (d->name, type, d->code);
14784 /* Initialize the abs* operators. */
14785 d = bdesc_abs;
14786 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14788 enum machine_mode mode0;
14789 tree type;
14791 mode0 = insn_data[d->icode].operand[0].mode;
14793 switch (mode0)
14795 case V2DImode:
14796 type = v2di_ftype_v2di;
14797 break;
14798 case V4SImode:
14799 type = v4si_ftype_v4si;
14800 break;
14801 case V8HImode:
14802 type = v8hi_ftype_v8hi;
14803 break;
14804 case V16QImode:
14805 type = v16qi_ftype_v16qi;
14806 break;
14807 case V4SFmode:
14808 type = v4sf_ftype_v4sf;
14809 break;
14810 case V2DFmode:
14811 type = v2df_ftype_v2df;
14812 break;
14813 default:
14814 gcc_unreachable ();
14817 def_builtin (d->name, type, d->code);
14820 /* Initialize target builtin that implements
14821 targetm.vectorize.builtin_mask_for_load. */
14823 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
14824 v16qi_ftype_long_pcvoid,
14825 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
14826 BUILT_IN_MD, NULL, NULL_TREE);
14827 TREE_READONLY (decl) = 1;
14828 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
14829 altivec_builtin_mask_for_load = decl;
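  /* Conceptual model of the mask builtin recorded above, with a
     hypothetical helper name (sketch only, not part of the build): it
     yields a 16-byte permute control vector derived from the misalignment
     of the address, in the style of the AltiVec lvsl/lvsr instructions,
     which vperm then uses to stitch a misaligned vector out of two
     aligned loads.  */
#if 0
static void
mask_for_load_sketch (unsigned long addr, unsigned char mask[16])
{
  unsigned int sh = addr & 15;		/* misalignment within 16 bytes */
  for (unsigned int i = 0; i < 16; i++)
    mask[i] = sh + i;			/* lvsl-style control vector */
}
#endif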
14831 /* Access to the vec_init patterns. */
14832 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
14833 integer_type_node, integer_type_node,
14834 integer_type_node, NULL_TREE);
14835 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
14837 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
14838 short_integer_type_node,
14839 short_integer_type_node,
14840 short_integer_type_node,
14841 short_integer_type_node,
14842 short_integer_type_node,
14843 short_integer_type_node,
14844 short_integer_type_node, NULL_TREE);
14845 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
14847 ftype = build_function_type_list (V16QI_type_node, char_type_node,
14848 char_type_node, char_type_node,
14849 char_type_node, char_type_node,
14850 char_type_node, char_type_node,
14851 char_type_node, char_type_node,
14852 char_type_node, char_type_node,
14853 char_type_node, char_type_node,
14854 char_type_node, char_type_node,
14855 char_type_node, NULL_TREE);
14856 def_builtin ("__builtin_vec_init_v16qi", ftype,
14857 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
14859 ftype = build_function_type_list (V4SF_type_node, float_type_node,
14860 float_type_node, float_type_node,
14861 float_type_node, NULL_TREE);
14862 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
14864 /* VSX builtins. */
14865 ftype = build_function_type_list (V2DF_type_node, double_type_node,
14866 double_type_node, NULL_TREE);
14867 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
14869 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
14870 intDI_type_node, NULL_TREE);
14871 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
14873 /* Access to the vec_set patterns. */
14874 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
14875 intSI_type_node,
14876 integer_type_node, NULL_TREE);
14877 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
14879 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14880 intHI_type_node,
14881 integer_type_node, NULL_TREE);
14882 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
14884 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
14885 intQI_type_node,
14886 integer_type_node, NULL_TREE);
14887 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
14889 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
14890 float_type_node,
14891 integer_type_node, NULL_TREE);
14892 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
14894 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
14895 double_type_node,
14896 integer_type_node, NULL_TREE);
14897 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
14899 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
14900 intDI_type_node,
14901 integer_type_node, NULL_TREE);
14902 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
14904 /* Access to the vec_extract patterns. */
14905 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14906 integer_type_node, NULL_TREE);
14907 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
14909 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14910 integer_type_node, NULL_TREE);
14911 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
14913 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
14914 integer_type_node, NULL_TREE);
14915 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
14917 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14918 integer_type_node, NULL_TREE);
14919 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
14921 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14922 integer_type_node, NULL_TREE);
14923 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
14925 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
14926 integer_type_node, NULL_TREE);
14927 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
14930 if (V1TI_type_node)
14932 tree v1ti_ftype_long_pcvoid
14933 = build_function_type_list (V1TI_type_node,
14934 long_integer_type_node, pcvoid_type_node,
14935 NULL_TREE);
14936 tree void_ftype_v1ti_long_pvoid
14937 = build_function_type_list (void_type_node,
14938 V1TI_type_node, long_integer_type_node,
14939 pvoid_type_node, NULL_TREE);
14940 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
14941 VSX_BUILTIN_LXVD2X_V1TI);
14942 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
14943 VSX_BUILTIN_STXVD2X_V1TI);
14944 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
14945 NULL_TREE, NULL_TREE);
14946 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
14947 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
14948 intTI_type_node,
14949 integer_type_node, NULL_TREE);
14950 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
14951 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
14952 integer_type_node, NULL_TREE);
14953 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
14958 static void
14959 htm_init_builtins (void)
14961 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
14962 const struct builtin_description *d;
14963 size_t i;
14965 d = bdesc_htm;
14966 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14968 tree op[MAX_HTM_OPERANDS], type;
14969 HOST_WIDE_INT mask = d->mask;
14970 unsigned attr = rs6000_builtin_info[d->code].attr;
14971 bool void_func = (attr & RS6000_BTC_VOID);
14972 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
14973 int nopnds = 0;
14974 tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
14975 : unsigned_type_node;
14977 if ((mask & builtin_mask) != mask)
14979 if (TARGET_DEBUG_BUILTIN)
14980 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
14981 continue;
14984 if (d->name == 0)
14986 if (TARGET_DEBUG_BUILTIN)
14987 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
14988 (long unsigned) i);
14989 continue;
14992 op[nopnds++] = (void_func) ? void_type_node : argtype;
14994 if (attr_args == RS6000_BTC_UNARY)
14995 op[nopnds++] = argtype;
14996 else if (attr_args == RS6000_BTC_BINARY)
14998 op[nopnds++] = argtype;
14999 op[nopnds++] = argtype;
15001 else if (attr_args == RS6000_BTC_TERNARY)
15003 op[nopnds++] = argtype;
15004 op[nopnds++] = argtype;
15005 op[nopnds++] = argtype;
15008 switch (nopnds)
15010 case 1:
15011 type = build_function_type_list (op[0], NULL_TREE);
15012 break;
15013 case 2:
15014 type = build_function_type_list (op[0], op[1], NULL_TREE);
15015 break;
15016 case 3:
15017 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
15018 break;
15019 case 4:
15020 type = build_function_type_list (op[0], op[1], op[2], op[3],
15021 NULL_TREE);
15022 break;
15023 default:
15024 gcc_unreachable ();
15027 def_builtin (d->name, type, d->code);
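/* A compact model of the signature construction above, with hypothetical
   attribute values (sketch only, not part of the build): slot 0 is always
   the return type, then one slot per argument, all drawn from a single
   scalar type, so an SPR binary builtin comes out as
   unsigned long f (unsigned long, unsigned long).  */
#if 0
#include <stdio.h>

enum htm_kind { HTM_UNARY = 1, HTM_BINARY, HTM_TERNARY };

/* Prints the signature a hypothetical HTM builtin would receive.  */
static void
show_signature (enum htm_kind attr_args, int void_func, int spr)
{
  const char *argtype = spr ? "unsigned long" : "unsigned int";
  printf ("%s f(", void_func ? "void" : argtype);
  for (int i = 0; i < (int) attr_args; i++)
    printf ("%s%s", i ? ", " : "", argtype);
  printf (")\n");
}

int
main (void)
{
  show_signature (HTM_BINARY, 0, 1);	/* unsigned long f(unsigned long, unsigned long) */
  show_signature (HTM_UNARY, 1, 0);	/* void f(unsigned int) */
  return 0;
}
#endif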
15031 /* Hash function for builtin functions with up to 3 arguments and a return
15032 type. */
15033 static unsigned
15034 builtin_hash_function (const void *hash_entry)
15036 unsigned ret = 0;
15037 int i;
15038 const struct builtin_hash_struct *bh =
15039 (const struct builtin_hash_struct *) hash_entry;
15041 for (i = 0; i < 4; i++)
15043 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
15044 ret = (ret * 2) + bh->uns_p[i];
15047 return ret;
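/* The loop above encodes the four (mode, uns_p) pairs as the digits of a
   mixed-radix number: base MAX_MACHINE_MODE for the mode digit, base 2
   for the signedness flag, so distinct signatures hash distinctly until
   the value wraps.  A standalone sketch with a stand-in mode count (not
   part of the build):  */
#if 0
#include <stdio.h>

#define NUM_MODES 64		/* stand-in for MAX_MACHINE_MODE */

/* Same mixing as builtin_hash_function above.  */
static unsigned
hash_signature (const int mode[4], const int uns_p[4])
{
  unsigned ret = 0;
  for (int i = 0; i < 4; i++)
    {
      ret = ret * NUM_MODES + (unsigned) mode[i];
      ret = ret * 2 + (unsigned) uns_p[i];
    }
  return ret;
}

int
main (void)
{
  int mode[4] = { 10, 3, 3, 0 };	/* hypothetical: ret, arg0, arg1, no arg2 */
  int uns_p[4] = { 1, 1, 1, 0 };
  printf ("hash = %u\n", hash_signature (mode, uns_p));
  return 0;
}
#endif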
15050 /* Compare builtin hash entries H1 and H2 for equivalence. */
15051 static int
15052 builtin_hash_eq (const void *h1, const void *h2)
15054 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
15055 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
15057 return ((p1->mode[0] == p2->mode[0])
15058 && (p1->mode[1] == p2->mode[1])
15059 && (p1->mode[2] == p2->mode[2])
15060 && (p1->mode[3] == p2->mode[3])
15061 && (p1->uns_p[0] == p2->uns_p[0])
15062 && (p1->uns_p[1] == p2->uns_p[1])
15063 && (p1->uns_p[2] == p2->uns_p[2])
15064 && (p1->uns_p[3] == p2->uns_p[3]));
15067 /* Map types for builtin functions with an explicit return type and up to 3
15068 arguments.  Functions with fewer than 3 arguments use VOIDmode as the
15069 mode of each unused argument.  */
15070 static tree
15071 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
15072 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
15073 enum rs6000_builtins builtin, const char *name)
15075 struct builtin_hash_struct h;
15076 struct builtin_hash_struct *h2;
15077 void **found;
15078 int num_args = 3;
15079 int i;
15080 tree ret_type = NULL_TREE;
15081 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
15083 /* Create builtin_hash_table. */
15084 if (builtin_hash_table == NULL)
15085 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
15086 builtin_hash_eq, NULL);
15088 h.type = NULL_TREE;
15089 h.mode[0] = mode_ret;
15090 h.mode[1] = mode_arg0;
15091 h.mode[2] = mode_arg1;
15092 h.mode[3] = mode_arg2;
15093 h.uns_p[0] = 0;
15094 h.uns_p[1] = 0;
15095 h.uns_p[2] = 0;
15096 h.uns_p[3] = 0;
15098 /* If the builtin is a type that produces unsigned results or takes unsigned
15099 arguments, and it is returned as a decl for the vectorizer (such as
15100 widening multiplies, permute), make sure the arguments and return value
15101 are type correct. */
15102 switch (builtin)
15104 /* unsigned 1 argument functions. */
15105 case CRYPTO_BUILTIN_VSBOX:
15106 case P8V_BUILTIN_VGBBD:
15107 case MISC_BUILTIN_CDTBCD:
15108 case MISC_BUILTIN_CBCDTD:
15109 h.uns_p[0] = 1;
15110 h.uns_p[1] = 1;
15111 break;
15113 /* unsigned 2 argument functions. */
15114 case ALTIVEC_BUILTIN_VMULEUB_UNS:
15115 case ALTIVEC_BUILTIN_VMULEUH_UNS:
15116 case ALTIVEC_BUILTIN_VMULOUB_UNS:
15117 case ALTIVEC_BUILTIN_VMULOUH_UNS:
15118 case CRYPTO_BUILTIN_VCIPHER:
15119 case CRYPTO_BUILTIN_VCIPHERLAST:
15120 case CRYPTO_BUILTIN_VNCIPHER:
15121 case CRYPTO_BUILTIN_VNCIPHERLAST:
15122 case CRYPTO_BUILTIN_VPMSUMB:
15123 case CRYPTO_BUILTIN_VPMSUMH:
15124 case CRYPTO_BUILTIN_VPMSUMW:
15125 case CRYPTO_BUILTIN_VPMSUMD:
15126 case CRYPTO_BUILTIN_VPMSUM:
15127 case MISC_BUILTIN_ADDG6S:
15128 case MISC_BUILTIN_DIVWEU:
15129 case MISC_BUILTIN_DIVWEUO:
15130 case MISC_BUILTIN_DIVDEU:
15131 case MISC_BUILTIN_DIVDEUO:
15132 h.uns_p[0] = 1;
15133 h.uns_p[1] = 1;
15134 h.uns_p[2] = 1;
15135 break;
15137 /* unsigned 3 argument functions. */
15138 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
15139 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
15140 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
15141 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
15142 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
15143 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
15144 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
15145 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
15146 case VSX_BUILTIN_VPERM_16QI_UNS:
15147 case VSX_BUILTIN_VPERM_8HI_UNS:
15148 case VSX_BUILTIN_VPERM_4SI_UNS:
15149 case VSX_BUILTIN_VPERM_2DI_UNS:
15150 case VSX_BUILTIN_XXSEL_16QI_UNS:
15151 case VSX_BUILTIN_XXSEL_8HI_UNS:
15152 case VSX_BUILTIN_XXSEL_4SI_UNS:
15153 case VSX_BUILTIN_XXSEL_2DI_UNS:
15154 case CRYPTO_BUILTIN_VPERMXOR:
15155 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
15156 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
15157 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
15158 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
15159 case CRYPTO_BUILTIN_VSHASIGMAW:
15160 case CRYPTO_BUILTIN_VSHASIGMAD:
15161 case CRYPTO_BUILTIN_VSHASIGMA:
15162 h.uns_p[0] = 1;
15163 h.uns_p[1] = 1;
15164 h.uns_p[2] = 1;
15165 h.uns_p[3] = 1;
15166 break;
15168 /* signed permute functions with unsigned char mask. */
15169 case ALTIVEC_BUILTIN_VPERM_16QI:
15170 case ALTIVEC_BUILTIN_VPERM_8HI:
15171 case ALTIVEC_BUILTIN_VPERM_4SI:
15172 case ALTIVEC_BUILTIN_VPERM_4SF:
15173 case ALTIVEC_BUILTIN_VPERM_2DI:
15174 case ALTIVEC_BUILTIN_VPERM_2DF:
15175 case VSX_BUILTIN_VPERM_16QI:
15176 case VSX_BUILTIN_VPERM_8HI:
15177 case VSX_BUILTIN_VPERM_4SI:
15178 case VSX_BUILTIN_VPERM_4SF:
15179 case VSX_BUILTIN_VPERM_2DI:
15180 case VSX_BUILTIN_VPERM_2DF:
15181 h.uns_p[3] = 1;
15182 break;
15184 /* unsigned args, signed return. */
15185 case VSX_BUILTIN_XVCVUXDDP_UNS:
15186 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
15187 h.uns_p[1] = 1;
15188 break;
15190 /* signed args, unsigned return. */
15191 case VSX_BUILTIN_XVCVDPUXDS_UNS:
15192 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
15193 case MISC_BUILTIN_UNPACK_TD:
15194 case MISC_BUILTIN_UNPACK_V1TI:
15195 h.uns_p[0] = 1;
15196 break;
15198 /* unsigned arguments for 128-bit pack instructions. */
15199 case MISC_BUILTIN_PACK_TD:
15200 case MISC_BUILTIN_PACK_V1TI:
15201 h.uns_p[1] = 1;
15202 h.uns_p[2] = 1;
15203 break;
15205 default:
15206 break;
15209 /* Figure out how many args are present. */
15210 while (num_args > 0 && h.mode[num_args] == VOIDmode)
15211 num_args--;
15213 if (num_args == 0)
15214 fatal_error ("internal error: builtin function %s had no type", name);
15216 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
15217 if (!ret_type && h.uns_p[0])
15218 ret_type = builtin_mode_to_type[h.mode[0]][0];
15220 if (!ret_type)
15221 fatal_error ("internal error: builtin function %s had an unexpected "
15222 "return type %s", name, GET_MODE_NAME (h.mode[0]));
15224 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
15225 arg_type[i] = NULL_TREE;
15227 for (i = 0; i < num_args; i++)
15229 int m = (int) h.mode[i+1];
15230 int uns_p = h.uns_p[i+1];
15232 arg_type[i] = builtin_mode_to_type[m][uns_p];
15233 if (!arg_type[i] && uns_p)
15234 arg_type[i] = builtin_mode_to_type[m][0];
15236 if (!arg_type[i])
15237 fatal_error ("internal error: builtin function %s, argument %d "
15238 "had unexpected argument type %s", name, i,
15239 GET_MODE_NAME (m));
15242 found = htab_find_slot (builtin_hash_table, &h, INSERT);
15243 if (*found == NULL)
15245 h2 = ggc_alloc<builtin_hash_struct> ();
15246 *h2 = h;
15247 *found = (void *)h2;
15249 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
15250 arg_type[2], NULL_TREE);
15253 return ((struct builtin_hash_struct *)(*found))->type;
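/* Hypothetical use of the mapping above (sketch only): a binary builtin
   returning V4SI from two V4SI arguments passes VOIDmode for the unused
   third argument slot, and the resulting type tree is memoized, so a
   later builtin with the same mode/signedness tuple gets the identical
   tree back from the hash table.  */
#if 0
tree t = builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
				ALTIVEC_BUILTIN_VADDUWM,
				"__builtin_altivec_vadduwm");
#endif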
15256 static void
15257 rs6000_common_init_builtins (void)
15259 const struct builtin_description *d;
15260 size_t i;
15262 tree opaque_ftype_opaque = NULL_TREE;
15263 tree opaque_ftype_opaque_opaque = NULL_TREE;
15264 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
15265 tree v2si_ftype_qi = NULL_TREE;
15266 tree v2si_ftype_v2si_qi = NULL_TREE;
15267 tree v2si_ftype_int_qi = NULL_TREE;
15268 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15270 if (!TARGET_PAIRED_FLOAT)
15272 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
15273 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
15276 /* Paired and SPE builtins are only available if you build a compiler with
15277    the appropriate options, so only create those builtins when the
15278    corresponding option is in effect.  Create Altivec and VSX builtins on
15279    machines with at least the general purpose extensions (970 and newer)
15280    to allow the use of the target attribute.  */
15282 if (TARGET_EXTRA_BUILTINS)
15283 builtin_mask |= RS6000_BTM_COMMON;
15285 /* Add the ternary operators. */
15286 d = bdesc_3arg;
15287 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15289 tree type;
15290 HOST_WIDE_INT mask = d->mask;
15292 if ((mask & builtin_mask) != mask)
15294 if (TARGET_DEBUG_BUILTIN)
15295 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
15296 continue;
15299 if (rs6000_overloaded_builtin_p (d->code))
15301 if (! (type = opaque_ftype_opaque_opaque_opaque))
15302 type = opaque_ftype_opaque_opaque_opaque
15303 = build_function_type_list (opaque_V4SI_type_node,
15304 opaque_V4SI_type_node,
15305 opaque_V4SI_type_node,
15306 opaque_V4SI_type_node,
15307 NULL_TREE);
15309 else
15311 enum insn_code icode = d->icode;
15312 if (d->name == 0)
15314 if (TARGET_DEBUG_BUILTIN)
15315 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
15316 (long unsigned)i);
15318 continue;
15321 if (icode == CODE_FOR_nothing)
15323 if (TARGET_DEBUG_BUILTIN)
15324 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
15325 d->name);
15327 continue;
15330 type = builtin_function_type (insn_data[icode].operand[0].mode,
15331 insn_data[icode].operand[1].mode,
15332 insn_data[icode].operand[2].mode,
15333 insn_data[icode].operand[3].mode,
15334 d->code, d->name);
15337 def_builtin (d->name, type, d->code);
15340 /* Add the binary operators. */
15341 d = bdesc_2arg;
15342 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15344 enum machine_mode mode0, mode1, mode2;
15345 tree type;
15346 HOST_WIDE_INT mask = d->mask;
15348 if ((mask & builtin_mask) != mask)
15350 if (TARGET_DEBUG_BUILTIN)
15351 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
15352 continue;
15355 if (rs6000_overloaded_builtin_p (d->code))
15357 if (! (type = opaque_ftype_opaque_opaque))
15358 type = opaque_ftype_opaque_opaque
15359 = build_function_type_list (opaque_V4SI_type_node,
15360 opaque_V4SI_type_node,
15361 opaque_V4SI_type_node,
15362 NULL_TREE);
15364 else
15366 enum insn_code icode = d->icode;
15367 if (d->name == 0)
15369 if (TARGET_DEBUG_BUILTIN)
15370 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
15371 (long unsigned)i);
15373 continue;
15376 if (icode == CODE_FOR_nothing)
15378 if (TARGET_DEBUG_BUILTIN)
15379 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
15380 d->name);
15382 continue;
15385 mode0 = insn_data[icode].operand[0].mode;
15386 mode1 = insn_data[icode].operand[1].mode;
15387 mode2 = insn_data[icode].operand[2].mode;
15389 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
15391 if (! (type = v2si_ftype_v2si_qi))
15392 type = v2si_ftype_v2si_qi
15393 = build_function_type_list (opaque_V2SI_type_node,
15394 opaque_V2SI_type_node,
15395 char_type_node,
15396 NULL_TREE);
15399 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
15400 && mode2 == QImode)
15402 if (! (type = v2si_ftype_int_qi))
15403 type = v2si_ftype_int_qi
15404 = build_function_type_list (opaque_V2SI_type_node,
15405 integer_type_node,
15406 char_type_node,
15407 NULL_TREE);
15410 else
15411 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
15412 d->code, d->name);
15415 def_builtin (d->name, type, d->code);
15418 /* Add the simple unary operators. */
15419 d = bdesc_1arg;
15420 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15422 enum machine_mode mode0, mode1;
15423 tree type;
15424 HOST_WIDE_INT mask = d->mask;
15426 if ((mask & builtin_mask) != mask)
15428 if (TARGET_DEBUG_BUILTIN)
15429 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
15430 continue;
15433 if (rs6000_overloaded_builtin_p (d->code))
15435 if (! (type = opaque_ftype_opaque))
15436 type = opaque_ftype_opaque
15437 = build_function_type_list (opaque_V4SI_type_node,
15438 opaque_V4SI_type_node,
15439 NULL_TREE);
15441 else
15443 enum insn_code icode = d->icode;
15444 if (d->name == 0)
15446 if (TARGET_DEBUG_BUILTIN)
15447 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
15448 (long unsigned)i);
15450 continue;
15453 if (icode == CODE_FOR_nothing)
15455 if (TARGET_DEBUG_BUILTIN)
15456 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
15457 d->name);
15459 continue;
15462 mode0 = insn_data[icode].operand[0].mode;
15463 mode1 = insn_data[icode].operand[1].mode;
15465 if (mode0 == V2SImode && mode1 == QImode)
15467 if (! (type = v2si_ftype_qi))
15468 type = v2si_ftype_qi
15469 = build_function_type_list (opaque_V2SI_type_node,
15470 char_type_node,
15471 NULL_TREE);
15474 else
15475 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
15476 d->code, d->name);
15479 def_builtin (d->name, type, d->code);
15483 static void
15484 rs6000_init_libfuncs (void)
15486 if (!TARGET_IEEEQUAD)
15487 /* AIX/Darwin/64-bit Linux quad floating point routines. */
15488 if (!TARGET_XL_COMPAT)
15490 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
15491 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
15492 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
15493 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
15495 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
15497 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
15498 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
15499 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
15500 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
15501 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
15502 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
15503 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
15505 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
15506 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
15507 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
15508 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
15509 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
15510 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
15511 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
15512 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
15515 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
15516 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
15518 else
15520 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
15521 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
15522 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
15523 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
15525 else
15527 /* 32-bit SVR4 quad floating point routines. */
15529 set_optab_libfunc (add_optab, TFmode, "_q_add");
15530 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
15531 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
15532 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
15533 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
15534 if (TARGET_PPC_GPOPT)
15535 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
15537 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
15538 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
15539 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
15540 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
15541 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
15542 set_optab_libfunc (le_optab, TFmode, "_q_fle");
15544 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
15545 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
15546 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
15547 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
15548 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
15549 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
15550 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
15551 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
15556 /* Expand a block clear operation, and return 1 if successful. Return 0
15557 if we should let the compiler generate normal code.
15559 operands[0] is the destination
15560 operands[1] is the length
15561 operands[3] is the alignment */
15563 int
15564 expand_block_clear (rtx operands[])
15566 rtx orig_dest = operands[0];
15567 rtx bytes_rtx = operands[1];
15568 rtx align_rtx = operands[3];
15569 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
15570 HOST_WIDE_INT align;
15571 HOST_WIDE_INT bytes;
15572 int offset;
15573 int clear_bytes;
15574 int clear_step;
15576 /* If this is not a fixed size clear, just call memset.  */
15577 if (! constp)
15578 return 0;
15580 /* This must be a fixed size alignment */
15581 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15582 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15584 /* Anything to clear? */
15585 bytes = INTVAL (bytes_rtx);
15586 if (bytes <= 0)
15587 return 1;
15589 /* Use the builtin memset after a point, to avoid huge code bloat.
15590 When optimize_size, avoid any significant code bloat; calling
15591 memset is about 4 instructions, so allow for one instruction to
15592 load zero and three to do clearing. */
15593 if (TARGET_ALTIVEC && align >= 128)
15594 clear_step = 16;
15595 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
15596 clear_step = 8;
15597 else if (TARGET_SPE && align >= 64)
15598 clear_step = 8;
15599 else
15600 clear_step = 4;
15602 if (optimize_size && bytes > 3 * clear_step)
15603 return 0;
15604 if (! optimize_size && bytes > 8 * clear_step)
15605 return 0;
15607 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
15609 enum machine_mode mode = BLKmode;
15610 rtx dest;
15612 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
15614 clear_bytes = 16;
15615 mode = V4SImode;
15617 else if (bytes >= 8 && TARGET_SPE && align >= 64)
15619 clear_bytes = 8;
15620 mode = V2SImode;
15622 else if (bytes >= 8 && TARGET_POWERPC64
15623 && (align >= 64 || !STRICT_ALIGNMENT))
15625 clear_bytes = 8;
15626 mode = DImode;
15627 if (offset == 0 && align < 64)
15629 rtx addr;
15631 /* If the address form is reg+offset with offset not a
15632 multiple of four, reload into reg indirect form here
15633 rather than waiting for reload. This way we get one
15634 reload, not one per store. */
15635 addr = XEXP (orig_dest, 0);
15636 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15637 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15638 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15640 addr = copy_addr_to_reg (addr);
15641 orig_dest = replace_equiv_address (orig_dest, addr);
15645 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15646 { /* clear 4 bytes */
15647 clear_bytes = 4;
15648 mode = SImode;
15650 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15651 { /* clear 2 bytes */
15652 clear_bytes = 2;
15653 mode = HImode;
15655 else /* clear 1 byte at a time */
15657 clear_bytes = 1;
15658 mode = QImode;
15661 dest = adjust_address (orig_dest, mode, offset);
15663 emit_move_insn (dest, CONST0_RTX (mode));
15666 return 1;
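/* Under the sizing rules above, an Altivec target with 128-bit-aligned
   data has clear_step = 16, so clears of up to 3 * 16 = 48 bytes are
   inlined when optimizing for size and up to 8 * 16 = 128 bytes
   otherwise.  A standalone sketch of the chunk schedule the loop picks
   for a 40-byte clear on a hypothetical 64-bit Altivec target (not part
   of the build):  */
#if 0
#include <stdio.h>

int
main (void)
{
  int bytes = 40;
  while (bytes > 0)
    {
      int chunk;
      if (bytes >= 16)
	chunk = 16;		/* one V4SI vector store of zero */
      else if (bytes >= 8)
	chunk = 8;		/* one DImode store */
      else if (bytes >= 4)
	chunk = 4;
      else if (bytes >= 2)
	chunk = 2;
      else
	chunk = 1;
      printf ("store %d zero bytes\n", chunk);	/* prints 16, 16, 8 */
      bytes -= chunk;
    }
  return 0;
}
#endif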
15670 /* Expand a block move operation, and return 1 if successful. Return 0
15671 if we should let the compiler generate normal code.
15673 operands[0] is the destination
15674 operands[1] is the source
15675 operands[2] is the length
15676 operands[3] is the alignment */
15678 #define MAX_MOVE_REG 4
15680 int
15681 expand_block_move (rtx operands[])
15683 rtx orig_dest = operands[0];
15684 rtx orig_src = operands[1];
15685 rtx bytes_rtx = operands[2];
15686 rtx align_rtx = operands[3];
15687 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
15688 int align;
15689 int bytes;
15690 int offset;
15691 int move_bytes;
15692 rtx stores[MAX_MOVE_REG];
15693 int num_reg = 0;
15695 /* If this is not a fixed size move, just call memcpy.  */
15696 if (! constp)
15697 return 0;
15699 /* This must be a fixed size alignment */
15700 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15701 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15703 /* Anything to move? */
15704 bytes = INTVAL (bytes_rtx);
15705 if (bytes <= 0)
15706 return 1;
15708 if (bytes > rs6000_block_move_inline_limit)
15709 return 0;
15711 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
15713 union {
15714 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
15715 rtx (*mov) (rtx, rtx);
15716 } gen_func;
15717 enum machine_mode mode = BLKmode;
15718 rtx src, dest;
15720 /* Altivec first, since it will be faster than a string move
15721 when it applies, and usually not significantly larger. */
15722 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
15724 move_bytes = 16;
15725 mode = V4SImode;
15726 gen_func.mov = gen_movv4si;
15728 else if (TARGET_SPE && bytes >= 8 && align >= 64)
15730 move_bytes = 8;
15731 mode = V2SImode;
15732 gen_func.mov = gen_movv2si;
15734 else if (TARGET_STRING
15735 && bytes > 24 /* move up to 32 bytes at a time */
15736 && ! fixed_regs[5]
15737 && ! fixed_regs[6]
15738 && ! fixed_regs[7]
15739 && ! fixed_regs[8]
15740 && ! fixed_regs[9]
15741 && ! fixed_regs[10]
15742 && ! fixed_regs[11]
15743 && ! fixed_regs[12])
15745 move_bytes = (bytes > 32) ? 32 : bytes;
15746 gen_func.movmemsi = gen_movmemsi_8reg;
15748 else if (TARGET_STRING
15749 && bytes > 16 /* move up to 24 bytes at a time */
15750 && ! fixed_regs[5]
15751 && ! fixed_regs[6]
15752 && ! fixed_regs[7]
15753 && ! fixed_regs[8]
15754 && ! fixed_regs[9]
15755 && ! fixed_regs[10])
15757 move_bytes = (bytes > 24) ? 24 : bytes;
15758 gen_func.movmemsi = gen_movmemsi_6reg;
15760 else if (TARGET_STRING
15761 && bytes > 8 /* move up to 16 bytes at a time */
15762 && ! fixed_regs[5]
15763 && ! fixed_regs[6]
15764 && ! fixed_regs[7]
15765 && ! fixed_regs[8])
15767 move_bytes = (bytes > 16) ? 16 : bytes;
15768 gen_func.movmemsi = gen_movmemsi_4reg;
15770 else if (bytes >= 8 && TARGET_POWERPC64
15771 && (align >= 64 || !STRICT_ALIGNMENT))
15773 move_bytes = 8;
15774 mode = DImode;
15775 gen_func.mov = gen_movdi;
15776 if (offset == 0 && align < 64)
15778 rtx addr;
15780 /* If the address form is reg+offset with offset not a
15781 multiple of four, reload into reg indirect form here
15782 rather than waiting for reload. This way we get one
15783 reload, not one per load and/or store. */
15784 addr = XEXP (orig_dest, 0);
15785 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15786 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15787 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15789 addr = copy_addr_to_reg (addr);
15790 orig_dest = replace_equiv_address (orig_dest, addr);
15792 addr = XEXP (orig_src, 0);
15793 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15794 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15795 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15797 addr = copy_addr_to_reg (addr);
15798 orig_src = replace_equiv_address (orig_src, addr);
15802 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
15803 { /* move up to 8 bytes at a time */
15804 move_bytes = (bytes > 8) ? 8 : bytes;
15805 gen_func.movmemsi = gen_movmemsi_2reg;
15807 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15808 { /* move 4 bytes */
15809 move_bytes = 4;
15810 mode = SImode;
15811 gen_func.mov = gen_movsi;
15813 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15814 { /* move 2 bytes */
15815 move_bytes = 2;
15816 mode = HImode;
15817 gen_func.mov = gen_movhi;
15819 else if (TARGET_STRING && bytes > 1)
15820 { /* move up to 4 bytes at a time */
15821 move_bytes = (bytes > 4) ? 4 : bytes;
15822 gen_func.movmemsi = gen_movmemsi_1reg;
15824 else /* move 1 byte at a time */
15826 move_bytes = 1;
15827 mode = QImode;
15828 gen_func.mov = gen_movqi;
15831 src = adjust_address (orig_src, mode, offset);
15832 dest = adjust_address (orig_dest, mode, offset);
15834 if (mode != BLKmode)
15836 rtx tmp_reg = gen_reg_rtx (mode);
15838 emit_insn ((*gen_func.mov) (tmp_reg, src));
15839 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
15842 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
15844 int i;
15845 for (i = 0; i < num_reg; i++)
15846 emit_insn (stores[i]);
15847 num_reg = 0;
15850 if (mode == BLKmode)
15852 /* Move the address into scratch registers. The movmemsi
15853 patterns require zero offset. */
15854 if (!REG_P (XEXP (src, 0)))
15856 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
15857 src = replace_equiv_address (src, src_reg);
15859 set_mem_size (src, move_bytes);
15861 if (!REG_P (XEXP (dest, 0)))
15863 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
15864 dest = replace_equiv_address (dest, dest_reg);
15866 set_mem_size (dest, move_bytes);
15868 emit_insn ((*gen_func.movmemsi) (dest, src,
15869 GEN_INT (move_bytes & 31),
15870 align_rtx));
15874 return 1;
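/* The load/store scheduling above in miniature (sketch only, not part of
   the build): loads are emitted as soon as each chunk is chosen, while
   the matching stores are buffered, up to MAX_MOVE_REG of them, and
   flushed together, so consecutive loads can be in flight at once.  */
#if 0
#include <stdio.h>

#define MAX_MOVE_REG 4

int
main (void)
{
  int bytes = 24, offset = 0, num_reg = 0;
  int pending[MAX_MOVE_REG];

  while (bytes > 0)
    {
      int chunk = bytes >= 4 ? 4 : bytes;	/* word copies, for brevity */
      printf ("load  %d bytes @%d\n", chunk, offset);
      pending[num_reg++] = offset;
      offset += chunk;
      bytes -= chunk;
      if (num_reg == MAX_MOVE_REG || bytes == 0)
	{
	  for (int i = 0; i < num_reg; i++)
	    printf ("store 4 bytes @%d\n", pending[i]);
	  num_reg = 0;
	}
    }
  return 0;
}
#endif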
15878 /* Return a string to perform a load_multiple operation.
15879 operands[0] is the vector.
15880 operands[1] is the source address.
15881 operands[2] is the first destination register. */
15883 const char *
15884 rs6000_output_load_multiple (rtx operands[3])
15886 /* We have to handle the case where the pseudo used to contain the address
15887 is assigned to one of the output registers. */
15888 int i, j;
15889 int words = XVECLEN (operands[0], 0);
15890 rtx xop[10];
15892 if (XVECLEN (operands[0], 0) == 1)
15893 return "lwz %2,0(%1)";
15895 for (i = 0; i < words; i++)
15896 if (refers_to_regno_p (REGNO (operands[2]) + i,
15897 REGNO (operands[2]) + i + 1, operands[1], 0))
15899 if (i == words-1)
15901 xop[0] = GEN_INT (4 * (words-1));
15902 xop[1] = operands[1];
15903 xop[2] = operands[2];
15904 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
15905 return "";
15907 else if (i == 0)
15909 xop[0] = GEN_INT (4 * (words-1));
15910 xop[1] = operands[1];
15911 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15912 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
15913 return "";
15915 else
15917 for (j = 0; j < words; j++)
15918 if (j != i)
15920 xop[0] = GEN_INT (j * 4);
15921 xop[1] = operands[1];
15922 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
15923 output_asm_insn ("lwz %2,%0(%1)", xop);
15925 xop[0] = GEN_INT (i * 4);
15926 xop[1] = operands[1];
15927 output_asm_insn ("lwz %1,%0(%1)", xop);
15928 return "";
15932 return "lswi %2,%1,%N0";
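/* Worked example, not from the original sources: for a 4-word
   load_multiple with the address in r10 and destinations r9..r12, the
   overlap scan finds i == 1 (the "middle" case above), so we emit
	lwz r9,0(r10)
	lwz r11,8(r10)
	lwz r12,12(r10)
	lwz r10,4(r10)
   loading the register that aliases the address last, so the base is
   still valid for all of the other loads.  */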
15936 /* A validation routine: say whether CODE, a condition code, and MODE
15937 match. The other alternatives either don't make sense or should
15938 never be generated. */
15940 void
15941 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
15943 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
15944 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
15945 && GET_MODE_CLASS (mode) == MODE_CC);
15947 /* These don't make sense. */
15948 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
15949 || mode != CCUNSmode);
15951 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
15952 || mode == CCUNSmode);
15954 gcc_assert (mode == CCFPmode
15955 || (code != ORDERED && code != UNORDERED
15956 && code != UNEQ && code != LTGT
15957 && code != UNGT && code != UNLT
15958 && code != UNGE && code != UNLE));
15960 /* These should never be generated except for
15961 flag_finite_math_only. */
15962 gcc_assert (mode != CCFPmode
15963 || flag_finite_math_only
15964 || (code != LE && code != GE
15965 && code != UNEQ && code != LTGT
15966 && code != UNGT && code != UNLT));
15968 /* These are invalid; the information is not there. */
15969 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
15973 /* Return 1 if ANDOP is a mask that has no bits set that are not in the
15974 mask required to convert the result of a rotate insn into a shift
15975 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
15977 int
15978 includes_lshift_p (rtx shiftop, rtx andop)
15980 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15982 shift_mask <<= INTVAL (shiftop);
15984 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
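/* A minimal sketch of the relation checked above (illustrative only, so it
   is guarded out; the helper name is made up for the example):  */
#if 0
static int
example_includes_lshift (void)
{
  /* "rlwinm rD,rS,8,0xffffff00" behaves exactly like "slwi rD,rS,8"
     because the AND mask keeps no bits below the shift count.  */
  unsigned int shift = 8;
  unsigned int mask = 0xffffff00;
  return (mask & ~(~0u << shift)) == 0;	/* 1: the rotate+mask is a shift.  */
}
#endif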
15987 /* Similar, but for right shift. */
15989 int
15990 includes_rshift_p (rtx shiftop, rtx andop)
15992 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15994 shift_mask >>= INTVAL (shiftop);
15996 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
15999 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
16000 to perform a left shift. It must have exactly SHIFTOP least
16001 significant 0's, then one or more 1's, then zero or more 0's. */
16003 int
16004 includes_rldic_lshift_p (rtx shiftop, rtx andop)
16006 if (GET_CODE (andop) == CONST_INT)
16008 HOST_WIDE_INT c, lsb, shift_mask;
16010 c = INTVAL (andop);
16011 if (c == 0 || c == ~0)
16012 return 0;
16014 shift_mask = ~0;
16015 shift_mask <<= INTVAL (shiftop);
16017 /* Find the least significant one bit. */
16018 lsb = c & -c;
16020 /* It must coincide with the LSB of the shift mask. */
16021 if (-lsb != shift_mask)
16022 return 0;
16024 /* Invert to look for the next transition (if any). */
16025 c = ~c;
16027 /* Remove the low group of ones (originally low group of zeros). */
16028 c &= -lsb;
16030 /* Again find the lsb, and check we have all 1's above. */
16031 lsb = c & -c;
16032 return c == -lsb;
16034 else
16035 return 0;
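/* Worked example (illustrative): SHIFTOP == 4 and ANDOP == 0x0ff0.
   lsb = c & -c == 0x10, and -lsb equals shift_mask (~0 << 4), so the
   mask starts exactly at the shift count.  After c = ~c and c &= -lsb,
   the remaining value is ...fff000, whose own lsb satisfies c == -lsb,
   proving the original mask was one contiguous run of 1's; rldic can
   then perform the shift and mask in a single insn.  */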
16038 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
16039 to perform a left shift. It must have SHIFTOP or more least
16040 significant 0's, with the remainder of the word 1's. */
16042 int
16043 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
16045 if (GET_CODE (andop) == CONST_INT)
16047 HOST_WIDE_INT c, lsb, shift_mask;
16049 shift_mask = ~0;
16050 shift_mask <<= INTVAL (shiftop);
16051 c = INTVAL (andop);
16053 /* Find the least significant one bit. */
16054 lsb = c & -c;
16056 /* It must be covered by the shift mask.
16057 This test also rejects c == 0. */
16058 if ((lsb & shift_mask) == 0)
16059 return 0;
16061 /* Check we have all 1's above the transition, and reject all 1's. */
16062 return c == -lsb && lsb != 1;
16064 else
16065 return 0;
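/* Worked example (illustrative): SHIFTOP == 8 with
   ANDOP == 0xffffffffffffff00 is accepted: lsb == 0x100 lies inside
   shift_mask, and c == -lsb means every bit above the trailing zeros is
   set, so "rldicr rD,rS,8,55" does the shift and mask at once.
   ANDOP == ~0 is rejected by the lsb != 1 test.  */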
16068 /* Return 1 if the operands will generate valid arguments for the rlwimi
16069 instruction for an insert with right shift in 64-bit mode.  The mask may
16070 not start on the first bit or stop on the last bit because the wrap-around
16071 effects of the instruction do not correspond to the semantics of the RTL insn.  */
16073 int
16074 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
16076 if (INTVAL (startop) > 32
16077 && INTVAL (startop) < 64
16078 && INTVAL (sizeop) > 1
16079 && INTVAL (sizeop) + INTVAL (startop) < 64
16080 && INTVAL (shiftop) > 0
16081 && INTVAL (sizeop) + INTVAL (shiftop) < 32
16082 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
16083 return 1;
16085 return 0;
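/* Worked numeric example (illustrative): SIZEOP == 8, STARTOP == 40,
   SHIFTOP == 16 passes every test (40 > 32, 8 + 40 < 64, 8 + 16 < 32,
   64 - 16 >= 8), so the insert can use rlwimi; a field that would touch
   bit 63 fails the "sizeop + startop < 64" wrap check.  */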
16088 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
16089 for lfq and stfq insns iff the registers are hard registers. */
16091 int
16092 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
16094 /* We might have been passed a SUBREG. */
16095 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
16096 return 0;
16098 /* We might have been passed non-floating-point registers.  */
16099 if (!FP_REGNO_P (REGNO (reg1))
16100 || !FP_REGNO_P (REGNO (reg2)))
16101 return 0;
16103 return (REGNO (reg1) == REGNO (reg2) - 1);
16106 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
16107 addr1 and addr2 must be in consecutive memory locations
16108 (addr2 == addr1 + 8). */
16110 int
16111 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
16113 rtx addr1, addr2;
16114 unsigned int reg1, reg2;
16115 int offset1, offset2;
16117 /* The mems cannot be volatile. */
16118 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
16119 return 0;
16121 addr1 = XEXP (mem1, 0);
16122 addr2 = XEXP (mem2, 0);
16124 /* Extract an offset (if used) from the first addr. */
16125 if (GET_CODE (addr1) == PLUS)
16127 /* If not a REG, return zero. */
16128 if (GET_CODE (XEXP (addr1, 0)) != REG)
16129 return 0;
16130 else
16132 reg1 = REGNO (XEXP (addr1, 0));
16133 /* The offset must be constant! */
16134 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
16135 return 0;
16136 offset1 = INTVAL (XEXP (addr1, 1));
16139 else if (GET_CODE (addr1) != REG)
16140 return 0;
16141 else
16143 reg1 = REGNO (addr1);
16144 /* This was a simple (mem (reg)) expression. Offset is 0. */
16145 offset1 = 0;
16148 /* And now for the second addr. */
16149 if (GET_CODE (addr2) == PLUS)
16151 /* If not a REG, return zero. */
16152 if (GET_CODE (XEXP (addr2, 0)) != REG)
16153 return 0;
16154 else
16156 reg2 = REGNO (XEXP (addr2, 0));
16157 /* The offset must be constant. */
16158 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
16159 return 0;
16160 offset2 = INTVAL (XEXP (addr2, 1));
16163 else if (GET_CODE (addr2) != REG)
16164 return 0;
16165 else
16167 reg2 = REGNO (addr2);
16168 /* This was a simple (mem (reg)) expression. Offset is 0. */
16169 offset2 = 0;
16172 /* Both of these must have the same base register. */
16173 if (reg1 != reg2)
16174 return 0;
16176 /* The offset for the second addr must be 8 more than the first addr. */
16177 if (offset2 != offset1 + 8)
16178 return 0;
16180 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16181 instructions. */
16182 return 1;
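/* Worked example (illustrative): mem1 == (mem:DF (plus r9 16)) and
   mem2 == (mem:DF (plus r9 24)) share the base register and satisfy
   offset2 == offset1 + 8, so the peephole may combine the pair into a
   single lfq/stfq; volatile mems or differing bases are rejected
   above.  */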
16186 rtx
16187 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
16189 static bool eliminated = false;
16190 rtx ret;
16192 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
16193 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16194 else
16196 rtx mem = cfun->machine->sdmode_stack_slot;
16197 gcc_assert (mem != NULL_RTX);
16199 if (!eliminated)
16201 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
16202 cfun->machine->sdmode_stack_slot = mem;
16203 eliminated = true;
16205 ret = mem;
16208 if (TARGET_DEBUG_ADDR)
16210 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16211 GET_MODE_NAME (mode));
16212 if (!ret)
16213 fprintf (stderr, "\tNULL_RTX\n");
16214 else
16215 debug_rtx (ret);
16218 return ret;
16221 /* Return the mode to be used for memory when a secondary memory
16222 location is needed.  For SDmode values we need to use DDmode; in
16223 all other cases we can use the same mode.  */
16224 enum machine_mode
16225 rs6000_secondary_memory_needed_mode (enum machine_mode mode)
16227 if (lra_in_progress && mode == SDmode)
16228 return DDmode;
16229 return mode;
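/* Illustrative consequence (assumed scenario): when LRA spills an SDmode
   value, the slot is accessed in DDmode, so the 4-byte decimal value
   travels through an 8-byte slot that lfd/stfd can address; pre-power7
   FPRs have no direct 4-byte integer-layout load or store.  */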
16232 static tree
16233 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
16235 /* Don't walk into types. */
16236 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
16238 *walk_subtrees = 0;
16239 return NULL_TREE;
16242 switch (TREE_CODE (*tp))
16244 case VAR_DECL:
16245 case PARM_DECL:
16246 case FIELD_DECL:
16247 case RESULT_DECL:
16248 case SSA_NAME:
16249 case REAL_CST:
16250 case MEM_REF:
16251 case VIEW_CONVERT_EXPR:
16252 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
16253 return *tp;
16254 break;
16255 default:
16256 break;
16259 return NULL_TREE;
16262 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16263 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16264 only work on the traditional altivec registers, note if an altivec register
16265 was chosen. */
16267 static enum rs6000_reg_type
16268 register_to_reg_type (rtx reg, bool *is_altivec)
16270 HOST_WIDE_INT regno;
16271 enum reg_class rclass;
16273 if (GET_CODE (reg) == SUBREG)
16274 reg = SUBREG_REG (reg);
16276 if (!REG_P (reg))
16277 return NO_REG_TYPE;
16279 regno = REGNO (reg);
16280 if (regno >= FIRST_PSEUDO_REGISTER)
16282 if (!lra_in_progress && !reload_in_progress && !reload_completed)
16283 return PSEUDO_REG_TYPE;
16285 regno = true_regnum (reg);
16286 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16287 return PSEUDO_REG_TYPE;
16290 gcc_assert (regno >= 0);
16292 if (is_altivec && ALTIVEC_REGNO_P (regno))
16293 *is_altivec = true;
16295 rclass = rs6000_regno_regclass[regno];
16296 return reg_class_to_reg_type[(int)rclass];
16299 /* Helper function for rs6000_secondary_reload to return true if a move to a
16300 different register class is really a simple move.  */
16302 static bool
16303 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16304 enum rs6000_reg_type from_type,
16305 enum machine_mode mode)
16307 int size;
16309 /* Handle the various direct moves that are available.  In this function, we
16310 only look at cases where we don't need any extra registers, and one or more
16311 simple move insns are issued.  At present, 32-bit integers are not allowed
16312 in FPR/VSX registers.  Single-precision binary floating point is not a
16313 simple move because we need to convert to the single-precision memory layout.
16314 The 4-byte SDmode can be moved.  */
16315 size = GET_MODE_SIZE (mode);
16316 if (TARGET_DIRECT_MOVE
16317 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
16318 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16319 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
16320 return true;
16322 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
16323 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
16324 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16325 return true;
16327 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
16328 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
16329 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16330 return true;
16332 return false;
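/* Illustrative cases (assuming a power8-style target): a DImode move
   between a GPR and a VSX register is "simple" because one mtvsrd or
   mfvsrd suffices, and SDmode is allowed as well; SFmode is excluded
   above because the register image would first have to be converted to
   the 32-bit memory layout.  */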
16335 /* Power8 helper function for rs6000_secondary_reload.  Handle all of the
16336 special direct moves that involve allocating an extra register.  Return
16337 true if such a move is available, storing the insn code and extra cost
16338 in SRI when it is non-null; otherwise return false.  */
16340 static bool
16341 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
16342 enum rs6000_reg_type from_type,
16343 enum machine_mode mode,
16344 secondary_reload_info *sri,
16345 bool altivec_p)
16347 bool ret = false;
16348 enum insn_code icode = CODE_FOR_nothing;
16349 int cost = 0;
16350 int size = GET_MODE_SIZE (mode);
16352 if (TARGET_POWERPC64)
16354 if (size == 16)
16356 /* Handle moving 128-bit values from GPRs to VSX registers on
16357 power8 when running in 64-bit mode using XXPERMDI to glue the two
16358 64-bit values back together. */
16359 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16361 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16362 icode = reg_addr[mode].reload_vsx_gpr;
16365 /* Handle moving 128-bit values from VSX registers to GPRs on
16366 power8 when running in 64-bit mode using XXPERMDI to get access to the
16367 bottom 64-bit value. */
16368 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16370 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16371 icode = reg_addr[mode].reload_gpr_vsx;
16375 else if (mode == SFmode)
16377 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16379 cost = 3; /* xscvdpspn, mfvsrd, and. */
16380 icode = reg_addr[mode].reload_gpr_vsx;
16383 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16385 cost = 2; /* mtvsrz, xscvspdpn. */
16386 icode = reg_addr[mode].reload_vsx_gpr;
16391 if (TARGET_POWERPC64 && size == 16)
16393 /* Handle moving 128-bit values from GPRs to VSX registers on
16394 power8 when running in 64-bit mode using XXPERMDI to glue the two
16395 64-bit values back together. */
16396 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16398 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16399 icode = reg_addr[mode].reload_vsx_gpr;
16402 /* Handle moving 128-bit values from VSX registers to GPRs on
16403 power8 when running in 64-bit mode using XXPERMDI to get access to the
16404 bottom 64-bit value. */
16405 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16407 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16408 icode = reg_addr[mode].reload_gpr_vsx;
16412 else if (!TARGET_POWERPC64 && size == 8)
16414 /* Handle moving 64-bit values from GPRs to floating point registers on
16415 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16416 values back together. Altivec register classes must be handled
16417 specially since a different instruction is used, and the secondary
16418 reload support requires a single instruction class in the scratch
16419 register constraint. However, right now TFmode is not allowed in
16420 Altivec registers, so the pattern will never match. */
16421 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
16423 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
16424 icode = reg_addr[mode].reload_fpr_gpr;
16428 if (icode != CODE_FOR_nothing)
16430 ret = true;
16431 if (sri)
16433 sri->icode = icode;
16434 sri->extra_cost = cost;
16438 return ret;
16441 /* Return whether a move between two register classes can be done either
16442 directly (simple move) or via a pattern that uses a single extra temporary
16443 (using power8's direct move in this case).  */
16445 static bool
16446 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
16447 enum rs6000_reg_type from_type,
16448 enum machine_mode mode,
16449 secondary_reload_info *sri,
16450 bool altivec_p)
16452 /* Fall back to load/store reloads if either type is not a register. */
16453 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
16454 return false;
16456 /* If we haven't allocated registers yet, assume the move can be done for the
16457 standard register types. */
16458 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
16459 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
16460 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
16461 return true;
16463 /* A move within the same set of registers is a simple move for non-specialized
16464 registers. */
16465 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
16466 return true;
16468 /* Check whether a simple move can be done directly. */
16469 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
16471 if (sri)
16473 sri->icode = CODE_FOR_nothing;
16474 sri->extra_cost = 0;
16476 return true;
16479 /* Now check if we can do it in a few steps. */
16480 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
16481 altivec_p);
16484 /* Inform reload about cases where moving X with a mode MODE to a register in
16485 RCLASS requires an extra scratch or immediate register. Return the class
16486 needed for the immediate register.
16488 For VSX and Altivec, we may need a register to convert sp+offset into
16489 reg+sp.
16491 For misaligned 64-bit gpr loads and stores we need a register to
16492 convert an offset address to indirect. */
16494 static reg_class_t
16495 rs6000_secondary_reload (bool in_p,
16496 rtx x,
16497 reg_class_t rclass_i,
16498 enum machine_mode mode,
16499 secondary_reload_info *sri)
16501 enum reg_class rclass = (enum reg_class) rclass_i;
16502 reg_class_t ret = ALL_REGS;
16503 enum insn_code icode;
16504 bool default_p = false;
16506 sri->icode = CODE_FOR_nothing;
16507 icode = ((in_p)
16508 ? reg_addr[mode].reload_load
16509 : reg_addr[mode].reload_store);
16511 if (REG_P (x) || register_operand (x, mode))
16513 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
16514 bool altivec_p = (rclass == ALTIVEC_REGS);
16515 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
16517 if (!in_p)
16519 enum rs6000_reg_type exchange = to_type;
16520 to_type = from_type;
16521 from_type = exchange;
16524 /* Can we do a direct move of some sort? */
16525 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
16526 altivec_p))
16528 icode = (enum insn_code)sri->icode;
16529 default_p = false;
16530 ret = NO_REGS;
16534 /* Handle vector moves with reload helper functions. */
16535 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
16537 ret = NO_REGS;
16538 sri->icode = CODE_FOR_nothing;
16539 sri->extra_cost = 0;
16541 if (GET_CODE (x) == MEM)
16543 rtx addr = XEXP (x, 0);
16545 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
16546 an extra register in that case, but it would need an extra
16547 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16548 case load/store quad. */
16549 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
16551 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
16552 && GET_MODE_SIZE (mode) == 16
16553 && quad_memory_operand (x, mode))
16555 sri->icode = icode;
16556 sri->extra_cost = 2;
16559 else if (!legitimate_indirect_address_p (addr, false)
16560 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16561 false, true))
16563 sri->icode = icode;
16564 /* Account for splitting the loads, and converting the
16565 address from reg+reg to reg. */
16566 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
16567 + ((GET_CODE (addr) == AND) ? 1 : 0));
16570 /* Allow scalar loads to/from the traditional floating point
16571 registers, even if VSX memory is set. */
16572 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
16573 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16574 && (legitimate_indirect_address_p (addr, false)
16575 || legitimate_indexed_address_p (addr, false)
16576 || rs6000_legitimate_offset_address_p (mode, addr,
16577 false, true)))
16580 /* Loads to and stores from vector registers can only do reg+reg
16581 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16582 scalar modes loading up the traditional floating point registers
16583 to use offset addresses. */
16584 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
16585 || rclass == FLOAT_REGS || rclass == NO_REGS)
16587 if (!VECTOR_MEM_ALTIVEC_P (mode)
16588 && GET_CODE (addr) == AND
16589 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16590 && INTVAL (XEXP (addr, 1)) == -16
16591 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16592 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
16594 sri->icode = icode;
16595 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
16596 ? 2 : 1);
16598 else if (!legitimate_indirect_address_p (addr, false)
16599 && (rclass == NO_REGS
16600 || !legitimate_indexed_address_p (addr, false)))
16602 sri->icode = icode;
16603 sri->extra_cost = 1;
16605 else
16606 icode = CODE_FOR_nothing;
16608 /* Any other loads, including to pseudo registers which haven't been
16609 assigned to a register yet, default to require a scratch
16610 register. */
16611 else
16613 sri->icode = icode;
16614 sri->extra_cost = 2;
16617 else if (REG_P (x))
16619 int regno = true_regnum (x);
16621 icode = CODE_FOR_nothing;
16622 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16623 default_p = true;
16624 else
16626 enum reg_class xclass = REGNO_REG_CLASS (regno);
16627 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
16628 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
16630 /* If memory is needed, use default_secondary_reload to create the
16631 stack slot. */
16632 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
16633 default_p = true;
16634 else
16635 ret = NO_REGS;
16638 else
16639 default_p = true;
16641 else if (TARGET_POWERPC64
16642 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16643 && MEM_P (x)
16644 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
16646 rtx addr = XEXP (x, 0);
16647 rtx off = address_offset (addr);
16649 if (off != NULL_RTX)
16651 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16652 unsigned HOST_WIDE_INT offset = INTVAL (off);
16654 /* We need a secondary reload when our legitimate_address_p
16655 says the address is good (as otherwise the entire address
16656 will be reloaded), and the offset is not a multiple of
16657 four or we have an address wrap. Address wrap will only
16658 occur for LO_SUMs since legitimate_offset_address_p
16659 rejects addresses for 16-byte mems that will wrap. */
16660 if (GET_CODE (addr) == LO_SUM
16661 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16662 && ((offset & 3) != 0
16663 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
16664 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
16665 && (offset & 3) != 0))
16667 if (in_p)
16668 sri->icode = CODE_FOR_reload_di_load;
16669 else
16670 sri->icode = CODE_FOR_reload_di_store;
16671 sri->extra_cost = 2;
16672 ret = NO_REGS;
16674 else
16675 default_p = true;
16677 else
16678 default_p = true;
16680 else if (!TARGET_POWERPC64
16681 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16682 && MEM_P (x)
16683 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
16685 rtx addr = XEXP (x, 0);
16686 rtx off = address_offset (addr);
16688 if (off != NULL_RTX)
16690 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16691 unsigned HOST_WIDE_INT offset = INTVAL (off);
16693 /* We need a secondary reload when our legitimate_address_p
16694 says the address is good (as otherwise the entire address
16695 will be reloaded), and we have a wrap.
16697 legitimate_lo_sum_address_p allows LO_SUM addresses to
16698 have any offset so test for wrap in the low 16 bits.
16700 legitimate_offset_address_p checks for the range
16701 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16702 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16703 [0x7ff4,0x7fff] respectively, so test for the
16704 intersection of these ranges, [0x7ffc,0x7fff] and
16705 [0x7ff4,0x7ff7] respectively.
16707 Note that the address we see here may have been
16708 manipulated by legitimize_reload_address. */
16709 if (GET_CODE (addr) == LO_SUM
16710 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
16711 : offset - (0x8000 - extra) < UNITS_PER_WORD)
16713 if (in_p)
16714 sri->icode = CODE_FOR_reload_si_load;
16715 else
16716 sri->icode = CODE_FOR_reload_si_store;
16717 sri->extra_cost = 2;
16718 ret = NO_REGS;
16720 else
16721 default_p = true;
16723 else
16724 default_p = true;
16726 else
16727 default_p = true;
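/* A sketch of the 32-bit wrap window computed above (illustrative only,
   guarded out; the helper name is made up for the example):  */
#if 0
static int
example_dimode_offset_wraps (unsigned HOST_WIDE_INT offset)
{
  unsigned int extra = 8 - 4;	/* DImode size minus UNITS_PER_WORD.  */
  /* True only for offsets 0x7ffc..0x7fff, exactly the range where the
     second word of the pair would step past the signed 16-bit
     displacement window.  */
  return offset - (0x8000 - extra) < 4;
}
#endif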
16729 if (default_p)
16730 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
16732 gcc_assert (ret != ALL_REGS);
16734 if (TARGET_DEBUG_ADDR)
16736 fprintf (stderr,
16737 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16738 "mode = %s",
16739 reg_class_names[ret],
16740 in_p ? "true" : "false",
16741 reg_class_names[rclass],
16742 GET_MODE_NAME (mode));
16744 if (default_p)
16745 fprintf (stderr, ", default secondary reload");
16747 if (sri->icode != CODE_FOR_nothing)
16748 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
16749 insn_data[sri->icode].name, sri->extra_cost);
16750 else
16751 fprintf (stderr, "\n");
16753 debug_rtx (x);
16756 return ret;
16759 /* Better tracing for rs6000_secondary_reload_inner. */
16761 static void
16762 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
16763 bool store_p)
16765 rtx set, clobber;
16767 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
16769 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
16770 store_p ? "store" : "load");
16772 if (store_p)
16773 set = gen_rtx_SET (VOIDmode, mem, reg);
16774 else
16775 set = gen_rtx_SET (VOIDmode, reg, mem);
16777 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
16778 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
16781 static void
16782 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
16783 bool store_p)
16785 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
16786 gcc_unreachable ();
16789 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16790 to SP+reg addressing. */
16792 void
16793 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
16795 int regno = true_regnum (reg);
16796 enum machine_mode mode = GET_MODE (reg);
16797 enum reg_class rclass;
16798 rtx addr;
16799 rtx and_op2 = NULL_RTX;
16800 rtx addr_op1;
16801 rtx addr_op2;
16802 rtx scratch_or_premodify = scratch;
16803 rtx and_rtx;
16804 rtx cc_clobber;
16806 if (TARGET_DEBUG_ADDR)
16807 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
16809 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16810 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16812 if (GET_CODE (mem) != MEM)
16813 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16815 rclass = REGNO_REG_CLASS (regno);
16816 addr = find_replacement (&XEXP (mem, 0));
16818 switch (rclass)
16820 /* GPRs can handle reg + small constant; all other addresses need to use
16821 the scratch register.  */
16822 case GENERAL_REGS:
16823 case BASE_REGS:
16824 if (GET_CODE (addr) == AND)
16826 and_op2 = XEXP (addr, 1);
16827 addr = find_replacement (&XEXP (addr, 0));
16830 if (GET_CODE (addr) == PRE_MODIFY)
16832 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16833 if (!REG_P (scratch_or_premodify))
16834 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16836 addr = find_replacement (&XEXP (addr, 1));
16837 if (GET_CODE (addr) != PLUS)
16838 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16841 if (GET_CODE (addr) == PLUS
16842 && (and_op2 != NULL_RTX
16843 || !rs6000_legitimate_offset_address_p (PTImode, addr,
16844 false, true)))
16846 /* find_replacement already recurses into both operands of
16847 PLUS so we don't need to call it here. */
16848 addr_op1 = XEXP (addr, 0);
16849 addr_op2 = XEXP (addr, 1);
16850 if (!legitimate_indirect_address_p (addr_op1, false))
16851 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16853 if (!REG_P (addr_op2)
16854 && (GET_CODE (addr_op2) != CONST_INT
16855 || !satisfies_constraint_I (addr_op2)))
16857 if (TARGET_DEBUG_ADDR)
16859 fprintf (stderr,
16860 "\nMove plus addr to register %s, mode = %s: ",
16861 rs6000_reg_names[REGNO (scratch)],
16862 GET_MODE_NAME (mode));
16863 debug_rtx (addr_op2);
16865 rs6000_emit_move (scratch, addr_op2, Pmode);
16866 addr_op2 = scratch;
16869 emit_insn (gen_rtx_SET (VOIDmode,
16870 scratch_or_premodify,
16871 gen_rtx_PLUS (Pmode,
16872 addr_op1,
16873 addr_op2)));
16875 addr = scratch_or_premodify;
16876 scratch_or_premodify = scratch;
16878 else if (!legitimate_indirect_address_p (addr, false)
16879 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16880 false, true))
16882 if (TARGET_DEBUG_ADDR)
16884 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16885 rs6000_reg_names[REGNO (scratch_or_premodify)],
16886 GET_MODE_NAME (mode));
16887 debug_rtx (addr);
16889 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16890 addr = scratch_or_premodify;
16891 scratch_or_premodify = scratch;
16893 break;
16895 /* Float registers can do offset+reg addressing for scalar types. */
16896 case FLOAT_REGS:
16897 if (legitimate_indirect_address_p (addr, false) /* reg */
16898 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16899 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16900 && and_op2 == NULL_RTX
16901 && scratch_or_premodify == scratch
16902 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
16903 break;
16905 /* If this isn't a legacy floating point load/store, fall through to the
16906 VSX defaults. */
16908 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
16909 addresses into a scratch register. */
16910 case VSX_REGS:
16911 case ALTIVEC_REGS:
16913 /* With float regs, we need to handle the AND ourselves, since we can't
16914 use the Altivec instruction with an implicit AND -16. Allow scalar
16915 loads to float registers to use reg+offset even if VSX. */
16916 if (GET_CODE (addr) == AND
16917 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
16918 || GET_CODE (XEXP (addr, 1)) != CONST_INT
16919 || INTVAL (XEXP (addr, 1)) != -16
16920 || !VECTOR_MEM_ALTIVEC_P (mode)))
16922 and_op2 = XEXP (addr, 1);
16923 addr = find_replacement (&XEXP (addr, 0));
16926 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
16927 as the address later. */
16928 if (GET_CODE (addr) == PRE_MODIFY
16929 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
16930 && (rclass != FLOAT_REGS
16931 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
16932 || and_op2 != NULL_RTX
16933 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
16935 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16936 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
16937 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16939 addr = find_replacement (&XEXP (addr, 1));
16940 if (GET_CODE (addr) != PLUS)
16941 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16944 if (legitimate_indirect_address_p (addr, false) /* reg */
16945 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16946 || (GET_CODE (addr) == AND /* Altivec memory */
16947 && rclass == ALTIVEC_REGS
16948 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16949 && INTVAL (XEXP (addr, 1)) == -16
16950 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16951 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
16954 else if (GET_CODE (addr) == PLUS)
16956 addr_op1 = XEXP (addr, 0);
16957 addr_op2 = XEXP (addr, 1);
16958 if (!REG_P (addr_op1))
16959 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16961 if (TARGET_DEBUG_ADDR)
16963 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
16964 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
16965 debug_rtx (addr_op2);
16967 rs6000_emit_move (scratch, addr_op2, Pmode);
16968 emit_insn (gen_rtx_SET (VOIDmode,
16969 scratch_or_premodify,
16970 gen_rtx_PLUS (Pmode,
16971 addr_op1,
16972 scratch)));
16973 addr = scratch_or_premodify;
16974 scratch_or_premodify = scratch;
16977 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
16978 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
16979 || REG_P (addr))
16981 if (TARGET_DEBUG_ADDR)
16983 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16984 rs6000_reg_names[REGNO (scratch_or_premodify)],
16985 GET_MODE_NAME (mode));
16986 debug_rtx (addr);
16989 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16990 addr = scratch_or_premodify;
16991 scratch_or_premodify = scratch;
16994 else
16995 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16997 break;
16999 default:
17000 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17003 /* If the original address involved a pre-modify that we couldn't use with the
17004 VSX memory instruction with update, and we haven't already taken care of it,
17005 store the address in the pre-modify register and use that as the
17006 address. */
17007 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
17009 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
17010 addr = scratch_or_premodify;
17013 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
17014 memory instruction, recreate the AND now, including the clobber which is
17015 generated by the general ANDSI3/ANDDI3 patterns for the
17016 andi. instruction. */
17017 if (and_op2 != NULL_RTX)
17019 if (! legitimate_indirect_address_p (addr, false))
17021 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
17022 addr = scratch;
17025 if (TARGET_DEBUG_ADDR)
17027 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
17028 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
17029 debug_rtx (and_op2);
17032 and_rtx = gen_rtx_SET (VOIDmode,
17033 scratch,
17034 gen_rtx_AND (Pmode,
17035 addr,
17036 and_op2));
17038 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
17039 emit_insn (gen_rtx_PARALLEL (VOIDmode,
17040 gen_rtvec (2, and_rtx, cc_clobber)));
17041 addr = scratch;
17044 /* Adjust the address if it changed. */
17045 if (addr != XEXP (mem, 0))
17047 mem = replace_equiv_address_nv (mem, addr);
17048 if (TARGET_DEBUG_ADDR)
17049 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
17052 /* Now create the move. */
17053 if (store_p)
17054 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17055 else
17056 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17058 return;
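/* Illustrative transformation (assumed example): reloading a V4SImode
   value from (mem (plus r1 32)) into an Altivec register hits the PLUS
   case above: the constant 32 is moved into the scratch register, added
   to r1, and the access is issued as "lvx vD,0,scratch", since VRs have
   no reg+offset addressing mode.  */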
17061 /* Convert reloads involving 64-bit gprs and misaligned offset
17062 addressing, or multiple 32-bit gprs and offsets that are too large,
17063 to use indirect addressing. */
17065 void
17066 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
17068 int regno = true_regnum (reg);
17069 enum reg_class rclass;
17070 rtx addr;
17071 rtx scratch_or_premodify = scratch;
17073 if (TARGET_DEBUG_ADDR)
17075 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
17076 store_p ? "store" : "load");
17077 fprintf (stderr, "reg:\n");
17078 debug_rtx (reg);
17079 fprintf (stderr, "mem:\n");
17080 debug_rtx (mem);
17081 fprintf (stderr, "scratch:\n");
17082 debug_rtx (scratch);
17085 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
17086 gcc_assert (GET_CODE (mem) == MEM);
17087 rclass = REGNO_REG_CLASS (regno);
17088 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
17089 addr = XEXP (mem, 0);
17091 if (GET_CODE (addr) == PRE_MODIFY)
17093 scratch_or_premodify = XEXP (addr, 0);
17094 gcc_assert (REG_P (scratch_or_premodify));
17095 addr = XEXP (addr, 1);
17097 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
17099 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17101 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
17103 /* Now create the move. */
17104 if (store_p)
17105 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17106 else
17107 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17109 return;
17112 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
17113 this function has any SDmode references. If we are on a power7 or later, we
17114 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
17115 can load/store the value. */
17117 static void
17118 rs6000_alloc_sdmode_stack_slot (void)
17120 tree t;
17121 basic_block bb;
17122 gimple_stmt_iterator gsi;
17124 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
17125 /* We use a different approach for dealing with the secondary
17126 memory in LRA. */
17127 if (ira_use_lra_p)
17128 return;
17130 if (TARGET_NO_SDMODE_STACK)
17131 return;
17133 FOR_EACH_BB_FN (bb, cfun)
17134 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
17136 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
17137 if (ret)
17139 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17140 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17141 SDmode, 0);
17142 return;
17146 /* Check for any SDmode parameters of the function. */
17147 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
17149 if (TREE_TYPE (t) == error_mark_node)
17150 continue;
17152 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
17153 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
17155 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17156 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17157 SDmode, 0);
17158 return;
17163 static void
17164 rs6000_instantiate_decls (void)
17166 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
17167 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
17170 /* Given an rtx X being reloaded into a reg required to be
17171 in class CLASS, return the class of reg to actually use.
17172 In general this is just CLASS; but on some machines
17173 in some cases it is preferable to use a more restrictive class.
17175 On the RS/6000, we have to return NO_REGS when we want to reload a
17176 floating-point CONST_DOUBLE to force it to be copied to memory.
17178 We also don't want to reload integer values into floating-point
17179 registers if we can at all help it. In fact, this can
17180 cause reload to die, if it tries to generate a reload of CTR
17181 into a FP register and discovers it doesn't have the memory location
17182 required.
17184 ??? Would it be a good idea to have reload do the converse, that is
17185 try to reload floating modes into FP registers if possible?
17188 static enum reg_class
17189 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
17191 enum machine_mode mode = GET_MODE (x);
17193 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
17194 return rclass;
17196 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17197 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
17198 && easy_vector_constant (x, mode))
17199 return ALTIVEC_REGS;
17201 if ((CONSTANT_P (x) || GET_CODE (x) == PLUS))
17203 if (reg_class_subset_p (GENERAL_REGS, rclass))
17204 return GENERAL_REGS;
17205 if (reg_class_subset_p (BASE_REGS, rclass))
17206 return BASE_REGS;
17207 return NO_REGS;
17210 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
17211 return GENERAL_REGS;
17213 /* For VSX, prefer the traditional registers for 64-bit values because we can
17214 use the non-VSX loads. Prefer the Altivec registers if Altivec is
17215 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
17216 prefer Altivec loads.  */
17217 if (rclass == VSX_REGS)
17219 if (GET_MODE_SIZE (mode) <= 8)
17220 return FLOAT_REGS;
17222 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
17223 || mode == V1TImode)
17224 return ALTIVEC_REGS;
17226 return rclass;
17229 return rclass;
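/* Illustrative outcomes (not exhaustive): asking to reload a DFmode
   CONST_DOUBLE such as 1.0 into FLOAT_REGS returns NO_REGS, forcing the
   constant into memory, while asking for a DFmode value in VSX_REGS
   returns FLOAT_REGS so the reg+offset lfd/stfd forms stay usable.  */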
17232 /* Debug version of rs6000_preferred_reload_class. */
17233 static enum reg_class
17234 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
17236 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
17238 fprintf (stderr,
17239 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17240 "mode = %s, x:\n",
17241 reg_class_names[ret], reg_class_names[rclass],
17242 GET_MODE_NAME (GET_MODE (x)));
17243 debug_rtx (x);
17245 return ret;
17248 /* If we are copying between FP or AltiVec registers and anything else, we need
17249 a memory location. The exception is when we are targeting ppc64 and the
17250 move to/from fpr to gpr instructions are available. Also, under VSX, you
17251 can copy vector registers from the FP register set to the Altivec register
17252 set and vice versa. */
17254 static bool
17255 rs6000_secondary_memory_needed (enum reg_class from_class,
17256 enum reg_class to_class,
17257 enum machine_mode mode)
17259 enum rs6000_reg_type from_type, to_type;
17260 bool altivec_p = ((from_class == ALTIVEC_REGS)
17261 || (to_class == ALTIVEC_REGS));
17263 /* If a simple/direct move is available, we don't need secondary memory.  */
17264 from_type = reg_class_to_reg_type[(int)from_class];
17265 to_type = reg_class_to_reg_type[(int)to_class];
17267 if (rs6000_secondary_reload_move (to_type, from_type, mode,
17268 (secondary_reload_info *)0, altivec_p))
17269 return false;
17271 /* If we have a floating point or vector register class, we need to use
17272 memory to transfer the data. */
17273 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
17274 return true;
17276 return false;
17279 /* Debug version of rs6000_secondary_memory_needed. */
17280 static bool
17281 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
17282 enum reg_class to_class,
17283 enum machine_mode mode)
17285 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
17287 fprintf (stderr,
17288 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17289 "to_class = %s, mode = %s\n",
17290 ret ? "true" : "false",
17291 reg_class_names[from_class],
17292 reg_class_names[to_class],
17293 GET_MODE_NAME (mode));
17295 return ret;
17298 /* Return the register class of a scratch register needed to copy IN into
17299 or out of a register in RCLASS in MODE. If it can be done directly,
17300 NO_REGS is returned. */
17302 static enum reg_class
17303 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
17304 rtx in)
17306 int regno;
17308 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17309 #if TARGET_MACHO
17310 && MACHOPIC_INDIRECT
17311 #endif
17314 /* We cannot copy a symbolic operand directly into anything
17315 other than BASE_REGS for TARGET_ELF. So indicate that a
17316 register from BASE_REGS is needed as an intermediate
17317 register.
17319 On Darwin, pic addresses require a load from memory, which
17320 needs a base register. */
17321 if (rclass != BASE_REGS
17322 && (GET_CODE (in) == SYMBOL_REF
17323 || GET_CODE (in) == HIGH
17324 || GET_CODE (in) == LABEL_REF
17325 || GET_CODE (in) == CONST))
17326 return BASE_REGS;
17329 if (GET_CODE (in) == REG)
17331 regno = REGNO (in);
17332 if (regno >= FIRST_PSEUDO_REGISTER)
17334 regno = true_regnum (in);
17335 if (regno >= FIRST_PSEUDO_REGISTER)
17336 regno = -1;
17339 else if (GET_CODE (in) == SUBREG)
17341 regno = true_regnum (in);
17342 if (regno >= FIRST_PSEUDO_REGISTER)
17343 regno = -1;
17345 else
17346 regno = -1;
17348 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17349 into anything. */
17350 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17351 || (regno >= 0 && INT_REGNO_P (regno)))
17352 return NO_REGS;
17354 /* Constants, memory, and FP registers can go into FP registers. */
17355 if ((regno == -1 || FP_REGNO_P (regno))
17356 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17357 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17359 /* Memory, and FP/altivec registers can go into fp/altivec registers under
17360 VSX. However, for scalar variables, use the traditional floating point
17361 registers so that we can use offset+register addressing. */
17362 if (TARGET_VSX
17363 && (regno == -1 || VSX_REGNO_P (regno))
17364 && VSX_REG_CLASS_P (rclass))
17366 if (GET_MODE_SIZE (mode) < 16)
17367 return FLOAT_REGS;
17369 return NO_REGS;
17372 /* Memory, and AltiVec registers can go into AltiVec registers. */
17373 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17374 && rclass == ALTIVEC_REGS)
17375 return NO_REGS;
17377 /* We can copy among the CR registers. */
17378 if ((rclass == CR_REGS || rclass == CR0_REGS)
17379 && regno >= 0 && CR_REGNO_P (regno))
17380 return NO_REGS;
17382 /* Otherwise, we need GENERAL_REGS. */
17383 return GENERAL_REGS;
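/* Illustrative cases (assumed): under TARGET_ELF, copying a SYMBOL_REF
   into FLOAT_REGS reports BASE_REGS, because the address must first be
   formed in a base register; an SDmode constant headed for FLOAT_REGS
   reports GENERAL_REGS (outside LRA), since the 4-byte value must be
   staged through a GPR.  */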
17386 /* Debug version of rs6000_secondary_reload_class. */
17387 static enum reg_class
17388 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17389 enum machine_mode mode, rtx in)
17391 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17392 fprintf (stderr,
17393 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17394 "mode = %s, input rtx:\n",
17395 reg_class_names[ret], reg_class_names[rclass],
17396 GET_MODE_NAME (mode));
17397 debug_rtx (in);
17399 return ret;
17402 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17404 static bool
17405 rs6000_cannot_change_mode_class (enum machine_mode from,
17406 enum machine_mode to,
17407 enum reg_class rclass)
17409 unsigned from_size = GET_MODE_SIZE (from);
17410 unsigned to_size = GET_MODE_SIZE (to);
17412 if (from_size != to_size)
17414 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
17416 if (reg_classes_intersect_p (xclass, rclass))
17418 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
17419 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
17421 /* Don't allow 64-bit types to overlap with 128-bit types that take a
17422 single register under VSX because the scalar part of the register
17423 is in the upper 64-bits, and not the lower 64-bits. Types like
17424 TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
17425 IEEE floating point can't overlap, and neither can small
17426 values. */
17428 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
17429 return true;
17431 /* TDmode in floating-mode registers must always go into a register
17432 pair with the most significant word in the even-numbered register
17433 to match ISA requirements. In little-endian mode, this does not
17434 match subreg numbering, so we cannot allow subregs. */
17435 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
17436 return true;
17438 if (from_size < 8 || to_size < 8)
17439 return true;
17441 if (from_size == 8 && (8 * to_nregs) != to_size)
17442 return true;
17444 if (to_size == 8 && (8 * from_nregs) != from_size)
17445 return true;
17447 return false;
17449 else
17450 return false;
17453 if (TARGET_E500_DOUBLE
17454 && ((((to) == DFmode) + ((from) == DFmode)) == 1
17455 || (((to) == TFmode) + ((from) == TFmode)) == 1
17456 || (((to) == DDmode) + ((from) == DDmode)) == 1
17457 || (((to) == TDmode) + ((from) == TDmode)) == 1
17458 || (((to) == DImode) + ((from) == DImode)) == 1))
17459 return true;
17461 /* Since the VSX register set includes traditional floating point registers
17462 and altivec registers, just check for the size being different instead of
17463 trying to check whether the modes are vector modes. Otherwise it won't
17464 allow say DF and DI to change classes. For types like TFmode and TDmode
17465 that take 2 64-bit registers, rather than a single 128-bit register, don't
17466 allow subregs of those types to other 128 bit types. */
17467 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
17469 unsigned num_regs = (from_size + 15) / 16;
17470 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
17471 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
17472 return true;
17474 return (from_size != 8 && from_size != 16);
17477 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
17478 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
17479 return true;
17481 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
17482 && reg_classes_intersect_p (GENERAL_REGS, rclass))
17483 return true;
17485 return false;
17488 /* Debug version of rs6000_cannot_change_mode_class. */
17489 static bool
17490 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
17491 enum machine_mode to,
17492 enum reg_class rclass)
17494 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
17496 fprintf (stderr,
17497 "rs6000_cannot_change_mode_class, return %s, from = %s, "
17498 "to = %s, rclass = %s\n",
17499 ret ? "true" : "false",
17500 GET_MODE_NAME (from), GET_MODE_NAME (to),
17501 reg_class_names[rclass]);
17503 return ret;
17506 /* Return a string to do a move operation of 128 bits of data. */
17508 const char *
17509 rs6000_output_move_128bit (rtx operands[])
17511 rtx dest = operands[0];
17512 rtx src = operands[1];
17513 enum machine_mode mode = GET_MODE (dest);
17514 int dest_regno;
17515 int src_regno;
17516 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
17517 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
17519 if (REG_P (dest))
17521 dest_regno = REGNO (dest);
17522 dest_gpr_p = INT_REGNO_P (dest_regno);
17523 dest_fp_p = FP_REGNO_P (dest_regno);
17524 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
17525 dest_vsx_p = dest_fp_p | dest_vmx_p;
17527 else
17529 dest_regno = -1;
17530 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
17533 if (REG_P (src))
17535 src_regno = REGNO (src);
17536 src_gpr_p = INT_REGNO_P (src_regno);
17537 src_fp_p = FP_REGNO_P (src_regno);
17538 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
17539 src_vsx_p = src_fp_p | src_vmx_p;
17541 else
17543 src_regno = -1;
17544 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
17547 /* Register moves. */
17548 if (dest_regno >= 0 && src_regno >= 0)
17550 if (dest_gpr_p)
17552 if (src_gpr_p)
17553 return "#";
17555 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
17556 return "#";
17559 else if (TARGET_VSX && dest_vsx_p)
17561 if (src_vsx_p)
17562 return "xxlor %x0,%x1,%x1";
17564 else if (TARGET_DIRECT_MOVE && src_gpr_p)
17565 return "#";
17568 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
17569 return "vor %0,%1,%1";
17571 else if (dest_fp_p && src_fp_p)
17572 return "#";
17575 /* Loads. */
17576 else if (dest_regno >= 0 && MEM_P (src))
17578 if (dest_gpr_p)
17580 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17581 return "lq %0,%1";
17582 else
17583 return "#";
17586 else if (TARGET_ALTIVEC && dest_vmx_p
17587 && altivec_indexed_or_indirect_operand (src, mode))
17588 return "lvx %0,%y1";
17590 else if (TARGET_VSX && dest_vsx_p)
17592 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17593 return "lxvw4x %x0,%y1";
17594 else
17595 return "lxvd2x %x0,%y1";
17598 else if (TARGET_ALTIVEC && dest_vmx_p)
17599 return "lvx %0,%y1";
17601 else if (dest_fp_p)
17602 return "#";
17605 /* Stores. */
17606 else if (src_regno >= 0 && MEM_P (dest))
17608 if (src_gpr_p)
17610 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17611 return "stq %1,%0";
17612 else
17613 return "#";
17616 else if (TARGET_ALTIVEC && src_vmx_p
17617 && altivec_indexed_or_indirect_operand (dest, mode))
17618 return "stvx %1,%y0";
17620 else if (TARGET_VSX && src_vsx_p)
17622 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17623 return "stxvw4x %x1,%y0";
17624 else
17625 return "stxvd2x %x1,%y0";
17628 else if (TARGET_ALTIVEC && src_vmx_p)
17629 return "stvx %1,%y0";
17631 else if (src_fp_p)
17632 return "#";
17635 /* Constants. */
17636 else if (dest_regno >= 0
17637 && (GET_CODE (src) == CONST_INT
17638 || GET_CODE (src) == CONST_WIDE_INT
17639 || GET_CODE (src) == CONST_DOUBLE
17640 || GET_CODE (src) == CONST_VECTOR))
17642 if (dest_gpr_p)
17643 return "#";
17645 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
17646 return "xxlxor %x0,%x0,%x0";
17648 else if (TARGET_ALTIVEC && dest_vmx_p)
17649 return output_vec_const_move (operands);
17652 if (TARGET_DEBUG_ADDR)
17654 fprintf (stderr, "\n===== Bad 128 bit move:\n");
17655 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
17658 gcc_unreachable ();
17661 /* Validate a 128-bit move. */
17662 bool
17663 rs6000_move_128bit_ok_p (rtx operands[])
17665 enum machine_mode mode = GET_MODE (operands[0]);
17666 return (gpc_reg_operand (operands[0], mode)
17667 || gpc_reg_operand (operands[1], mode));
17670 /* Return true if a 128-bit move needs to be split. */
17671 bool
17672 rs6000_split_128bit_ok_p (rtx operands[])
17674 if (!reload_completed)
17675 return false;
17677 if (!gpr_or_gpr_p (operands[0], operands[1]))
17678 return false;
17680 if (quad_load_store_p (operands[0], operands[1]))
17681 return false;
17683 return true;
17687 /* Given a comparison operation, return the bit number in CCR to test. We
17688 know this is a valid comparison.
17690 SCC_P is 1 if this is for an scc. That means that %D will have been
17691 used instead of %C, so the bits will be in different places.
17693 Return -1 if OP isn't a valid comparison for some reason. */
17695 int
17696 ccr_bit (rtx op, int scc_p)
17698 enum rtx_code code = GET_CODE (op);
17699 enum machine_mode cc_mode;
17700 int cc_regnum;
17701 int base_bit;
17702 rtx reg;
17704 if (!COMPARISON_P (op))
17705 return -1;
17707 reg = XEXP (op, 0);
17709 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
17711 cc_mode = GET_MODE (reg);
17712 cc_regnum = REGNO (reg);
17713 base_bit = 4 * (cc_regnum - CR0_REGNO);
17715 validate_condition_mode (code, cc_mode);
17717 /* When generating a sCOND operation, only positive conditions are
17718 allowed. */
17719 gcc_assert (!scc_p
17720 || code == EQ || code == GT || code == LT || code == UNORDERED
17721 || code == GTU || code == LTU);
17723 switch (code)
17725 case NE:
17726 return scc_p ? base_bit + 3 : base_bit + 2;
17727 case EQ:
17728 return base_bit + 2;
17729 case GT: case GTU: case UNLE:
17730 return base_bit + 1;
17731 case LT: case LTU: case UNGE:
17732 return base_bit;
17733 case ORDERED: case UNORDERED:
17734 return base_bit + 3;
17736 case GE: case GEU:
17737 /* If scc, we will have done a cror to put the bit in the
17738 unordered position. So test that bit. For integer, this is ! LT
17739 unless this is an scc insn. */
17740 return scc_p ? base_bit + 3 : base_bit;
17742 case LE: case LEU:
17743 return scc_p ? base_bit + 3 : base_bit + 1;
17745 default:
17746 gcc_unreachable ();
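/* Worked example (illustrative): for (gt (reg:CC <cr2>) (const_int 0)),
   base_bit is 4 * (cr2 - cr0) == 8 and GT tests bit base_bit + 1 == 9;
   with SCC_P set, GE/LE/NE instead test base_bit + 3, the slot the scc
   expander cror's the result into.  */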
17750 /* Return the GOT register. */
17752 rtx
17753 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
17755 /* The second flow pass currently (June 1999) can't update
17756 regs_ever_live without disturbing other parts of the compiler, so
17757 update it here to make the prolog/epilogue code happy. */
17758 if (!can_create_pseudo_p ()
17759 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17760 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
17762 crtl->uses_pic_offset_table = 1;
17764 return pic_offset_table_rtx;
17767 static rs6000_stack_t stack_info;
17769 /* Function to init struct machine_function.
17770 This will be called, via a pointer variable,
17771 from push_function_context. */
17773 static struct machine_function *
17774 rs6000_init_machine_status (void)
17776 stack_info.reload_completed = 0;
17777 return ggc_cleared_alloc<machine_function> ();
17780 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
17782 int
17783 extract_MB (rtx op)
17785 int i;
17786 unsigned long val = INTVAL (op);
17788 /* If the high bit is zero, the value is the first 1 bit we find
17789 from the left. */
17790 if ((val & 0x80000000) == 0)
17792 gcc_assert (val & 0xffffffff);
17794 i = 1;
17795 while (((val <<= 1) & 0x80000000) == 0)
17796 ++i;
17797 return i;
17800 /* If the high bit is set and the low bit is not, or the mask is all
17801 1's, the value is zero. */
17802 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
17803 return 0;
17805 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17806 from the right. */
17807 i = 31;
17808 while (((val >>= 1) & 1) != 0)
17809 --i;
17811 return i;
17814 int
17815 extract_ME (rtx op)
17817 int i;
17818 unsigned long val = INTVAL (op);
17820 /* If the low bit is zero, the value is the first 1 bit we find from
17821 the right. */
17822 if ((val & 1) == 0)
17824 gcc_assert (val & 0xffffffff);
17826 i = 30;
17827 while (((val >>= 1) & 1) == 0)
17828 --i;
17830 return i;
17833 /* If the low bit is set and the high bit is not, or the mask is all
17834 1's, the value is 31. */
17835 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
17836 return 31;
17838 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17839 from the left. */
17840 i = 0;
17841 while (((val <<= 1) & 0x80000000) != 0)
17842 ++i;
17844 return i;
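/* Worked example for the two extractors above (illustrative): for the
   mask 0x0ffffff0, the high bit is clear, so extract_MB counts from the
   left and returns MB == 4, while the low bit is clear, so extract_ME
   counts from the right and returns ME == 27; rlwinm with MB=4, ME=27
   keeps exactly bits 4..27 in IBM (big-endian) bit numbering.  */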
17847 /* Locate some local-dynamic symbol still in use by this function
17848 so that we can print its name in some tls_ld pattern. */
17850 static const char *
17851 rs6000_get_some_local_dynamic_name (void)
17853 rtx insn;
17855 if (cfun->machine->some_ld_name)
17856 return cfun->machine->some_ld_name;
17858 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
17859 if (INSN_P (insn)
17860 && for_each_rtx (&PATTERN (insn),
17861 rs6000_get_some_local_dynamic_name_1, 0))
17862 return cfun->machine->some_ld_name;
17864 gcc_unreachable ();
17867 /* Helper function for rs6000_get_some_local_dynamic_name. */
17869 static int
17870 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
17872 rtx x = *px;
17874 if (GET_CODE (x) == SYMBOL_REF)
17876 const char *str = XSTR (x, 0);
17877 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
17879 cfun->machine->some_ld_name = str;
17880 return 1;
17884 return 0;
17887 /* Write out a function code label. */
17889 void
17890 rs6000_output_function_entry (FILE *file, const char *fname)
17892 if (fname[0] != '.')
17894 switch (DEFAULT_ABI)
17896 default:
17897 gcc_unreachable ();
17899 case ABI_AIX:
17900 if (DOT_SYMBOLS)
17901 putc ('.', file);
17902 else
17903 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
17904 break;
17906 case ABI_ELFv2:
17907 case ABI_V4:
17908 case ABI_DARWIN:
17909 break;
17913 RS6000_OUTPUT_BASENAME (file, fname);
17916 /* Print an operand. Recognize special options, documented below. */
17918 #if TARGET_ELF
17919 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
17920 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
17921 #else
17922 #define SMALL_DATA_RELOC "sda21"
17923 #define SMALL_DATA_REG 0
17924 #endif
17926 void
17927 print_operand (FILE *file, rtx x, int code)
17929 int i;
17930 unsigned HOST_WIDE_INT uval;
17932 switch (code)
17934 /* %a is output_address. */
17936 case 'b':
17937 /* If constant, low-order 16 bits of constant, unsigned.
17938 Otherwise, write normally. */
17939 if (INT_P (x))
17940 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
17941 else
17942 print_operand (file, x, 0);
17943 return;
17945 case 'B':
17946 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
17947 for 64-bit mask direction. */
17948 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
17949 return;
17951 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
17952 output_operand. */
17954 case 'D':
17955 /* Like 'J' but get to the GT bit only. */
17956 gcc_assert (REG_P (x));
17958 /* Bit 1 is GT bit. */
17959 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
17961 /* Add one for shift count in rlinm for scc. */
17962 fprintf (file, "%d", i + 1);
17963 return;
17965 case 'E':
17966 /* X is a CR register. Print the number of the EQ bit of the CR */
17967 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17968 output_operand_lossage ("invalid %%E value");
17969 else
17970 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
17971 return;
17973 case 'f':
17974 /* X is a CR register. Print the shift count needed to move it
17975 to the high-order four bits. */
17976 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17977 output_operand_lossage ("invalid %%f value");
17978 else
17979 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
17980 return;
17982 case 'F':
17983 /* Similar, but print the count for the rotate in the opposite
17984 direction. */
17985 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17986 output_operand_lossage ("invalid %%F value");
17987 else
17988 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
17989 return;
17991 case 'G':
17992 /* X is a constant integer. If it is negative, print "m",
17993 otherwise print "z". This is to make an aze or ame insn. */
17994 if (GET_CODE (x) != CONST_INT)
17995 output_operand_lossage ("invalid %%G value");
17996 else if (INTVAL (x) >= 0)
17997 putc ('z', file);
17998 else
17999 putc ('m', file);
18000 return;
18002 case 'h':
18003 /* If constant, output low-order five bits. Otherwise, write
18004 normally. */
18005 if (INT_P (x))
18006 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
18007 else
18008 print_operand (file, x, 0);
18009 return;
18011 case 'H':
18012 /* If constant, output low-order six bits. Otherwise, write
18013 normally. */
18014 if (INT_P (x))
18015 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
18016 else
18017 print_operand (file, x, 0);
18018 return;
18020 case 'I':
18021 /* Print `i' if this is a constant, else nothing. */
18022 if (INT_P (x))
18023 putc ('i', file);
18024 return;
18026 case 'j':
18027 /* Write the bit number in CCR for jump. */
18028 i = ccr_bit (x, 0);
18029 if (i == -1)
18030 output_operand_lossage ("invalid %%j code");
18031 else
18032 fprintf (file, "%d", i);
18033 return;
18035 case 'J':
18036 /* Similar, but add one for shift count in rlinm for scc and pass
18037 scc flag to `ccr_bit'. */
18038 i = ccr_bit (x, 1);
18039 if (i == -1)
18040 output_operand_lossage ("invalid %%J code");
18041 else
18042 /* If we want bit 31, write a shift count of zero, not 32. */
18043 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18044 return;
18046 case 'k':
18047 /* X must be a constant. Write the 1's complement of the
18048 constant. */
18049 if (! INT_P (x))
18050 output_operand_lossage ("invalid %%k value");
18051 else
18052 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
18053 return;
18055 case 'K':
18056 /* X must be a symbolic constant on ELF. Write an
18057 expression suitable for an 'addi' that adds in the low 16
18058 bits of the MEM. */
18059 if (GET_CODE (x) == CONST)
18061 if (GET_CODE (XEXP (x, 0)) != PLUS
18062 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
18063 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
18064 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
18065 output_operand_lossage ("invalid %%K value");
18067 print_operand_address (file, x);
18068 fputs ("@l", file);
18069 return;
18071 /* %l is output_asm_label. */
18073 case 'L':
18074 /* Write second word of DImode or DFmode reference. Works on register
18075 or non-indexed memory only. */
18076 if (REG_P (x))
18077 fputs (reg_names[REGNO (x) + 1], file);
18078 else if (MEM_P (x))
18080 /* Handle possible auto-increment. Since it is pre-increment and
18081 we have already done it, we can just use an offset of word. */
18082 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18083 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18084 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18085 UNITS_PER_WORD));
18086 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18087 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18088 UNITS_PER_WORD));
18089 else
18090 output_address (XEXP (adjust_address_nv (x, SImode,
18091 UNITS_PER_WORD),
18092 0));
18094 if (small_data_operand (x, GET_MODE (x)))
18095 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18096 reg_names[SMALL_DATA_REG]);
18098 return;
18100 case 'm':
18101 /* MB value for a mask operand. */
18102 if (! mask_operand (x, SImode))
18103 output_operand_lossage ("invalid %%m value");
18105 fprintf (file, "%d", extract_MB (x));
18106 return;
18108 case 'M':
18109 /* ME value for a mask operand. */
18110 if (! mask_operand (x, SImode))
18111 output_operand_lossage ("invalid %%M value");
18113 fprintf (file, "%d", extract_ME (x));
18114 return;
18116 /* %n outputs the negative of its operand. */
18118 case 'N':
18119 /* Write the number of elements in the vector times 4. */
18120 if (GET_CODE (x) != PARALLEL)
18121 output_operand_lossage ("invalid %%N value");
18122 else
18123 fprintf (file, "%d", XVECLEN (x, 0) * 4);
18124 return;
18126 case 'O':
18127 /* Similar, but subtract 1 first. */
18128 if (GET_CODE (x) != PARALLEL)
18129 output_operand_lossage ("invalid %%O value");
18130 else
18131 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
18132 return;
18134 case 'p':
18135 /* X is a CONST_INT that is a power of two. Output the logarithm. */
18136 if (! INT_P (x)
18137 || INTVAL (x) < 0
18138 || (i = exact_log2 (INTVAL (x))) < 0)
18139 output_operand_lossage ("invalid %%p value");
18140 else
18141 fprintf (file, "%d", i);
18142 return;
18144 case 'P':
18145 /* The operand must be an indirect memory reference. The result
18146 is the register name. */
18147 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
18148 || REGNO (XEXP (x, 0)) >= 32)
18149 output_operand_lossage ("invalid %%P value");
18150 else
18151 fputs (reg_names[REGNO (XEXP (x, 0))], file);
18152 return;
18154 case 'q':
18155 /* This outputs the logical code corresponding to a boolean
18156 expression. The expression may have one or both operands
18157 negated (if one, only the first one). For condition register
18158 logical operations, it will also treat the negated
18159 CR codes as NOTs, but not handle NOTs of them. */
18161 const char *const *t = 0;
18162 const char *s;
18163 enum rtx_code code = GET_CODE (x);
18164 static const char * const tbl[3][3] = {
18165 { "and", "andc", "nor" },
18166 { "or", "orc", "nand" },
18167 { "xor", "eqv", "xor" } };
18169 if (code == AND)
18170 t = tbl[0];
18171 else if (code == IOR)
18172 t = tbl[1];
18173 else if (code == XOR)
18174 t = tbl[2];
18175 else
18176 output_operand_lossage ("invalid %%q value");
18178 if (GET_CODE (XEXP (x, 0)) != NOT)
18179 s = t[0];
18180 else
18182 if (GET_CODE (XEXP (x, 1)) == NOT)
18183 s = t[2];
18184 else
18185 s = t[1];
18188 fputs (s, file);
18190 return;
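/* Example of the %q table above: (ior (not (reg a)) (reg b)) selects
   tbl[1][1] and prints "orc", while (and (not (reg a)) (not (reg b)))
   selects tbl[0][2] and prints "nor".  */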
18192 case 'Q':
18193 if (! TARGET_MFCRF)
18194 return;
18195 fputc (',', file);
18196 /* FALLTHRU */
18198 case 'R':
18199 /* X is a CR register. Print the mask for `mtcrf'. */
18200 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18201 output_operand_lossage ("invalid %%R value");
18202 else
18203 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
18204 return;
18206 case 's':
18207 /* Low 5 bits of 32 - value */
18208 if (! INT_P (x))
18209 output_operand_lossage ("invalid %%s value");
18210 else
18211 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
18212 return;
18214 case 'S':
18215 /* PowerPC64 mask position. All 0's is excluded.
18216 CONST_INT 32-bit mask is considered sign-extended so any
18217 transition must occur within the CONST_INT, not on the boundary. */
18218 if (! mask64_operand (x, DImode))
18219 output_operand_lossage ("invalid %%S value");
18221 uval = INTVAL (x);
18223 if (uval & 1) /* Clear Left */
18225 #if HOST_BITS_PER_WIDE_INT > 64
18226 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18227 #endif
18228 i = 64;
18230 else /* Clear Right */
18232 uval = ~uval;
18233 #if HOST_BITS_PER_WIDE_INT > 64
18234 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18235 #endif
18236 i = 63;
18238 while (uval != 0)
18239 --i, uval >>= 1;
18240 gcc_assert (i >= 0);
18241 fprintf (file, "%d", i);
18242 return;
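/* Example for %S: the "clear left" mask 0x00000000ffffffff has its
   low bit set, so the loop counts its 32 one-bits down from 64 and
   prints 32 -- the MB field of an rldicl clearing the 32 high-order
   bits.  The "clear right" mask 0xffffffff00000000 is inverted first
   and prints 31, the matching ME field for rldicr.  */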
18244 case 't':
18245 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18246 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
18248 /* Bit 3 is OV bit. */
18249 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
18251 /* If we want bit 31, write a shift count of zero, not 32. */
18252 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18253 return;
18255 case 'T':
18256 /* Print the symbolic name of a branch target register. */
18257 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
18258 && REGNO (x) != CTR_REGNO))
18259 output_operand_lossage ("invalid %%T value");
18260 else if (REGNO (x) == LR_REGNO)
18261 fputs ("lr", file);
18262 else
18263 fputs ("ctr", file);
18264 return;
18266 case 'u':
18267 /* High-order 16 bits of constant for use in unsigned operand. */
18268 if (! INT_P (x))
18269 output_operand_lossage ("invalid %%u value");
18270 else
18271 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18272 (INTVAL (x) >> 16) & 0xffff);
18273 return;
18275 case 'v':
18276 /* High-order 16 bits of constant for use in signed operand. */
18277 if (! INT_P (x))
18278 output_operand_lossage ("invalid %%v value");
18279 else
18280 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18281 (INTVAL (x) >> 16) & 0xffff);
18282 return;
18284 case 'U':
18285 /* Print `u' if this has an auto-increment or auto-decrement. */
18286 if (MEM_P (x)
18287 && (GET_CODE (XEXP (x, 0)) == PRE_INC
18288 || GET_CODE (XEXP (x, 0)) == PRE_DEC
18289 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
18290 putc ('u', file);
18291 return;
18293 case 'V':
18294 /* Print the trap code for this operand. */
18295 switch (GET_CODE (x))
18297 case EQ:
18298 fputs ("eq", file); /* 4 */
18299 break;
18300 case NE:
18301 fputs ("ne", file); /* 24 */
18302 break;
18303 case LT:
18304 fputs ("lt", file); /* 16 */
18305 break;
18306 case LE:
18307 fputs ("le", file); /* 20 */
18308 break;
18309 case GT:
18310 fputs ("gt", file); /* 8 */
18311 break;
18312 case GE:
18313 fputs ("ge", file); /* 12 */
18314 break;
18315 case LTU:
18316 fputs ("llt", file); /* 2 */
18317 break;
18318 case LEU:
18319 fputs ("lle", file); /* 6 */
18320 break;
18321 case GTU:
18322 fputs ("lgt", file); /* 1 */
18323 break;
18324 case GEU:
18325 fputs ("lge", file); /* 5 */
18326 break;
18327 default:
18328 gcc_unreachable ();
18330 break;
18332 case 'w':
18333 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18334 normally. */
18335 if (INT_P (x))
18336 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18337 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18338 else
18339 print_operand (file, x, 0);
18340 return;
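/* The %w arithmetic above sign-extends the low halfword: e.g. for
   INTVAL 0x1234abcd, (0xabcd ^ 0x8000) - 0x8000 = -21555, which is
   0xabcd read as a signed 16-bit value.  */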
18342 case 'W':
18343 /* MB value for a PowerPC64 rldic operand. */
18344 i = clz_hwi (INTVAL (x));
18346 fprintf (file, "%d", i);
18347 return;
18349 case 'x':
18350 /* X is a FPR or Altivec register used in a VSX context. */
18351 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18352 output_operand_lossage ("invalid %%x value");
18353 else
18355 int reg = REGNO (x);
18356 int vsx_reg = (FP_REGNO_P (reg)
18357 ? reg - 32
18358 : reg - FIRST_ALTIVEC_REGNO + 32);
18360 #ifdef TARGET_REGNAMES
18361 if (TARGET_REGNAMES)
18362 fprintf (file, "%%vs%d", vsx_reg);
18363 else
18364 #endif
18365 fprintf (file, "%d", vsx_reg);
18367 return;
18369 case 'X':
18370 if (MEM_P (x)
18371 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18372 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18373 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18374 putc ('x', file);
18375 return;
18377 case 'Y':
18378 /* Like 'L', for third word of TImode/PTImode */
18379 if (REG_P (x))
18380 fputs (reg_names[REGNO (x) + 2], file);
18381 else if (MEM_P (x))
18383 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18384 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18385 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18386 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18387 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18388 else
18389 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18390 if (small_data_operand (x, GET_MODE (x)))
18391 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18392 reg_names[SMALL_DATA_REG]);
18394 return;
18396 case 'z':
18397 /* X is a SYMBOL_REF. Write out the name preceded by a
18398 period and without any trailing data in brackets. Used for function
18399 names. If we are configured for System V (or the embedded ABI) on
18400 the PowerPC, do not emit the period, since those systems do not use
18401 TOCs and the like. */
18402 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18404 /* For macho, check to see if we need a stub. */
18405 if (TARGET_MACHO)
18407 const char *name = XSTR (x, 0);
18408 #if TARGET_MACHO
18409 if (darwin_emit_branch_islands
18410 && MACHOPIC_INDIRECT
18411 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18412 name = machopic_indirection_name (x, /*stub_p=*/true);
18413 #endif
18414 assemble_name (file, name);
18416 else if (!DOT_SYMBOLS)
18417 assemble_name (file, XSTR (x, 0));
18418 else
18419 rs6000_output_function_entry (file, XSTR (x, 0));
18420 return;
18422 case 'Z':
18423 /* Like 'L', for last word of TImode/PTImode. */
18424 if (REG_P (x))
18425 fputs (reg_names[REGNO (x) + 3], file);
18426 else if (MEM_P (x))
18428 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18429 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18430 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18431 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18432 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18433 else
18434 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
18435 if (small_data_operand (x, GET_MODE (x)))
18436 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18437 reg_names[SMALL_DATA_REG]);
18439 return;
18441 /* Print AltiVec or SPE memory operand. */
18442 case 'y':
18444 rtx tmp;
18446 gcc_assert (MEM_P (x));
18448 tmp = XEXP (x, 0);
18450 /* Ugly hack because %y is overloaded. */
18451 if ((TARGET_SPE || TARGET_E500_DOUBLE)
18452 && (GET_MODE_SIZE (GET_MODE (x)) == 8
18453 || GET_MODE (x) == TFmode
18454 || GET_MODE (x) == TImode
18455 || GET_MODE (x) == PTImode))
18457 /* Handle [reg]. */
18458 if (REG_P (tmp))
18460 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
18461 break;
18463 /* Handle [reg+UIMM]. */
18464 else if (GET_CODE (tmp) == PLUS
18465 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
18467 int off;
18469 gcc_assert (REG_P (XEXP (tmp, 0)));
18471 off = INTVAL (XEXP (tmp, 1));
18472 fprintf (file, "%d(%s)", off, reg_names[REGNO (XEXP (tmp, 0))]);
18473 break;
18476 /* Fall through. Must be [reg+reg]. */
18478 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
18479 && GET_CODE (tmp) == AND
18480 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
18481 && INTVAL (XEXP (tmp, 1)) == -16)
18482 tmp = XEXP (tmp, 0);
18483 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
18484 && GET_CODE (tmp) == PRE_MODIFY)
18485 tmp = XEXP (tmp, 1);
18486 if (REG_P (tmp))
18487 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
18488 else
18490 if (GET_CODE (tmp) != PLUS
18491 || !REG_P (XEXP (tmp, 0))
18492 || !REG_P (XEXP (tmp, 1)))
18494 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18495 break;
18498 if (REGNO (XEXP (tmp, 0)) == 0)
18499 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
18500 reg_names[ REGNO (XEXP (tmp, 0)) ]);
18501 else
18502 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
18503 reg_names[ REGNO (XEXP (tmp, 1)) ]);
18505 break;
18508 case 0:
18509 if (REG_P (x))
18510 fprintf (file, "%s", reg_names[REGNO (x)]);
18511 else if (MEM_P (x))
18513 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18514 know the width from the mode. */
18515 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
18516 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
18517 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18518 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
18519 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
18520 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18521 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18522 output_address (XEXP (XEXP (x, 0), 1));
18523 else
18524 output_address (XEXP (x, 0));
18526 else
18528 if (toc_relative_expr_p (x, false))
18529 /* This hack along with a corresponding hack in
18530 rs6000_output_addr_const_extra arranges to output addends
18531 where the assembler expects to find them. eg.
18532 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18533 without this hack would be output as "x@toc+4". We
18534 want "x+4@toc". */
18535 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18536 else
18537 output_addr_const (file, x);
18539 return;
18541 case '&':
18542 assemble_name (file, rs6000_get_some_local_dynamic_name ());
18543 return;
18545 default:
18546 output_operand_lossage ("invalid %%xn code");
18550 /* Print the address of an operand. */
18552 void
18553 print_operand_address (FILE *file, rtx x)
18555 if (REG_P (x))
18556 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
18557 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
18558 || GET_CODE (x) == LABEL_REF)
18560 output_addr_const (file, x);
18561 if (small_data_operand (x, GET_MODE (x)))
18562 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18563 reg_names[SMALL_DATA_REG]);
18564 else
18565 gcc_assert (!TARGET_TOC);
18567 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18568 && REG_P (XEXP (x, 1)))
18570 if (REGNO (XEXP (x, 0)) == 0)
18571 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
18572 reg_names[ REGNO (XEXP (x, 0)) ]);
18573 else
18574 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
18575 reg_names[ REGNO (XEXP (x, 1)) ]);
18577 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18578 && GET_CODE (XEXP (x, 1)) == CONST_INT)
18579 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
18580 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
18581 #if TARGET_MACHO
18582 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18583 && CONSTANT_P (XEXP (x, 1)))
18585 fprintf (file, "lo16(");
18586 output_addr_const (file, XEXP (x, 1));
18587 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18589 #endif
18590 #if TARGET_ELF
18591 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18592 && CONSTANT_P (XEXP (x, 1)))
18594 output_addr_const (file, XEXP (x, 1));
18595 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18597 #endif
18598 else if (toc_relative_expr_p (x, false))
18600 /* This hack along with a corresponding hack in
18601 rs6000_output_addr_const_extra arranges to output addends
18602 where the assembler expects to find them. eg.
18603 (lo_sum (reg 9)
18604 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18605 without this hack would be output as "x@toc+8@l(9)". We
18606 want "x+8@toc@l(9)". */
18607 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18608 if (GET_CODE (x) == LO_SUM)
18609 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
18610 else
18611 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
18613 else
18614 gcc_unreachable ();
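/* A rough sketch of what the cases above print, using the default
   (bare-number) register names: (reg 9) -> "0(9)";
   (plus (reg 9) (const_int 8)) -> "8(9)"; an indexed
   (plus (reg 9) (reg 10)) -> "9,10", with the operands swapped when
   the first register happens to be r0.  */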
18617 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18619 static bool
18620 rs6000_output_addr_const_extra (FILE *file, rtx x)
18622 if (GET_CODE (x) == UNSPEC)
18623 switch (XINT (x, 1))
18625 case UNSPEC_TOCREL:
18626 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
18627 && REG_P (XVECEXP (x, 0, 1))
18628 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
18629 output_addr_const (file, XVECEXP (x, 0, 0));
18630 if (x == tocrel_base && tocrel_offset != const0_rtx)
18632 if (INTVAL (tocrel_offset) >= 0)
18633 fprintf (file, "+");
18634 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
18636 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
18638 putc ('-', file);
18639 assemble_name (file, toc_label_name);
18641 else if (TARGET_ELF)
18642 fputs ("@toc", file);
18643 return true;
18645 #if TARGET_MACHO
18646 case UNSPEC_MACHOPIC_OFFSET:
18647 output_addr_const (file, XVECEXP (x, 0, 0));
18648 putc ('-', file);
18649 machopic_output_function_base_name (file);
18650 return true;
18651 #endif
18653 return false;
18656 /* Target hook for assembling integer objects. The PowerPC version has
18657 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18658 is defined. It also needs to handle DI-mode objects on 64-bit
18659 targets. */
18661 static bool
18662 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
18664 #ifdef RELOCATABLE_NEEDS_FIXUP
18665 /* Special handling for SI values. */
18666 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
18668 static int recurse = 0;
18670 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18671 the .fixup section. Since the TOC section is already relocated, we
18672 don't need to mark it here. We used to skip the text section, but it
18673 should never be valid for relocated addresses to be placed in the text
18674 section. */
18675 if (TARGET_RELOCATABLE
18676 && in_section != toc_section
18677 && !recurse
18678 && !CONST_SCALAR_INT_P (x)
18679 && CONSTANT_P (x))
18681 char buf[256];
18683 recurse = 1;
18684 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
18685 fixuplabelno++;
18686 ASM_OUTPUT_LABEL (asm_out_file, buf);
18687 fprintf (asm_out_file, "\t.long\t(");
18688 output_addr_const (asm_out_file, x);
18689 fprintf (asm_out_file, ")@fixup\n");
18690 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
18691 ASM_OUTPUT_ALIGN (asm_out_file, 2);
18692 fprintf (asm_out_file, "\t.long\t");
18693 assemble_name (asm_out_file, buf);
18694 fprintf (asm_out_file, "\n\t.previous\n");
18695 recurse = 0;
18696 return true;
18698 /* Remove initial .'s to turn a -mcall-aixdesc function
18699 address into the address of the descriptor, not the function
18700 itself. */
18701 else if (GET_CODE (x) == SYMBOL_REF
18702 && XSTR (x, 0)[0] == '.'
18703 && DEFAULT_ABI == ABI_AIX)
18705 const char *name = XSTR (x, 0);
18706 while (*name == '.')
18707 name++;
18709 fprintf (asm_out_file, "\t.long\t%s\n", name);
18710 return true;
18713 #endif /* RELOCATABLE_NEEDS_FIXUP */
18714 return default_assemble_integer (x, size, aligned_p);
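/* Sketch of the -mrelocatable fixup emitted above for a word
   initialized with the address of "x" (label name illustrative):
       .LCP1:
               .long (x)@fixup
               .section ".fixup","aw"
               .align 2
               .long .LCP1
               .previous
   The .fixup section collects the addresses the loader must patch.  */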
18717 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18718 /* Emit an assembler directive to set symbol visibility for DECL to
18719 VISIBILITY_TYPE. */
18721 static void
18722 rs6000_assemble_visibility (tree decl, int vis)
18724 if (TARGET_XCOFF)
18725 return;
18727 /* Functions need to have their entry point symbol visibility set as
18728 well as their descriptor symbol visibility. */
18729 if (DEFAULT_ABI == ABI_AIX
18730 && DOT_SYMBOLS
18731 && TREE_CODE (decl) == FUNCTION_DECL)
18733 static const char * const visibility_types[] = {
18734 NULL, "internal", "hidden", "protected"
18737 const char *name, *type;
18739 name = ((* targetm.strip_name_encoding)
18740 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
18741 type = visibility_types[vis];
18743 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
18744 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
18746 else
18747 default_assemble_visibility (decl, vis);
18749 #endif
18751 enum rtx_code
18752 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
18754 /* Reversal of FP compares needs care -- an ordered compare
18755 becomes an unordered compare and vice versa. */
18756 if (mode == CCFPmode
18757 && (!flag_finite_math_only
18758 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
18759 || code == UNEQ || code == LTGT))
18760 return reverse_condition_maybe_unordered (code);
18761 else
18762 return reverse_condition (code);
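/* For example, reversing GE in CCFPmode (without -ffinite-math-only)
   yields UNLT, so NaN operands still land on the reversed arm; in an
   integer mode the plain reversal to LT is used instead.  */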
18765 /* Generate a compare for CODE. Return a brand-new rtx that
18766 represents the result of the compare. */
18768 static rtx
18769 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
18771 enum machine_mode comp_mode;
18772 rtx compare_result;
18773 enum rtx_code code = GET_CODE (cmp);
18774 rtx op0 = XEXP (cmp, 0);
18775 rtx op1 = XEXP (cmp, 1);
18777 if (FLOAT_MODE_P (mode))
18778 comp_mode = CCFPmode;
18779 else if (code == GTU || code == LTU
18780 || code == GEU || code == LEU)
18781 comp_mode = CCUNSmode;
18782 else if ((code == EQ || code == NE)
18783 && unsigned_reg_p (op0)
18784 && (unsigned_reg_p (op1)
18785 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
18786 /* These are unsigned values; perhaps there will be a later
18787 ordering compare that can be shared with this one. */
18788 comp_mode = CCUNSmode;
18789 else
18790 comp_mode = CCmode;
18792 /* If we have an unsigned compare, make sure we don't have a signed value as
18793 an immediate. */
18794 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
18795 && INTVAL (op1) < 0)
18797 op0 = copy_rtx_if_shared (op0);
18798 op1 = force_reg (GET_MODE (op0), op1);
18799 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
18802 /* First, the compare. */
18803 compare_result = gen_reg_rtx (comp_mode);
18805 /* E500 FP compare instructions on the GPRs. Yuck! */
18806 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
18807 && FLOAT_MODE_P (mode))
18809 rtx cmp, or_result, compare_result2;
18810 enum machine_mode op_mode = GET_MODE (op0);
18811 bool reverse_p;
18813 if (op_mode == VOIDmode)
18814 op_mode = GET_MODE (op1);
18816 /* First reverse the condition codes that aren't directly supported. */
18817 switch (code)
18819 case NE:
18820 case UNLT:
18821 case UNLE:
18822 case UNGT:
18823 case UNGE:
18824 code = reverse_condition_maybe_unordered (code);
18825 reverse_p = true;
18826 break;
18828 case EQ:
18829 case LT:
18830 case LE:
18831 case GT:
18832 case GE:
18833 reverse_p = false;
18834 break;
18836 default:
18837 gcc_unreachable ();
18840 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18841 This explains the following mess. */
18843 switch (code)
18845 case EQ:
18846 switch (op_mode)
18848 case SFmode:
18849 cmp = (flag_finite_math_only && !flag_trapping_math)
18850 ? gen_tstsfeq_gpr (compare_result, op0, op1)
18851 : gen_cmpsfeq_gpr (compare_result, op0, op1);
18852 break;
18854 case DFmode:
18855 cmp = (flag_finite_math_only && !flag_trapping_math)
18856 ? gen_tstdfeq_gpr (compare_result, op0, op1)
18857 : gen_cmpdfeq_gpr (compare_result, op0, op1);
18858 break;
18860 case TFmode:
18861 cmp = (flag_finite_math_only && !flag_trapping_math)
18862 ? gen_tsttfeq_gpr (compare_result, op0, op1)
18863 : gen_cmptfeq_gpr (compare_result, op0, op1);
18864 break;
18866 default:
18867 gcc_unreachable ();
18869 break;
18871 case GT:
18872 case GE:
18873 switch (op_mode)
18875 case SFmode:
18876 cmp = (flag_finite_math_only && !flag_trapping_math)
18877 ? gen_tstsfgt_gpr (compare_result, op0, op1)
18878 : gen_cmpsfgt_gpr (compare_result, op0, op1);
18879 break;
18881 case DFmode:
18882 cmp = (flag_finite_math_only && !flag_trapping_math)
18883 ? gen_tstdfgt_gpr (compare_result, op0, op1)
18884 : gen_cmpdfgt_gpr (compare_result, op0, op1);
18885 break;
18887 case TFmode:
18888 cmp = (flag_finite_math_only && !flag_trapping_math)
18889 ? gen_tsttfgt_gpr (compare_result, op0, op1)
18890 : gen_cmptfgt_gpr (compare_result, op0, op1);
18891 break;
18893 default:
18894 gcc_unreachable ();
18896 break;
18898 case LT:
18899 case LE:
18900 switch (op_mode)
18902 case SFmode:
18903 cmp = (flag_finite_math_only && !flag_trapping_math)
18904 ? gen_tstsflt_gpr (compare_result, op0, op1)
18905 : gen_cmpsflt_gpr (compare_result, op0, op1);
18906 break;
18908 case DFmode:
18909 cmp = (flag_finite_math_only && !flag_trapping_math)
18910 ? gen_tstdflt_gpr (compare_result, op0, op1)
18911 : gen_cmpdflt_gpr (compare_result, op0, op1);
18912 break;
18914 case TFmode:
18915 cmp = (flag_finite_math_only && !flag_trapping_math)
18916 ? gen_tsttflt_gpr (compare_result, op0, op1)
18917 : gen_cmptflt_gpr (compare_result, op0, op1);
18918 break;
18920 default:
18921 gcc_unreachable ();
18923 break;
18925 default:
18926 gcc_unreachable ();
18929 /* Synthesize LE and GE from LT/GT || EQ. */
18930 if (code == LE || code == GE)
18932 emit_insn (cmp);
18934 compare_result2 = gen_reg_rtx (CCFPmode);
18936 /* Do the EQ. */
18937 switch (op_mode)
18939 case SFmode:
18940 cmp = (flag_finite_math_only && !flag_trapping_math)
18941 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
18942 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
18943 break;
18945 case DFmode:
18946 cmp = (flag_finite_math_only && !flag_trapping_math)
18947 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
18948 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
18949 break;
18951 case TFmode:
18952 cmp = (flag_finite_math_only && !flag_trapping_math)
18953 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
18954 : gen_cmptfeq_gpr (compare_result2, op0, op1);
18955 break;
18957 default:
18958 gcc_unreachable ();
18961 emit_insn (cmp);
18963 /* OR them together. */
18964 or_result = gen_reg_rtx (CCFPmode);
18965 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
18966 compare_result2);
18967 compare_result = or_result;
18970 code = reverse_p ? NE : EQ;
18972 emit_insn (cmp);
18974 else
18976 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
18977 CLOBBERs to match cmptf_internal2 pattern. */
18978 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
18979 && GET_MODE (op0) == TFmode
18980 && !TARGET_IEEEQUAD
18981 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
18982 emit_insn (gen_rtx_PARALLEL (VOIDmode,
18983 gen_rtvec (10,
18984 gen_rtx_SET (VOIDmode,
18985 compare_result,
18986 gen_rtx_COMPARE (comp_mode, op0, op1)),
18987 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18988 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18989 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18990 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18991 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18992 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18993 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18994 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18995 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
18996 else if (GET_CODE (op1) == UNSPEC
18997 && XINT (op1, 1) == UNSPEC_SP_TEST)
18999 rtx op1b = XVECEXP (op1, 0, 0);
19000 comp_mode = CCEQmode;
19001 compare_result = gen_reg_rtx (CCEQmode);
19002 if (TARGET_64BIT)
19003 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
19004 else
19005 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
19007 else
19008 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19009 gen_rtx_COMPARE (comp_mode, op0, op1)));
19012 /* Some kinds of FP comparisons need an OR operation;
19013 under flag_finite_math_only we don't bother. */
19014 if (FLOAT_MODE_P (mode)
19015 && !flag_finite_math_only
19016 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
19017 && (code == LE || code == GE
19018 || code == UNEQ || code == LTGT
19019 || code == UNGT || code == UNLT))
19021 enum rtx_code or1, or2;
19022 rtx or1_rtx, or2_rtx, compare2_rtx;
19023 rtx or_result = gen_reg_rtx (CCEQmode);
19025 switch (code)
19027 case LE: or1 = LT; or2 = EQ; break;
19028 case GE: or1 = GT; or2 = EQ; break;
19029 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
19030 case LTGT: or1 = LT; or2 = GT; break;
19031 case UNGT: or1 = UNORDERED; or2 = GT; break;
19032 case UNLT: or1 = UNORDERED; or2 = LT; break;
19033 default: gcc_unreachable ();
19035 validate_condition_mode (or1, comp_mode);
19036 validate_condition_mode (or2, comp_mode);
19037 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
19038 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
19039 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
19040 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
19041 const_true_rtx);
19042 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
19044 compare_result = or_result;
19045 code = EQ;
19048 validate_condition_mode (code, GET_MODE (compare_result));
19050 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
19054 /* Emit the RTL for an sISEL pattern. */
19056 void
19057 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
19059 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
19062 void
19063 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
19065 rtx condition_rtx;
19066 enum machine_mode op_mode;
19067 enum rtx_code cond_code;
19068 rtx result = operands[0];
19070 if (TARGET_ISEL && (mode == SImode || mode == DImode))
19072 rs6000_emit_sISEL (mode, operands);
19073 return;
19076 condition_rtx = rs6000_generate_compare (operands[1], mode);
19077 cond_code = GET_CODE (condition_rtx);
19079 if (FLOAT_MODE_P (mode)
19080 && !TARGET_FPRS && TARGET_HARD_FLOAT)
19082 rtx t;
19084 PUT_MODE (condition_rtx, SImode);
19085 t = XEXP (condition_rtx, 0);
19087 gcc_assert (cond_code == NE || cond_code == EQ);
19089 if (cond_code == NE)
19090 emit_insn (gen_e500_flip_gt_bit (t, t));
19092 emit_insn (gen_move_from_CR_gt_bit (result, t));
19093 return;
19096 if (cond_code == NE
19097 || cond_code == GE || cond_code == LE
19098 || cond_code == GEU || cond_code == LEU
19099 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
19101 rtx not_result = gen_reg_rtx (CCEQmode);
19102 rtx not_op, rev_cond_rtx;
19103 enum machine_mode cc_mode;
19105 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
19107 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
19108 SImode, XEXP (condition_rtx, 0), const0_rtx);
19109 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
19110 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
19111 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
19114 op_mode = GET_MODE (XEXP (operands[1], 0));
19115 if (op_mode == VOIDmode)
19116 op_mode = GET_MODE (XEXP (operands[1], 1));
19118 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
19120 PUT_MODE (condition_rtx, DImode);
19121 convert_move (result, condition_rtx, 0);
19123 else
19125 PUT_MODE (condition_rtx, SImode);
19126 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
19130 /* Emit a branch of kind CODE to location LOC. */
19132 void
19133 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
19135 rtx condition_rtx, loc_ref;
19137 condition_rtx = rs6000_generate_compare (operands[0], mode);
19138 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
19139 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
19140 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
19141 loc_ref, pc_rtx)));
19144 /* Return the string to output a conditional branch to LABEL, which is
19145 the operand template of the label, or NULL if the branch is really a
19146 conditional return.
19148 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19149 condition code register and its mode specifies what kind of
19150 comparison we made.
19152 REVERSED is nonzero if we should reverse the sense of the comparison.
19154 INSN is the insn. */
19156 char *
19157 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
19159 static char string[64];
19160 enum rtx_code code = GET_CODE (op);
19161 rtx cc_reg = XEXP (op, 0);
19162 enum machine_mode mode = GET_MODE (cc_reg);
19163 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
19164 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
19165 int really_reversed = reversed ^ need_longbranch;
19166 char *s = string;
19167 const char *ccode;
19168 const char *pred;
19169 rtx note;
19171 validate_condition_mode (code, mode);
19173 /* Work out which way this really branches. We could always use
19174 reverse_condition_maybe_unordered here, but using the plain
19175 reversal when possible makes the resulting assembler clearer. */
19176 if (really_reversed)
19178 /* Reversal of FP compares needs care -- an ordered compare
19179 becomes an unordered compare and vice versa. */
19180 if (mode == CCFPmode)
19181 code = reverse_condition_maybe_unordered (code);
19182 else
19183 code = reverse_condition (code);
19186 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
19188 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19189 to the GT bit. */
19190 switch (code)
19192 case EQ:
19193 /* Opposite of GT. */
19194 code = GT;
19195 break;
19197 case NE:
19198 code = UNLE;
19199 break;
19201 default:
19202 gcc_unreachable ();
19206 switch (code)
19208 /* Not all of these are actually distinct opcodes, but
19209 we distinguish them for clarity of the resulting assembler. */
19210 case NE: case LTGT:
19211 ccode = "ne"; break;
19212 case EQ: case UNEQ:
19213 ccode = "eq"; break;
19214 case GE: case GEU:
19215 ccode = "ge"; break;
19216 case GT: case GTU: case UNGT:
19217 ccode = "gt"; break;
19218 case LE: case LEU:
19219 ccode = "le"; break;
19220 case LT: case LTU: case UNLT:
19221 ccode = "lt"; break;
19222 case UNORDERED: ccode = "un"; break;
19223 case ORDERED: ccode = "nu"; break;
19224 case UNGE: ccode = "nl"; break;
19225 case UNLE: ccode = "ng"; break;
19226 default:
19227 gcc_unreachable ();
19230 /* Maybe we have a guess as to how likely the branch is. */
19231 pred = "";
19232 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
19233 if (note != NULL_RTX)
19235 /* PROB is the difference from 50%. */
19236 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
19238 /* Only hint for highly probable/improbable branches on newer
19239 cpus as static prediction overrides processor dynamic
19240 prediction. For older cpus we may as well always hint, but
19241 assume not taken for branches that are very close to 50% as a
19242 mispredicted taken branch is more expensive than a
19243 mispredicted not-taken branch. */
19244 if (rs6000_always_hint
19245 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
19246 && br_prob_note_reliable_p (note)))
19248 if (abs (prob) > REG_BR_PROB_BASE / 20
19249 && ((prob > 0) ^ need_longbranch))
19250 pred = "+";
19251 else
19252 pred = "-";
19256 if (label == NULL)
19257 s += sprintf (s, "b%slr%s ", ccode, pred);
19258 else
19259 s += sprintf (s, "b%s%s ", ccode, pred);
19261 /* We need to escape any '%' characters in the reg_names string.
19262 Assume they'd only be the first character.... */
19263 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
19264 *s++ = '%';
19265 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
19267 if (label != NULL)
19269 /* If the branch distance was too far, we may have to use an
19270 unconditional branch to go the distance. */
19271 if (need_longbranch)
19272 s += sprintf (s, ",$+8\n\tb %s", label);
19273 else
19274 s += sprintf (s, ",%s", label);
19277 return string;
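/* Illustrative outputs (label and operand values assumed): a short EQ
   branch on cr0 predicted not taken gives "beq- 0,.L5"; if the target
   is out of conditional-branch range, the sense is inverted and we
   emit the pair "bne- 0,$+8\n\tb .L5" instead.  */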
19280 /* Return the string to flip the GT bit on a CR. */
19281 char *
19282 output_e500_flip_gt_bit (rtx dst, rtx src)
19284 static char string[64];
19285 int a, b;
19287 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
19288 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
19290 /* GT bit. */
19291 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
19292 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
19294 sprintf (string, "crnot %d,%d", a, b);
19295 return string;
19298 /* Return insn for VSX or Altivec comparisons. */
19300 static rtx
19301 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19303 rtx mask;
19304 enum machine_mode mode = GET_MODE (op0);
19306 switch (code)
19308 default:
19309 break;
19311 case GE:
19312 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19313 return NULL_RTX;
19315 case EQ:
19316 case GT:
19317 case GTU:
19318 case ORDERED:
19319 case UNORDERED:
19320 case UNEQ:
19321 case LTGT:
19322 mask = gen_reg_rtx (mode);
19323 emit_insn (gen_rtx_SET (VOIDmode,
19324 mask,
19325 gen_rtx_fmt_ee (code, mode, op0, op1)));
19326 return mask;
19329 return NULL_RTX;
19332 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19333 DMODE is expected destination mode. This is a recursive function. */
19335 static rtx
19336 rs6000_emit_vector_compare (enum rtx_code rcode,
19337 rtx op0, rtx op1,
19338 enum machine_mode dmode)
19340 rtx mask;
19341 bool swap_operands = false;
19342 bool try_again = false;
19344 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19345 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19347 /* See if the comparison works as is. */
19348 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19349 if (mask)
19350 return mask;
19352 switch (rcode)
19354 case LT:
19355 rcode = GT;
19356 swap_operands = true;
19357 try_again = true;
19358 break;
19359 case LTU:
19360 rcode = GTU;
19361 swap_operands = true;
19362 try_again = true;
19363 break;
19364 case NE:
19365 case UNLE:
19366 case UNLT:
19367 case UNGE:
19368 case UNGT:
19369 /* Invert condition and try again.
19370 e.g., A != B becomes ~(A==B). */
19372 enum rtx_code rev_code;
19373 enum insn_code nor_code;
19374 rtx mask2;
19376 rev_code = reverse_condition_maybe_unordered (rcode);
19377 if (rev_code == UNKNOWN)
19378 return NULL_RTX;
19380 nor_code = optab_handler (one_cmpl_optab, dmode);
19381 if (nor_code == CODE_FOR_nothing)
19382 return NULL_RTX;
19384 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19385 if (!mask2)
19386 return NULL_RTX;
19388 mask = gen_reg_rtx (dmode);
19389 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19390 return mask;
19392 break;
19393 case GE:
19394 case GEU:
19395 case LE:
19396 case LEU:
19397 /* Try GT/GTU/LT/LTU OR EQ */
19399 rtx c_rtx, eq_rtx;
19400 enum insn_code ior_code;
19401 enum rtx_code new_code;
19403 switch (rcode)
19405 case GE:
19406 new_code = GT;
19407 break;
19409 case GEU:
19410 new_code = GTU;
19411 break;
19413 case LE:
19414 new_code = LT;
19415 break;
19417 case LEU:
19418 new_code = LTU;
19419 break;
19421 default:
19422 gcc_unreachable ();
19425 ior_code = optab_handler (ior_optab, dmode);
19426 if (ior_code == CODE_FOR_nothing)
19427 return NULL_RTX;
19429 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
19430 if (!c_rtx)
19431 return NULL_RTX;
19433 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
19434 if (!eq_rtx)
19435 return NULL_RTX;
19437 mask = gen_reg_rtx (dmode);
19438 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
19439 return mask;
19441 break;
19442 default:
19443 return NULL_RTX;
19446 if (try_again)
19448 if (swap_operands)
19450 rtx tmp;
19451 tmp = op0;
19452 op0 = op1;
19453 op1 = tmp;
19456 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19457 if (mask)
19458 return mask;
19461 /* You only get two chances. */
19462 return NULL_RTX;
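/* Two synthesized cases from above, as a sketch: a V4SI "le" has no
   direct instruction, so it is built as (lt OR eq) via ior_optab; an
   "ne" is built by emitting the reversed EQ compare and complementing
   the mask through one_cmpl_optab.  */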
19465 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19466 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19467 operands for the relation operation COND. */
19469 int
19470 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
19471 rtx cond, rtx cc_op0, rtx cc_op1)
19473 enum machine_mode dest_mode = GET_MODE (dest);
19474 enum machine_mode mask_mode = GET_MODE (cc_op0);
19475 enum rtx_code rcode = GET_CODE (cond);
19476 enum machine_mode cc_mode = CCmode;
19477 rtx mask;
19478 rtx cond2;
19479 rtx tmp;
19480 bool invert_move = false;
19482 if (VECTOR_UNIT_NONE_P (dest_mode))
19483 return 0;
19485 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
19486 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
19488 switch (rcode)
19490 /* Swap the move arms where we can; otherwise fall back to doing
19491 the operation as specified plus a NOR to invert the test. */
19492 case NE:
19493 case UNLE:
19494 case UNLT:
19495 case UNGE:
19496 case UNGT:
19497 /* Invert condition and try again.
19498 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19499 invert_move = true;
19500 rcode = reverse_condition_maybe_unordered (rcode);
19501 if (rcode == UNKNOWN)
19502 return 0;
19503 break;
19505 /* Mark unsigned tests with CCUNSmode. */
19506 case GTU:
19507 case GEU:
19508 case LTU:
19509 case LEU:
19510 cc_mode = CCUNSmode;
19511 break;
19513 default:
19514 break;
19517 /* Get the vector mask for the given relational operations. */
19518 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
19520 if (!mask)
19521 return 0;
19523 if (invert_move)
19525 tmp = op_true;
19526 op_true = op_false;
19527 op_false = tmp;
19530 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
19531 CONST0_RTX (dest_mode));
19532 emit_insn (gen_rtx_SET (VOIDmode,
19533 dest,
19534 gen_rtx_IF_THEN_ELSE (dest_mode,
19535 cond2,
19536 op_true,
19537 op_false)));
19538 return 1;
19541 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
19542 operands of the last comparison is nonzero/true, FALSE_COND if it
19543 is zero/false. Return 0 if the hardware has no such operation. */
19545 int
19546 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19548 enum rtx_code code = GET_CODE (op);
19549 rtx op0 = XEXP (op, 0);
19550 rtx op1 = XEXP (op, 1);
19551 REAL_VALUE_TYPE c1;
19552 enum machine_mode compare_mode = GET_MODE (op0);
19553 enum machine_mode result_mode = GET_MODE (dest);
19554 rtx temp;
19555 bool is_against_zero;
19557 /* These modes should always match. */
19558 if (GET_MODE (op1) != compare_mode
19559 /* In the isel case however, we can use a compare immediate, so
19560 op1 may be a small constant. */
19561 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
19562 return 0;
19563 if (GET_MODE (true_cond) != result_mode)
19564 return 0;
19565 if (GET_MODE (false_cond) != result_mode)
19566 return 0;
19568 /* Don't allow using floating point comparisons for integer results for
19569 now. */
19570 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
19571 return 0;
19573 /* First, work out if the hardware can do this at all, or
19574 if it's too slow.... */
19575 if (!FLOAT_MODE_P (compare_mode))
19577 if (TARGET_ISEL)
19578 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
19579 return 0;
19581 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
19582 && SCALAR_FLOAT_MODE_P (compare_mode))
19583 return 0;
19585 is_against_zero = op1 == CONST0_RTX (compare_mode);
19587 /* A floating-point subtract might overflow, underflow, or produce
19588 an inexact result, thus changing the floating-point flags, so it
19589 can't be generated if we care about that. It's safe if one side
19590 of the construct is zero, since then no subtract will be
19591 generated. */
19592 if (SCALAR_FLOAT_MODE_P (compare_mode)
19593 && flag_trapping_math && ! is_against_zero)
19594 return 0;
19596 /* Eliminate half of the comparisons by switching operands; this
19597 makes the remaining code simpler. */
19598 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
19599 || code == LTGT || code == LT || code == UNLE)
19601 code = reverse_condition_maybe_unordered (code);
19602 temp = true_cond;
19603 true_cond = false_cond;
19604 false_cond = temp;
19607 /* UNEQ and LTGT take four instructions for a comparison with zero,
19608 so it'll probably be faster to use a branch here too. */
19609 if (code == UNEQ && HONOR_NANS (compare_mode))
19610 return 0;
19612 if (GET_CODE (op1) == CONST_DOUBLE)
19613 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
19615 /* We're going to try to implement comparisons by performing
19616 a subtract, then comparing against zero. Unfortunately,
19617 Inf - Inf is NaN which is not zero, and so if we don't
19618 know that the operand is finite and the comparison
19619 would treat EQ differently from UNORDERED, we can't do it. */
19620 if (HONOR_INFINITIES (compare_mode)
19621 && code != GT && code != UNGE
19622 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
19623 /* Constructs of the form (a OP b ? a : b) are safe. */
19624 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
19625 || (! rtx_equal_p (op0, true_cond)
19626 && ! rtx_equal_p (op1, true_cond))))
19627 return 0;
19629 /* At this point we know we can use fsel. */
19631 /* Reduce the comparison to a comparison against zero. */
19632 if (! is_against_zero)
19634 temp = gen_reg_rtx (compare_mode);
19635 emit_insn (gen_rtx_SET (VOIDmode, temp,
19636 gen_rtx_MINUS (compare_mode, op0, op1)));
19637 op0 = temp;
19638 op1 = CONST0_RTX (compare_mode);
19641 /* If we don't care about NaNs we can reduce some of the comparisons
19642 down to faster ones. */
19643 if (! HONOR_NANS (compare_mode))
19644 switch (code)
19646 case GT:
19647 code = LE;
19648 temp = true_cond;
19649 true_cond = false_cond;
19650 false_cond = temp;
19651 break;
19652 case UNGE:
19653 code = GE;
19654 break;
19655 case UNEQ:
19656 code = EQ;
19657 break;
19658 default:
19659 break;
19662 /* Now, reduce everything down to a GE. */
19663 switch (code)
19665 case GE:
19666 break;
19668 case LE:
19669 temp = gen_reg_rtx (compare_mode);
19670 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19671 op0 = temp;
19672 break;
19674 case ORDERED:
19675 temp = gen_reg_rtx (compare_mode);
19676 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
19677 op0 = temp;
19678 break;
19680 case EQ:
19681 temp = gen_reg_rtx (compare_mode);
19682 emit_insn (gen_rtx_SET (VOIDmode, temp,
19683 gen_rtx_NEG (compare_mode,
19684 gen_rtx_ABS (compare_mode, op0))));
19685 op0 = temp;
19686 break;
19688 case UNGE:
19689 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19690 temp = gen_reg_rtx (result_mode);
19691 emit_insn (gen_rtx_SET (VOIDmode, temp,
19692 gen_rtx_IF_THEN_ELSE (result_mode,
19693 gen_rtx_GE (VOIDmode,
19694 op0, op1),
19695 true_cond, false_cond)));
19696 false_cond = true_cond;
19697 true_cond = temp;
19699 temp = gen_reg_rtx (compare_mode);
19700 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19701 op0 = temp;
19702 break;
19704 case GT:
19705 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19706 temp = gen_reg_rtx (result_mode);
19707 emit_insn (gen_rtx_SET (VOIDmode, temp,
19708 gen_rtx_IF_THEN_ELSE (result_mode,
19709 gen_rtx_GE (VOIDmode,
19710 op0, op1),
19711 true_cond, false_cond)));
19712 true_cond = false_cond;
19713 false_cond = temp;
19715 temp = gen_reg_rtx (compare_mode);
19716 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19717 op0 = temp;
19718 break;
19720 default:
19721 gcc_unreachable ();
19724 emit_insn (gen_rtx_SET (VOIDmode, dest,
19725 gen_rtx_IF_THEN_ELSE (result_mode,
19726 gen_rtx_GE (VOIDmode,
19727 op0, op1),
19728 true_cond, false_cond)));
19729 return 1;
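/* Recap of the fsel reductions above for a compare against zero:
   "a <= 0 ? x : y" negates the operand and emits fsel on -a >= 0;
   EQ uses -fabs(a) >= 0, and ORDERED uses fabs(a) >= 0.  */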
19732 /* Same as above, but for ints (isel). */
19734 static int
19735 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19737 rtx condition_rtx, cr;
19738 enum machine_mode mode = GET_MODE (dest);
19739 enum rtx_code cond_code;
19740 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
19741 bool signedp;
19743 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
19744 return 0;
19746 /* We still have to do the compare, because isel doesn't do a
19747 compare; it just looks at the CRx bits set by a previous compare
19748 instruction. */
19749 condition_rtx = rs6000_generate_compare (op, mode);
19750 cond_code = GET_CODE (condition_rtx);
19751 cr = XEXP (condition_rtx, 0);
19752 signedp = GET_MODE (cr) == CCmode;
19754 isel_func = (mode == SImode
19755 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
19756 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
19758 switch (cond_code)
19760 case LT: case GT: case LTU: case GTU: case EQ:
19761 /* isel handles these directly. */
19762 break;
19764 default:
19765 /* We need to swap the sense of the comparison. */
19767 rtx t = true_cond;
19768 true_cond = false_cond;
19769 false_cond = t;
19770 PUT_CODE (condition_rtx, reverse_condition (cond_code));
19772 break;
19775 false_cond = force_reg (mode, false_cond);
19776 if (true_cond != const0_rtx)
19777 true_cond = force_reg (mode, true_cond);
19779 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
19781 return 1;
19784 const char *
19785 output_isel (rtx *operands)
19787 enum rtx_code code;
19789 code = GET_CODE (operands[1]);
19791 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
19793 gcc_assert (GET_CODE (operands[2]) == REG
19794 && GET_CODE (operands[3]) == REG);
19795 PUT_CODE (operands[1], reverse_condition (code));
19796 return "isel %0,%3,%2,%j1";
19799 return "isel %0,%2,%3,%j1";
19802 void
19803 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
19805 enum machine_mode mode = GET_MODE (op0);
19806 enum rtx_code c;
19807 rtx target;
19809 /* VSX/altivec have direct min/max insns. */
19810 if ((code == SMAX || code == SMIN)
19811 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
19812 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
19814 emit_insn (gen_rtx_SET (VOIDmode,
19815 dest,
19816 gen_rtx_fmt_ee (code, mode, op0, op1)));
19817 return;
19820 if (code == SMAX || code == SMIN)
19821 c = GE;
19822 else
19823 c = GEU;
19825 if (code == SMAX || code == UMAX)
19826 target = emit_conditional_move (dest, c, op0, op1, mode,
19827 op0, op1, mode, 0);
19828 else
19829 target = emit_conditional_move (dest, c, op0, op1, mode,
19830 op1, op0, mode, 0);
19831 gcc_assert (target);
19832 if (target != dest)
19833 emit_move_insn (dest, target);
19836 /* A subroutine of the atomic operation splitters. Jump to LABEL if
19837 COND is true. Mark the jump as unlikely to be taken. */
19839 static void
19840 emit_unlikely_jump (rtx cond, rtx label)
19842 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
19843 rtx x;
19845 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
19846 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
19847 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
19850 /* A subroutine of the atomic operation splitters. Emit a load-locked
19851 instruction in MODE. For QI/HImode, possibly use a pattern that includes
19852 the zero_extend operation. */
19854 static void
19855 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
19857 rtx (*fn) (rtx, rtx) = NULL;
19859 switch (mode)
19861 case QImode:
19862 fn = gen_load_lockedqi;
19863 break;
19864 case HImode:
19865 fn = gen_load_lockedhi;
19866 break;
19867 case SImode:
19868 if (GET_MODE (mem) == QImode)
19869 fn = gen_load_lockedqi_si;
19870 else if (GET_MODE (mem) == HImode)
19871 fn = gen_load_lockedhi_si;
19872 else
19873 fn = gen_load_lockedsi;
19874 break;
19875 case DImode:
19876 fn = gen_load_lockeddi;
19877 break;
19878 case TImode:
19879 fn = gen_load_lockedti;
19880 break;
19881 default:
19882 gcc_unreachable ();
19884 emit_insn (fn (reg, mem));
19887 /* A subroutine of the atomic operation splitters. Emit a store-conditional
19888 instruction in MODE. */
19890 static void
19891 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
19893 rtx (*fn) (rtx, rtx, rtx) = NULL;
19895 switch (mode)
19897 case QImode:
19898 fn = gen_store_conditionalqi;
19899 break;
19900 case HImode:
19901 fn = gen_store_conditionalhi;
19902 break;
19903 case SImode:
19904 fn = gen_store_conditionalsi;
19905 break;
19906 case DImode:
19907 fn = gen_store_conditionaldi;
19908 break;
19909 case TImode:
19910 fn = gen_store_conditionalti;
19911 break;
19912 default:
19913 gcc_unreachable ();
19916 /* Emit sync before stwcx. to address PPC405 Erratum. */
19917 if (PPC405_ERRATUM77)
19918 emit_insn (gen_hwsync ());
19920 emit_insn (fn (res, mem, val));
19923 /* Expand barriers before and after a load_locked/store_cond sequence. */
19925 static rtx
19926 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
19928 rtx addr = XEXP (mem, 0);
19929 int strict_p = (reload_in_progress || reload_completed);
19931 if (!legitimate_indirect_address_p (addr, strict_p)
19932 && !legitimate_indexed_address_p (addr, strict_p))
19934 addr = force_reg (Pmode, addr);
19935 mem = replace_equiv_address_nv (mem, addr);
19938 switch (model)
19940 case MEMMODEL_RELAXED:
19941 case MEMMODEL_CONSUME:
19942 case MEMMODEL_ACQUIRE:
19943 break;
19944 case MEMMODEL_RELEASE:
19945 case MEMMODEL_ACQ_REL:
19946 emit_insn (gen_lwsync ());
19947 break;
19948 case MEMMODEL_SEQ_CST:
19949 emit_insn (gen_hwsync ());
19950 break;
19951 default:
19952 gcc_unreachable ();
19954 return mem;
19957 static void
19958 rs6000_post_atomic_barrier (enum memmodel model)
19960 switch (model)
19962 case MEMMODEL_RELAXED:
19963 case MEMMODEL_CONSUME:
19964 case MEMMODEL_RELEASE:
19965 break;
19966 case MEMMODEL_ACQUIRE:
19967 case MEMMODEL_ACQ_REL:
19968 case MEMMODEL_SEQ_CST:
19969 emit_insn (gen_isync ());
19970 break;
19971 default:
19972 gcc_unreachable ();
19976 /* A subroutine of the various atomic expanders. For sub-word operations,
19977 we must adjust things to operate on SImode. Given the original MEM,
19978 return a new aligned memory. Also build and return the quantities by
19979 which to shift and mask. */
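/* A worked example of the computation below: for a little-endian
   QImode reference at address A with (A & 3) == 1, the returned MEM is
   *(A & -4), SHIFT is (A & 3) * 8 == 8 and MASK is 0xff << 8.  On
   big-endian the shift is XORed with 0x18 (0x10 for HImode), here
   giving 16, since the sub-word is counted from the other end of the
   word.  */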
19981 static rtx
19982 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
19984 rtx addr, align, shift, mask, mem;
19985 HOST_WIDE_INT shift_mask;
19986 enum machine_mode mode = GET_MODE (orig_mem);
19988 /* For smaller modes, we have to implement this via SImode. */
19989 shift_mask = (mode == QImode ? 0x18 : 0x10);
19991 addr = XEXP (orig_mem, 0);
19992 addr = force_reg (GET_MODE (addr), addr);
19994 /* Aligned memory containing subword. Generate a new memory. We
19995 do not want any of the existing MEM_ATTR data, as we're now
19996 accessing memory outside the original object. */
19997 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
19998 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19999 mem = gen_rtx_MEM (SImode, align);
20000 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
20001 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
20002 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
20004 /* Shift amount for subword relative to aligned word. */
20005 shift = gen_reg_rtx (SImode);
20006 addr = gen_lowpart (SImode, addr);
20007 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
20008 if (BYTES_BIG_ENDIAN)
20009 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
20010 shift, 1, OPTAB_LIB_WIDEN);
20011 *pshift = shift;
20013 /* Mask for insertion. */
20014 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
20015 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
20016 *pmask = mask;
20018 return mem;
20021 /* A subroutine of the various atomic expanders. For sub-word operands,
20022 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
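/* In other words, the value built below is (OLDVAL & ~MASK) | NEWVAL,
   where NEWVAL is assumed to have already been shifted into position
   within the word.  */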
20024 static rtx
20025 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
20027 rtx x;
20029 x = gen_reg_rtx (SImode);
20030 emit_insn (gen_rtx_SET (VOIDmode, x,
20031 gen_rtx_AND (SImode,
20032 gen_rtx_NOT (SImode, mask),
20033 oldval)));
20035 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
20037 return x;
20040 /* A subroutine of the various atomic expanders. For sub-word operands,
20041 extract WIDE to NARROW via SHIFT. */
20043 static void
20044 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
20046 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
20047 wide, 1, OPTAB_LIB_WIDEN);
20048 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
20051 /* Expand an atomic compare and swap operation. */
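/* For a word-sized strong compare-and-swap this emits, roughly (a
   sketch; register numbers and labels are illustrative, and the
   surrounding barriers depend on the memory model):

	.L1:	lwarx   r9,0,r3		# load RETVAL and reserve
		cmpw    cr0,r9,r4	# compare against OLDVAL
		bne-    cr0,.L2		# mismatch: fail
		stwcx.  r5,0,r3		# store NEWVAL if still reserved
		bne-    cr0,.L1		# reservation lost: retry
	.L2:

   On exit CR0 contains EQ on success and NE on failure, from which
   BOOLVAL is computed.  */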
20053 void
20054 rs6000_expand_atomic_compare_and_swap (rtx operands[])
20056 rtx boolval, retval, mem, oldval, newval, cond;
20057 rtx label1, label2, x, mask, shift;
20058 enum machine_mode mode, orig_mode;
20059 enum memmodel mod_s, mod_f;
20060 bool is_weak;
20062 boolval = operands[0];
20063 retval = operands[1];
20064 mem = operands[2];
20065 oldval = operands[3];
20066 newval = operands[4];
20067 is_weak = (INTVAL (operands[5]) != 0);
20068 mod_s = (enum memmodel) INTVAL (operands[6]);
20069 mod_f = (enum memmodel) INTVAL (operands[7]);
20070 orig_mode = mode = GET_MODE (mem);
20072 mask = shift = NULL_RTX;
20073 if (mode == QImode || mode == HImode)
20075 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
20076 lwarx and use shift/mask operations. With power8, we need to do the
20077 comparison in SImode, but the store is still done in QI/HImode. */
20078 oldval = convert_modes (SImode, mode, oldval, 1);
20080 if (!TARGET_SYNC_HI_QI)
20082 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20084 /* Shift and mask OLDVAL into position within the word. */
20085 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
20086 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20088 /* Shift and mask NEWVAL into position within the word. */
20089 newval = convert_modes (SImode, mode, newval, 1);
20090 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
20091 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20094 /* Prepare to adjust the return value. */
20095 retval = gen_reg_rtx (SImode);
20096 mode = SImode;
20098 else if (reg_overlap_mentioned_p (retval, oldval))
20099 oldval = copy_to_reg (oldval);
20101 mem = rs6000_pre_atomic_barrier (mem, mod_s);
20103 label1 = NULL_RTX;
20104 if (!is_weak)
20106 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20107 emit_label (XEXP (label1, 0));
20109 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20111 emit_load_locked (mode, retval, mem);
20113 x = retval;
20114 if (mask)
20116 x = expand_simple_binop (SImode, AND, retval, mask,
20117 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20120 cond = gen_reg_rtx (CCmode);
20121 /* If we have TImode, synthesize a comparison. */
20122 if (mode != TImode)
20123 x = gen_rtx_COMPARE (CCmode, x, oldval);
20124 else
20126 rtx xor1_result = gen_reg_rtx (DImode);
20127 rtx xor2_result = gen_reg_rtx (DImode);
20128 rtx or_result = gen_reg_rtx (DImode);
20129 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
20130 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
20131 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
20132 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
20134 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
20135 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
20136 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
20137 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
20140 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
20142 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20143 emit_unlikely_jump (x, label2);
20145 x = newval;
20146 if (mask)
20147 x = rs6000_mask_atomic_subword (retval, newval, mask);
20149 emit_store_conditional (orig_mode, cond, mem, x);
20151 if (!is_weak)
20153 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20154 emit_unlikely_jump (x, label1);
20157 if (mod_f != MEMMODEL_RELAXED)
20158 emit_label (XEXP (label2, 0));
20160 rs6000_post_atomic_barrier (mod_s);
20162 if (mod_f == MEMMODEL_RELAXED)
20163 emit_label (XEXP (label2, 0));
20165 if (shift)
20166 rs6000_finish_atomic_subword (operands[1], retval, shift);
20167 else if (mode != GET_MODE (operands[1]))
20168 convert_move (operands[1], retval, 1);
20170 /* In all cases, CR0 contains EQ on success, and NE on failure. */
20171 x = gen_rtx_EQ (SImode, cond, const0_rtx);
20172 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
20175 /* Expand an atomic exchange operation. */
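/* A word-sized exchange emits, roughly (sketch; registers and label
   are illustrative):

	.L1:	lwarx   r9,0,r3		# RETVAL = *MEM, with reservation
		stwcx.  r4,0,r3		# try *MEM = VAL
		bne-    cr0,.L1		# retry if reservation was lost

   bracketed by whatever barriers the memory model requires.  */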
20177 void
20178 rs6000_expand_atomic_exchange (rtx operands[])
20180 rtx retval, mem, val, cond;
20181 enum machine_mode mode;
20182 enum memmodel model;
20183 rtx label, x, mask, shift;
20185 retval = operands[0];
20186 mem = operands[1];
20187 val = operands[2];
20188 model = (enum memmodel) INTVAL (operands[3]);
20189 mode = GET_MODE (mem);
20191 mask = shift = NULL_RTX;
20192 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
20194 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20196 /* Shift and mask VAL into position within the word. */
20197 val = convert_modes (SImode, mode, val, 1);
20198 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20199 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20201 /* Prepare to adjust the return value. */
20202 retval = gen_reg_rtx (SImode);
20203 mode = SImode;
20206 mem = rs6000_pre_atomic_barrier (mem, model);
20208 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20209 emit_label (XEXP (label, 0));
20211 emit_load_locked (mode, retval, mem);
20213 x = val;
20214 if (mask)
20215 x = rs6000_mask_atomic_subword (retval, val, mask);
20217 cond = gen_reg_rtx (CCmode);
20218 emit_store_conditional (mode, cond, mem, x);
20220 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20221 emit_unlikely_jump (x, label);
20223 rs6000_post_atomic_barrier (model);
20225 if (shift)
20226 rs6000_finish_atomic_subword (operands[0], retval, shift);
20229 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
20230 to perform. MEM is the memory on which to operate. VAL is the second
20231 operand of the binary operator. BEFORE and AFTER are optional locations to
20232 return the value of MEM either before or after the operation. MODEL_RTX
20233 is a CONST_INT containing the memory model to use. */
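/* For example, a word-sized atomic fetch-and-add emits, roughly
   (sketch; registers and label are illustrative):

	.L1:	lwarx   r9,0,r3		# BEFORE = *MEM, with reservation
		add     r10,r9,r4	# AFTER = BEFORE + VAL
		stwcx.  r10,0,r3	# try *MEM = AFTER
		bne-    cr0,.L1		# retry if reservation was lost

   with the memory-model barriers around the loop.  */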
20235 void
20236 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
20237 rtx orig_before, rtx orig_after, rtx model_rtx)
20239 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
20240 enum machine_mode mode = GET_MODE (mem);
20241 enum machine_mode store_mode = mode;
20242 rtx label, x, cond, mask, shift;
20243 rtx before = orig_before, after = orig_after;
20245 mask = shift = NULL_RTX;
20246 /* On power8, we want to use SImode for the operation. On previous systems,
20247 do the operation on a full word and shift/mask to get the proper byte or
20248 halfword. */
20249 if (mode == QImode || mode == HImode)
20251 if (TARGET_SYNC_HI_QI)
20253 val = convert_modes (SImode, mode, val, 1);
20255 /* Prepare to adjust the return value. */
20256 before = gen_reg_rtx (SImode);
20257 if (after)
20258 after = gen_reg_rtx (SImode);
20259 mode = SImode;
20261 else
20263 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20265 /* Shift and mask VAL into position within the word. */
20266 val = convert_modes (SImode, mode, val, 1);
20267 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20268 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20270 switch (code)
20272 case IOR:
20273 case XOR:
20274 /* We've already zero-extended VAL. That is sufficient to
20275 make certain that it does not affect other bits. */
20276 mask = NULL;
20277 break;
20279 case AND:
20280 /* If we make certain that all of the other bits in VAL are
20281 set, that will be sufficient to not affect other bits. */
20282 x = gen_rtx_NOT (SImode, mask);
20283 x = gen_rtx_IOR (SImode, x, val);
20284 emit_insn (gen_rtx_SET (VOIDmode, val, x));
20285 mask = NULL;
20286 break;
20288 case NOT:
20289 case PLUS:
20290 case MINUS:
20291 /* These will all affect bits outside the field and need
20292 adjustment via MASK within the loop. */
20293 break;
20295 default:
20296 gcc_unreachable ();
20299 /* Prepare to adjust the return value. */
20300 before = gen_reg_rtx (SImode);
20301 if (after)
20302 after = gen_reg_rtx (SImode);
20303 store_mode = mode = SImode;
20307 mem = rs6000_pre_atomic_barrier (mem, model);
20309 label = gen_label_rtx ();
20310 emit_label (label);
20311 label = gen_rtx_LABEL_REF (VOIDmode, label);
20313 if (before == NULL_RTX)
20314 before = gen_reg_rtx (mode);
20316 emit_load_locked (mode, before, mem);
20318 if (code == NOT)
20320 x = expand_simple_binop (mode, AND, before, val,
20321 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20322 after = expand_simple_unop (mode, NOT, x, after, 1);
20324 else
20326 after = expand_simple_binop (mode, code, before, val,
20327 after, 1, OPTAB_LIB_WIDEN);
20330 x = after;
20331 if (mask)
20333 x = expand_simple_binop (SImode, AND, after, mask,
20334 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20335 x = rs6000_mask_atomic_subword (before, x, mask);
20337 else if (store_mode != mode)
20338 x = convert_modes (store_mode, mode, x, 1);
20340 cond = gen_reg_rtx (CCmode);
20341 emit_store_conditional (store_mode, cond, mem, x);
20343 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20344 emit_unlikely_jump (x, label);
20346 rs6000_post_atomic_barrier (model);
20348 if (shift)
20350 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
20351 then do the calculations in an SImode register. */
20352 if (orig_before)
20353 rs6000_finish_atomic_subword (orig_before, before, shift);
20354 if (orig_after)
20355 rs6000_finish_atomic_subword (orig_after, after, shift);
20357 else if (store_mode != mode)
20359 /* QImode/HImode on machines with lbarx/lharx where we do the native
20360 operation and then do the calculations in an SImode register. */
20361 if (orig_before)
20362 convert_move (orig_before, before, 1);
20363 if (orig_after)
20364 convert_move (orig_after, after, 1);
20366 else if (orig_after && after != orig_after)
20367 emit_move_insn (orig_after, after);
20370 /* Emit instructions to move SRC to DST. Called by splitters for
20371 multi-register moves. It will emit at most one instruction for
20372 each register that is accessed; that is, it won't emit li/lis pairs
20373 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20374 register. */
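/* For example, a TImode move between overlapping GPR pairs in 64-bit
   mode becomes two DImode register moves; when REGNO (src) is below
   REGNO (dst) the pieces are emitted highest-numbered first, so no
   source word is clobbered before it has been read.  */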
20376 void
20377 rs6000_split_multireg_move (rtx dst, rtx src)
20379 /* The register number of the first register being moved. */
20380 int reg;
20381 /* The mode that is to be moved. */
20382 enum machine_mode mode;
20383 /* The mode that the move is being done in, and its size. */
20384 enum machine_mode reg_mode;
20385 int reg_mode_size;
20386 /* The number of registers that will be moved. */
20387 int nregs;
20389 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20390 mode = GET_MODE (dst);
20391 nregs = hard_regno_nregs[reg][mode];
20392 if (FP_REGNO_P (reg))
20393 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20394 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20395 else if (ALTIVEC_REGNO_P (reg))
20396 reg_mode = V16QImode;
20397 else if (TARGET_E500_DOUBLE && mode == TFmode)
20398 reg_mode = DFmode;
20399 else
20400 reg_mode = word_mode;
20401 reg_mode_size = GET_MODE_SIZE (reg_mode);
20403 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20405 /* TDmode residing in FP registers is special, since the ISA requires that
20406 the lower-numbered word of a register pair is always the most significant
20407 word, even in little-endian mode. This does not match the usual subreg
20408 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20409 the appropriate constituent registers "by hand" in little-endian mode.
20411 Note we do not need to check for destructive overlap here since TDmode
20412 can only reside in even/odd register pairs. */
20413 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20415 rtx p_src, p_dst;
20416 int i;
20418 for (i = 0; i < nregs; i++)
20420 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
20421 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
20422 else
20423 p_src = simplify_gen_subreg (reg_mode, src, mode,
20424 i * reg_mode_size);
20426 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
20427 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
20428 else
20429 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
20430 i * reg_mode_size);
20432 emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
20435 return;
20438 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
20440 /* Move register range backwards, if we might have destructive
20441 overlap. */
20442 int i;
20443 for (i = nregs - 1; i >= 0; i--)
20444 emit_insn (gen_rtx_SET (VOIDmode,
20445 simplify_gen_subreg (reg_mode, dst, mode,
20446 i * reg_mode_size),
20447 simplify_gen_subreg (reg_mode, src, mode,
20448 i * reg_mode_size)));
20450 else
20452 int i;
20453 int j = -1;
20454 bool used_update = false;
20455 rtx restore_basereg = NULL_RTX;
20457 if (MEM_P (src) && INT_REGNO_P (reg))
20459 rtx breg;
20461 if (GET_CODE (XEXP (src, 0)) == PRE_INC
20462 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
20464 rtx delta_rtx;
20465 breg = XEXP (XEXP (src, 0), 0);
20466 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
20467 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
20468 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
20469 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20470 src = replace_equiv_address (src, breg);
20472 else if (! rs6000_offsettable_memref_p (src, reg_mode))
20474 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
20476 rtx basereg = XEXP (XEXP (src, 0), 0);
20477 if (TARGET_UPDATE)
20479 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
20480 emit_insn (gen_rtx_SET (VOIDmode, ndst,
20481 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
20482 used_update = true;
20484 else
20485 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20486 XEXP (XEXP (src, 0), 1)));
20487 src = replace_equiv_address (src, basereg);
20489 else
20491 rtx basereg = gen_rtx_REG (Pmode, reg);
20492 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
20493 src = replace_equiv_address (src, basereg);
20497 breg = XEXP (src, 0);
20498 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
20499 breg = XEXP (breg, 0);
20501 /* If the base register we are using to address memory is
20502 also a destination reg, then change that register last. */
20503 if (REG_P (breg)
20504 && REGNO (breg) >= REGNO (dst)
20505 && REGNO (breg) < REGNO (dst) + nregs)
20506 j = REGNO (breg) - REGNO (dst);
20508 else if (MEM_P (dst) && INT_REGNO_P (reg))
20510 rtx breg;
20512 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
20513 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
20515 rtx delta_rtx;
20516 breg = XEXP (XEXP (dst, 0), 0);
20517 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
20518 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
20519 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
20521 /* We have to update the breg before doing the store.
20522 Use store with update, if available. */
20524 if (TARGET_UPDATE)
20526 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20527 emit_insn (TARGET_32BIT
20528 ? (TARGET_POWERPC64
20529 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
20530 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
20531 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
20532 used_update = true;
20534 else
20535 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20536 dst = replace_equiv_address (dst, breg);
20538 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
20539 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
20541 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
20543 rtx basereg = XEXP (XEXP (dst, 0), 0);
20544 if (TARGET_UPDATE)
20546 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20547 emit_insn (gen_rtx_SET (VOIDmode,
20548 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
20549 used_update = true;
20551 else
20552 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20553 XEXP (XEXP (dst, 0), 1)));
20554 dst = replace_equiv_address (dst, basereg);
20556 else
20558 rtx basereg = XEXP (XEXP (dst, 0), 0);
20559 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
20560 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
20561 && REG_P (basereg)
20562 && REG_P (offsetreg)
20563 && REGNO (basereg) != REGNO (offsetreg));
20564 if (REGNO (basereg) == 0)
20566 rtx tmp = offsetreg;
20567 offsetreg = basereg;
20568 basereg = tmp;
20570 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
20571 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
20572 dst = replace_equiv_address (dst, basereg);
20575 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
20576 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
20579 for (i = 0; i < nregs; i++)
20581 /* Calculate index to next subword. */
20582 ++j;
20583 if (j == nregs)
20584 j = 0;
20586 /* If compiler already emitted move of first word by
20587 store with update, no need to do anything. */
20588 if (j == 0 && used_update)
20589 continue;
20591 emit_insn (gen_rtx_SET (VOIDmode,
20592 simplify_gen_subreg (reg_mode, dst, mode,
20593 j * reg_mode_size),
20594 simplify_gen_subreg (reg_mode, src, mode,
20595 j * reg_mode_size)));
20597 if (restore_basereg != NULL_RTX)
20598 emit_insn (restore_basereg);
20603 /* This page contains routines that are used to determine what the
20604 function prologue and epilogue code will do and write them out. */
20606 static inline bool
20607 save_reg_p (int r)
20609 return !call_used_regs[r] && df_regs_ever_live_p (r);
20612 /* Return the first fixed-point register that is required to be
20613 saved. 32 if none. */
20615 int
20616 first_reg_to_save (void)
20618 int first_reg;
20620 /* Find lowest numbered live register. */
20621 for (first_reg = 13; first_reg <= 31; first_reg++)
20622 if (save_reg_p (first_reg))
20623 break;
20625 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
20626 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20627 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
20628 || (TARGET_TOC && TARGET_MINIMAL_TOC))
20629 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20630 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
20632 #if TARGET_MACHO
20633 if (flag_pic
20634 && crtl->uses_pic_offset_table
20635 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
20636 return RS6000_PIC_OFFSET_TABLE_REGNUM;
20637 #endif
20639 return first_reg;
20642 /* Similar, for FP regs. */
20644 int
20645 first_fp_reg_to_save (void)
20647 int first_reg;
20649 /* Find lowest numbered live register. */
20650 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
20651 if (save_reg_p (first_reg))
20652 break;
20654 return first_reg;
20657 /* Similar, for AltiVec regs. */
20659 static int
20660 first_altivec_reg_to_save (void)
20662 int i;
20664 /* Stack frame remains as is unless we are in AltiVec ABI. */
20665 if (! TARGET_ALTIVEC_ABI)
20666 return LAST_ALTIVEC_REGNO + 1;
20668 /* On Darwin, the unwind routines are compiled without
20669 TARGET_ALTIVEC, and use save_world to save/restore the
20670 altivec registers when necessary. */
20671 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20672 && ! TARGET_ALTIVEC)
20673 return FIRST_ALTIVEC_REGNO + 20;
20675 /* Find lowest numbered live register. */
20676 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
20677 if (save_reg_p (i))
20678 break;
20680 return i;
20683 /* Return a 32-bit mask of the AltiVec registers we need to set in
20684 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
20685 numbered IBM-style, so bit 0 is the MSB of the 32-bit word. */
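/* For instance, assuming the usual ALTIVEC_REG_BIT mapping of
   0x80000000 >> (Vn), a function in which only V20 is live yields a
   mask of 0x00000800.  */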
20687 static unsigned int
20688 compute_vrsave_mask (void)
20690 unsigned int i, mask = 0;
20692 /* On Darwin, the unwind routines are compiled without
20693 TARGET_ALTIVEC, and use save_world to save/restore the
20694 call-saved altivec registers when necessary. */
20695 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20696 && ! TARGET_ALTIVEC)
20697 mask |= 0xFFF;
20699 /* First, find out if we use _any_ altivec registers. */
20700 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20701 if (df_regs_ever_live_p (i))
20702 mask |= ALTIVEC_REG_BIT (i);
20704 if (mask == 0)
20705 return mask;
20707 /* Next, remove the argument registers from the set. These must
20708 be in the VRSAVE mask set by the caller, so we don't need to add
20709 them in again. More importantly, the mask we compute here is
20710 used to generate CLOBBERs in the set_vrsave insn, and we do not
20711 wish the argument registers to die. */
20712 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
20713 mask &= ~ALTIVEC_REG_BIT (i);
20715 /* Similarly, remove the return value from the set. */
20717 bool yes = false;
20718 diddle_return_value (is_altivec_return_reg, &yes);
20719 if (yes)
20720 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
20723 return mask;
20726 /* For a very restricted set of circumstances, we can cut down the
20727 size of prologues/epilogues by calling our own save/restore-the-world
20728 routines. */
20730 static void
20731 compute_save_world_info (rs6000_stack_t *info_ptr)
20733 info_ptr->world_save_p = 1;
20734 info_ptr->world_save_p
20735 = (WORLD_SAVE_P (info_ptr)
20736 && DEFAULT_ABI == ABI_DARWIN
20737 && !cfun->has_nonlocal_label
20738 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
20739 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
20740 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
20741 && info_ptr->cr_save_p);
20743 /* This will not work in conjunction with sibcalls. Make sure there
20744 are none. (This check is expensive, but seldom executed.) */
20745 if (WORLD_SAVE_P (info_ptr))
20747 rtx insn;
20748 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
20749 if (CALL_P (insn) && SIBLING_CALL_P (insn))
20751 info_ptr->world_save_p = 0;
20752 break;
20756 if (WORLD_SAVE_P (info_ptr))
20758 /* Even if we're not touching VRsave, make sure there's room on the
20759 stack for it, if it looks like we're calling SAVE_WORLD, which
20760 will attempt to save it. */
20761 info_ptr->vrsave_size = 4;
20763 /* If we are going to save the world, we need to save the link register too. */
20764 info_ptr->lr_save_p = 1;
20766 /* "Save" the VRsave register too if we're saving the world. */
20767 if (info_ptr->vrsave_mask == 0)
20768 info_ptr->vrsave_mask = compute_vrsave_mask ();
20770 /* Because the Darwin register save/restore routines only handle
20771 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
20772 check. */
20773 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
20774 && (info_ptr->first_altivec_reg_save
20775 >= FIRST_SAVED_ALTIVEC_REGNO));
20777 return;
20781 static void
20782 is_altivec_return_reg (rtx reg, void *xyes)
20784 bool *yes = (bool *) xyes;
20785 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
20786 *yes = true;
20790 /* Look for user-defined global regs in the range FIRST to LAST-1.
20791 We should not restore these, and so cannot use lmw or out-of-line
20792 restore functions if there are any. We also can't save them
20793 (well, emit frame notes for them), because frame unwinding during
20794 exception handling will restore saved registers. */
20796 static bool
20797 global_regs_p (unsigned first, unsigned last)
20799 while (first < last)
20800 if (global_regs[first++])
20801 return true;
20802 return false;
20805 /* Determine the strategy for saving/restoring registers. */
20807 enum {
20808 SAVRES_MULTIPLE = 0x1,
20809 SAVE_INLINE_FPRS = 0x2,
20810 SAVE_INLINE_GPRS = 0x4,
20811 REST_INLINE_FPRS = 0x8,
20812 REST_INLINE_GPRS = 0x10,
20813 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
20814 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
20815 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
20816 SAVE_INLINE_VRS = 0x100,
20817 REST_INLINE_VRS = 0x200
20820 static int
20821 rs6000_savres_strategy (rs6000_stack_t *info,
20822 bool using_static_chain_p)
20824 int strategy = 0;
20825 bool lr_save_p;
20827 if (TARGET_MULTIPLE
20828 && !TARGET_POWERPC64
20829 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
20830 && info->first_gp_reg_save < 31
20831 && !global_regs_p (info->first_gp_reg_save, 32))
20832 strategy |= SAVRES_MULTIPLE;
20834 if (crtl->calls_eh_return
20835 || cfun->machine->ra_need_lr)
20836 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
20837 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
20838 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20840 if (info->first_fp_reg_save == 64
20841 /* The out-of-line FP routines use double-precision stores;
20842 we can't use those routines if we don't have such stores. */
20843 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
20844 || global_regs_p (info->first_fp_reg_save, 64))
20845 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20847 if (info->first_gp_reg_save == 32
20848 || (!(strategy & SAVRES_MULTIPLE)
20849 && global_regs_p (info->first_gp_reg_save, 32)))
20850 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20852 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
20853 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
20854 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20856 /* Define cutoff for using out-of-line functions to save registers. */
20857 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
20859 if (!optimize_size)
20861 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20862 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20863 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20865 else
20867 /* Prefer out-of-line restore if it will exit. */
20868 if (info->first_fp_reg_save > 61)
20869 strategy |= SAVE_INLINE_FPRS;
20870 if (info->first_gp_reg_save > 29)
20872 if (info->first_fp_reg_save == 64)
20873 strategy |= SAVE_INLINE_GPRS;
20874 else
20875 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20877 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
20878 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20881 else if (DEFAULT_ABI == ABI_DARWIN)
20883 if (info->first_fp_reg_save > 60)
20884 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20885 if (info->first_gp_reg_save > 29)
20886 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20887 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20889 else
20891 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
20892 if (info->first_fp_reg_save > 61)
20893 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20894 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20895 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20898 /* Don't bother to try to save things out-of-line if r11 is occupied
20899 by the static chain. It would require too much fiddling and the
20900 static chain is rarely used anyway. FPRs are saved w.r.t the stack
20901 pointer on Darwin, and AIX uses r1 or r12. */
20902 if (using_static_chain_p
20903 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
20904 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
20905 | SAVE_INLINE_GPRS
20906 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20908 /* We can only use the out-of-line routines to restore if we've
20909 saved all the registers from first_fp_reg_save in the prologue.
20910 Otherwise, we risk loading garbage. */
20911 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
20913 int i;
20915 for (i = info->first_fp_reg_save; i < 64; i++)
20916 if (!save_reg_p (i))
20918 strategy |= REST_INLINE_FPRS;
20919 break;
20923 /* If we are going to use store multiple, then don't even bother
20924 with the out-of-line routines, since the store-multiple
20925 instruction will always be smaller. */
20926 if ((strategy & SAVRES_MULTIPLE))
20927 strategy |= SAVE_INLINE_GPRS;
20929 /* info->lr_save_p isn't yet set if the only reason lr needs to be
20930 saved is an out-of-line save or restore. Set up the value for
20931 the next test (excluding out-of-line gpr restore). */
20932 lr_save_p = (info->lr_save_p
20933 || !(strategy & SAVE_INLINE_GPRS)
20934 || !(strategy & SAVE_INLINE_FPRS)
20935 || !(strategy & SAVE_INLINE_VRS)
20936 || !(strategy & REST_INLINE_FPRS)
20937 || !(strategy & REST_INLINE_VRS));
20939 /* The situation is more complicated with load multiple. We'd
20940 prefer to use the out-of-line routines for restores, since the
20941 "exit" out-of-line routines can handle the restore of LR and the
20942 frame teardown. However it doesn't make sense to use the
20943 out-of-line routine if that is the only reason we'd need to save
20944 LR, and we can't use the "exit" out-of-line gpr restore if we
20945 have saved some fprs; in those cases it is advantageous to use
20946 load multiple when available. */
20947 if ((strategy & SAVRES_MULTIPLE)
20948 && (!lr_save_p
20949 || info->first_fp_reg_save != 64))
20950 strategy |= REST_INLINE_GPRS;
20952 /* Saving CR interferes with the exit routines used on the SPE, so
20953 just punt here. */
20954 if (TARGET_SPE_ABI
20955 && info->spe_64bit_regs_used
20956 && info->cr_save_p)
20957 strategy |= REST_INLINE_GPRS;
20959 /* We can only use load multiple or the out-of-line routines to
20960 restore if we've used store multiple or out-of-line routines
20961 in the prologue, i.e. if we've saved all the registers from
20962 first_gp_reg_save. Otherwise, we risk loading garbage. */
20963 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
20964 == SAVE_INLINE_GPRS)
20966 int i;
20968 for (i = info->first_gp_reg_save; i < 32; i++)
20969 if (!save_reg_p (i))
20971 strategy |= REST_INLINE_GPRS;
20972 break;
20976 if (TARGET_ELF && TARGET_64BIT)
20978 if (!(strategy & SAVE_INLINE_FPRS))
20979 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20980 else if (!(strategy & SAVE_INLINE_GPRS)
20981 && info->first_fp_reg_save == 64)
20982 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
20984 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
20985 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
20987 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
20988 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20990 return strategy;
20993 /* Calculate the stack information for the current function. This is
20994 complicated by having two separate calling sequences, the AIX calling
20995 sequence and the V.4 calling sequence.
20997 AIX (and Darwin/Mac OS X) stack frames look like:
20998 32-bit 64-bit
20999 SP----> +---------------------------------------+
21000 | back chain to caller | 0 0
21001 +---------------------------------------+
21002 | saved CR | 4 8 (8-11)
21003 +---------------------------------------+
21004 | saved LR | 8 16
21005 +---------------------------------------+
21006 | reserved for compilers | 12 24
21007 +---------------------------------------+
21008 | reserved for binders | 16 32
21009 +---------------------------------------+
21010 | saved TOC pointer | 20 40
21011 +---------------------------------------+
21012 | Parameter save area (P) | 24 48
21013 +---------------------------------------+
21014 | Alloca space (A) | 24+P etc.
21015 +---------------------------------------+
21016 | Local variable space (L) | 24+P+A
21017 +---------------------------------------+
21018 | Float/int conversion temporary (X) | 24+P+A+L
21019 +---------------------------------------+
21020 | Save area for AltiVec registers (W) | 24+P+A+L+X
21021 +---------------------------------------+
21022 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
21023 +---------------------------------------+
21024 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
21025 +---------------------------------------+
21026 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
21027 +---------------------------------------+
21028 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
21029 +---------------------------------------+
21030 old SP->| back chain to caller's caller |
21031 +---------------------------------------+
21033 The required alignment for AIX configurations is two words (i.e., 8
21034 or 16 bytes).
21036 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
21038 SP----> +---------------------------------------+
21039 | Back chain to caller | 0
21040 +---------------------------------------+
21041 | Save area for CR | 8
21042 +---------------------------------------+
21043 | Saved LR | 16
21044 +---------------------------------------+
21045 | Saved TOC pointer | 24
21046 +---------------------------------------+
21047 | Parameter save area (P) | 32
21048 +---------------------------------------+
21049 | Alloca space (A) | 32+P
21050 +---------------------------------------+
21051 | Local variable space (L) | 32+P+A
21052 +---------------------------------------+
21053 | Save area for AltiVec registers (W) | 32+P+A+L
21054 +---------------------------------------+
21055 | AltiVec alignment padding (Y) | 32+P+A+L+W
21056 +---------------------------------------+
21057 | Save area for GP registers (G) | 32+P+A+L+W+Y
21058 +---------------------------------------+
21059 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
21060 +---------------------------------------+
21061 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
21062 +---------------------------------------+
21065 V.4 stack frames look like:
21067 SP----> +---------------------------------------+
21068 | back chain to caller | 0
21069 +---------------------------------------+
21070 | caller's saved LR | 4
21071 +---------------------------------------+
21072 | Parameter save area (P) | 8
21073 +---------------------------------------+
21074 | Alloca space (A) | 8+P
21075 +---------------------------------------+
21076 | Varargs save area (V) | 8+P+A
21077 +---------------------------------------+
21078 | Local variable space (L) | 8+P+A+V
21079 +---------------------------------------+
21080 | Float/int conversion temporary (X) | 8+P+A+V+L
21081 +---------------------------------------+
21082 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
21083 +---------------------------------------+
21084 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
21085 +---------------------------------------+
21086 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
21087 +---------------------------------------+
21088 | SPE: area for 64-bit GP registers |
21089 +---------------------------------------+
21090 | SPE alignment padding |
21091 +---------------------------------------+
21092 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
21093 +---------------------------------------+
21094 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
21095 +---------------------------------------+
21096 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
21097 +---------------------------------------+
21098 old SP->| back chain to caller's caller |
21099 +---------------------------------------+
21101 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
21102 given. (But note below and in sysv4.h that we require only 8 and
21103 may round up the size of our stack frame anyway. The historical
21104 reason is early versions of powerpc-linux which didn't properly
21105 align the stack at program startup. A happy side-effect is that
21106 -mno-eabi libraries can be used with -meabi programs.)
21108 The EABI configuration defaults to the V.4 layout. However,
21109 the stack alignment requirements may differ. If -mno-eabi is not
21110 given, the required stack alignment is 8 bytes; if -mno-eabi is
21111 given, the required alignment is 16 bytes. (But see V.4 comment
21112 above.) */
21114 #ifndef ABI_STACK_BOUNDARY
21115 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
21116 #endif
21118 static rs6000_stack_t *
21119 rs6000_stack_info (void)
21121 rs6000_stack_t *info_ptr = &stack_info;
21122 int reg_size = TARGET_32BIT ? 4 : 8;
21123 int ehrd_size;
21124 int ehcr_size;
21125 int save_align;
21126 int first_gp;
21127 HOST_WIDE_INT non_fixed_size;
21128 bool using_static_chain_p;
21130 if (reload_completed && info_ptr->reload_completed)
21131 return info_ptr;
21133 memset (info_ptr, 0, sizeof (*info_ptr));
21134 info_ptr->reload_completed = reload_completed;
21136 if (TARGET_SPE)
21138 /* Cache value so we don't rescan instruction chain over and over. */
21139 if (cfun->machine->insn_chain_scanned_p == 0)
21140 cfun->machine->insn_chain_scanned_p
21141 = spe_func_has_64bit_regs_p () + 1;
21142 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
21145 /* Select which calling sequence. */
21146 info_ptr->abi = DEFAULT_ABI;
21148 /* Calculate which registers need to be saved & save area size. */
21149 info_ptr->first_gp_reg_save = first_reg_to_save ();
21150 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
21151 even if it currently looks like we won't. Reload may need it to
21152 get at a constant; if so, it will have already created a constant
21153 pool entry for it. */
21154 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
21155 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
21156 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
21157 && crtl->uses_const_pool
21158 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
21159 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
21160 else
21161 first_gp = info_ptr->first_gp_reg_save;
21163 info_ptr->gp_size = reg_size * (32 - first_gp);
21165 /* For the SPE, we have an additional upper 32-bits on each GPR.
21166 Ideally we should save the entire 64-bits only when the upper
21167 half is used in SIMD instructions. Since we only record
21168 registers live (not the size they are used in), this proves
21169 difficult because we'd have to traverse the instruction chain at
21170 the right time, taking reload into account. This is a real pain,
21171 so we opt to save the GPRs in 64-bits whenever even one register
21172 gets used in 64-bits. Otherwise, all the registers in the frame
21173 get saved in 32-bits.
21175 So, when we save all GPRs (except the SP) in 64-bits, the
21176 traditional GP save area will be empty.
21177 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21178 info_ptr->gp_size = 0;
21180 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
21181 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
21183 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
21184 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
21185 - info_ptr->first_altivec_reg_save);
21187 /* Does this function call anything? */
21188 info_ptr->calls_p = (! crtl->is_leaf
21189 || cfun->machine->ra_needs_full_frame);
21191 /* Determine if we need to save the condition code registers. */
21192 if (df_regs_ever_live_p (CR2_REGNO)
21193 || df_regs_ever_live_p (CR3_REGNO)
21194 || df_regs_ever_live_p (CR4_REGNO))
21196 info_ptr->cr_save_p = 1;
21197 if (DEFAULT_ABI == ABI_V4)
21198 info_ptr->cr_size = reg_size;
21201 /* If the current function calls __builtin_eh_return, then we need
21202 to allocate stack space for registers that will hold data for
21203 the exception handler. */
21204 if (crtl->calls_eh_return)
21206 unsigned int i;
21207 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
21208 continue;
21210 /* SPE saves EH registers in 64-bits. */
21211 ehrd_size = i * (TARGET_SPE_ABI
21212 && info_ptr->spe_64bit_regs_used != 0
21213 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
21215 else
21216 ehrd_size = 0;
21218 /* In the ELFv2 ABI, we also need to allocate space for separate
21219 CR field save areas if the function calls __builtin_eh_return. */
21220 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
21222 /* This hard-codes that we have three call-saved CR fields. */
21223 ehcr_size = 3 * reg_size;
21224 /* We do *not* use the regular CR save mechanism. */
21225 info_ptr->cr_save_p = 0;
21227 else
21228 ehcr_size = 0;
21230 /* Determine various sizes. */
21231 info_ptr->reg_size = reg_size;
21232 info_ptr->fixed_size = RS6000_SAVE_AREA;
21233 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
21234 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
21235 TARGET_ALTIVEC ? 16 : 8);
21236 if (FRAME_GROWS_DOWNWARD)
21237 info_ptr->vars_size
21238 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
21239 + info_ptr->parm_size,
21240 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
21241 - (info_ptr->fixed_size + info_ptr->vars_size
21242 + info_ptr->parm_size);
21244 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21245 info_ptr->spe_gp_size = 8 * (32 - first_gp);
21246 else
21247 info_ptr->spe_gp_size = 0;
21249 if (TARGET_ALTIVEC_ABI)
21250 info_ptr->vrsave_mask = compute_vrsave_mask ();
21251 else
21252 info_ptr->vrsave_mask = 0;
21254 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
21255 info_ptr->vrsave_size = 4;
21256 else
21257 info_ptr->vrsave_size = 0;
21259 compute_save_world_info (info_ptr);
21261 /* Calculate the offsets. */
21262 switch (DEFAULT_ABI)
21264 case ABI_NONE:
21265 default:
21266 gcc_unreachable ();
21268 case ABI_AIX:
21269 case ABI_ELFv2:
21270 case ABI_DARWIN:
21271 info_ptr->fp_save_offset = - info_ptr->fp_size;
21272 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21274 if (TARGET_ALTIVEC_ABI)
21276 info_ptr->vrsave_save_offset
21277 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
21279 /* Align stack so vector save area is on a quadword boundary.
21280 The padding goes above the vectors. */
21281 if (info_ptr->altivec_size != 0)
21282 info_ptr->altivec_padding_size
21283 = info_ptr->vrsave_save_offset & 0xF;
21284 else
21285 info_ptr->altivec_padding_size = 0;
21287 info_ptr->altivec_save_offset
21288 = info_ptr->vrsave_save_offset
21289 - info_ptr->altivec_padding_size
21290 - info_ptr->altivec_size;
21291 gcc_assert (info_ptr->altivec_size == 0
21292 || info_ptr->altivec_save_offset % 16 == 0);
21294 /* Adjust for AltiVec case. */
21295 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21297 else
21298 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21300 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21301 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21302 info_ptr->lr_save_offset = 2*reg_size;
21303 break;
21305 case ABI_V4:
21306 info_ptr->fp_save_offset = - info_ptr->fp_size;
21307 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21308 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21310 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21312 /* Align stack so SPE GPR save area is aligned on a
21313 double-word boundary. */
21314 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21315 info_ptr->spe_padding_size
21316 = 8 - (-info_ptr->cr_save_offset % 8);
21317 else
21318 info_ptr->spe_padding_size = 0;
21320 info_ptr->spe_gp_save_offset
21321 = info_ptr->cr_save_offset
21322 - info_ptr->spe_padding_size
21323 - info_ptr->spe_gp_size;
21325 /* Adjust for SPE case. */
21326 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21328 else if (TARGET_ALTIVEC_ABI)
21330 info_ptr->vrsave_save_offset
21331 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21333 /* Align stack so vector save area is on a quadword boundary. */
21334 if (info_ptr->altivec_size != 0)
21335 info_ptr->altivec_padding_size
21336 = 16 - (-info_ptr->vrsave_save_offset % 16);
21337 else
21338 info_ptr->altivec_padding_size = 0;
21340 info_ptr->altivec_save_offset
21341 = info_ptr->vrsave_save_offset
21342 - info_ptr->altivec_padding_size
21343 - info_ptr->altivec_size;
21345 /* Adjust for AltiVec case. */
21346 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21348 else
21349 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21350 info_ptr->ehrd_offset -= ehrd_size;
21351 info_ptr->lr_save_offset = reg_size;
21352 break;
21355 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21356 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21357 + info_ptr->gp_size
21358 + info_ptr->altivec_size
21359 + info_ptr->altivec_padding_size
21360 + info_ptr->spe_gp_size
21361 + info_ptr->spe_padding_size
21362 + ehrd_size
21363 + ehcr_size
21364 + info_ptr->cr_size
21365 + info_ptr->vrsave_size,
21366 save_align);
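/* For example, on 32-bit V.4 with the AltiVec ABI (save_align == 16),
   saving GPRs 28..31 (gp_size == 16) plus a 4-byte CR save and nothing
   else gives a raw save area of 20 bytes, which RS6000_ALIGN rounds
   up to 32.  */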
21368 non_fixed_size = (info_ptr->vars_size
21369 + info_ptr->parm_size
21370 + info_ptr->save_size);
21372 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21373 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
21375 /* Determine if we need to save the link register. */
21376 if (info_ptr->calls_p
21377 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21378 && crtl->profile
21379 && !TARGET_PROFILE_KERNEL)
21380 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21381 #ifdef TARGET_RELOCATABLE
21382 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21383 #endif
21384 || rs6000_ra_ever_killed ())
21385 info_ptr->lr_save_p = 1;
21387 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21388 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21389 && call_used_regs[STATIC_CHAIN_REGNUM]);
21390 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21391 using_static_chain_p);
21393 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21394 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21395 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21396 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21397 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21398 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21399 info_ptr->lr_save_p = 1;
21401 if (info_ptr->lr_save_p)
21402 df_set_regs_ever_live (LR_REGNO, true);
21404 /* Determine if we need to allocate any stack frame:
21406 For AIX we need to push the stack if a frame pointer is needed
21407 (because the stack might be dynamically adjusted), if we are
21408 debugging, if we make calls, or if the sum of fp_save, gp_save,
21409 and local variables are more than the space needed to save all
21410 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21411 + 18*8 = 288 (GPR13 reserved).
21413 For V.4 we don't have the stack cushion that AIX uses, but assume
21414 that the debugger can handle stackless frames. */
21416 if (info_ptr->calls_p)
21417 info_ptr->push_p = 1;
21419 else if (DEFAULT_ABI == ABI_V4)
21420 info_ptr->push_p = non_fixed_size != 0;
21422 else if (frame_pointer_needed)
21423 info_ptr->push_p = 1;
21425 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
21426 info_ptr->push_p = 1;
21428 else
21429 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
21431 /* Zero offsets if we're not saving those registers. */
21432 if (info_ptr->fp_size == 0)
21433 info_ptr->fp_save_offset = 0;
21435 if (info_ptr->gp_size == 0)
21436 info_ptr->gp_save_offset = 0;
21438 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
21439 info_ptr->altivec_save_offset = 0;
21441 /* Zero VRSAVE offset if not saved and restored. */
21442 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
21443 info_ptr->vrsave_save_offset = 0;
21445 if (! TARGET_SPE_ABI
21446 || info_ptr->spe_64bit_regs_used == 0
21447 || info_ptr->spe_gp_size == 0)
21448 info_ptr->spe_gp_save_offset = 0;
21450 if (! info_ptr->lr_save_p)
21451 info_ptr->lr_save_offset = 0;
21453 if (! info_ptr->cr_save_p)
21454 info_ptr->cr_save_offset = 0;
21456 return info_ptr;
21459 /* Return true if the current function uses any GPRs in 64-bit SIMD
21460 mode. */
21462 static bool
21463 spe_func_has_64bit_regs_p (void)
21465 rtx insns, insn;
21467 /* Functions that save and restore all the call-saved registers will
21468 need to save/restore the registers in 64-bits. */
21469 if (crtl->calls_eh_return
21470 || cfun->calls_setjmp
21471 || crtl->has_nonlocal_goto)
21472 return true;
21474 insns = get_insns ();
21476 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
21478 if (INSN_P (insn))
21480 rtx i;
21482 /* FIXME: This should be implemented with attributes...
21484 (set_attr "spe64" "true")....then,
21485 if (get_spe64(insn)) return true;
21487 It's the only reliable way to do the stuff below. */
21489 i = PATTERN (insn);
21490 if (GET_CODE (i) == SET)
21492 enum machine_mode mode = GET_MODE (SET_SRC (i));
21494 if (SPE_VECTOR_MODE (mode))
21495 return true;
21496 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
21497 return true;
21502 return false;
21505 static void
21506 debug_stack_info (rs6000_stack_t *info)
21508 const char *abi_string;
21510 if (! info)
21511 info = rs6000_stack_info ();
21513 fprintf (stderr, "\nStack information for function %s:\n",
21514 ((current_function_decl && DECL_NAME (current_function_decl))
21515 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
21516 : "<unknown>"));
21518 switch (info->abi)
21520 default: abi_string = "Unknown"; break;
21521 case ABI_NONE: abi_string = "NONE"; break;
21522 case ABI_AIX: abi_string = "AIX"; break;
21523 case ABI_ELFv2: abi_string = "ELFv2"; break;
21524 case ABI_DARWIN: abi_string = "Darwin"; break;
21525 case ABI_V4: abi_string = "V.4"; break;
21528 fprintf (stderr, "\tABI = %5s\n", abi_string);
21530 if (TARGET_ALTIVEC_ABI)
21531 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
21533 if (TARGET_SPE_ABI)
21534 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
21536 if (info->first_gp_reg_save != 32)
21537 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
21539 if (info->first_fp_reg_save != 64)
21540 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
21542 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
21543 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
21544 info->first_altivec_reg_save);
21546 if (info->lr_save_p)
21547 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
21549 if (info->cr_save_p)
21550 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
21552 if (info->vrsave_mask)
21553 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
21555 if (info->push_p)
21556 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
21558 if (info->calls_p)
21559 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
21561 if (info->gp_save_offset)
21562 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
21564 if (info->fp_save_offset)
21565 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
21567 if (info->altivec_save_offset)
21568 fprintf (stderr, "\taltivec_save_offset = %5d\n",
21569 info->altivec_save_offset);
21571 if (info->spe_gp_save_offset)
21572 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
21573 info->spe_gp_save_offset);
21575 if (info->vrsave_save_offset)
21576 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
21577 info->vrsave_save_offset);
21579 if (info->lr_save_offset)
21580 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
21582 if (info->cr_save_offset)
21583 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
21585 if (info->varargs_save_offset)
21586 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
21588 if (info->total_size)
21589 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21590 info->total_size);
21592 if (info->vars_size)
21593 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21594 info->vars_size);
21596 if (info->parm_size)
21597 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
21599 if (info->fixed_size)
21600 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
21602 if (info->gp_size)
21603 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
21605 if (info->spe_gp_size)
21606 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
21608 if (info->fp_size)
21609 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
21611 if (info->altivec_size)
21612 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
21614 if (info->vrsave_size)
21615 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
21617 if (info->altivec_padding_size)
21618 fprintf (stderr, "\taltivec_padding_size= %5d\n",
21619 info->altivec_padding_size);
21621 if (info->spe_padding_size)
21622 fprintf (stderr, "\tspe_padding_size = %5d\n",
21623 info->spe_padding_size);
21625 if (info->cr_size)
21626 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
21628 if (info->save_size)
21629 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
21631 if (info->reg_size != 4)
21632 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
21634 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
21636 fprintf (stderr, "\n");
21639 rtx
21640 rs6000_return_addr (int count, rtx frame)
21642 /* Currently we don't optimize very well between prolog and body
21643 code, and for PIC code the result can actually be quite bad, so
21644 don't try to be too clever here. */
21645 if (count != 0
21646 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
21648 cfun->machine->ra_needs_full_frame = 1;
21650 return
21651 gen_rtx_MEM
21652 (Pmode,
21653 memory_address
21654 (Pmode,
21655 plus_constant (Pmode,
21656 copy_to_reg
21657 (gen_rtx_MEM (Pmode,
21658 memory_address (Pmode, frame))),
21659 RETURN_ADDRESS_OFFSET)));
21662 cfun->machine->ra_need_lr = 1;
21663 return get_hard_reg_initial_val (Pmode, LR_REGNO);
21666 /* Say whether a function is a candidate for sibcall handling or not. */
21668 static bool
21669 rs6000_function_ok_for_sibcall (tree decl, tree exp)
21671 tree fntype;
21673 if (decl)
21674 fntype = TREE_TYPE (decl);
21675 else
21676 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
21678 /* We can't do it if the called function has more vector parameters
21679 than the current function; there's nowhere to put the VRsave code. */
21680 if (TARGET_ALTIVEC_ABI
21681 && TARGET_ALTIVEC_VRSAVE
21682 && !(decl && decl == current_function_decl))
21684 function_args_iterator args_iter;
21685 tree type;
21686 int nvreg = 0;
21688 /* Functions with vector parameters are required to have a
21689 prototype, so the argument type info must be available
21690 here. */
21691 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
21692 if (TREE_CODE (type) == VECTOR_TYPE
21693 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21694 nvreg++;
21696 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
21697 if (TREE_CODE (type) == VECTOR_TYPE
21698 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21699 nvreg--;
21701 if (nvreg > 0)
21702 return false;
21705 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
21706 functions, because the callee may have a different TOC pointer to
21707 the caller and there's no way to ensure we restore the TOC when
21708 we return. With the secure-plt SYSV ABI we can't make non-local
21709 calls when -fpic/PIC because the plt call stubs use r30. */
21710 if (DEFAULT_ABI == ABI_DARWIN
21711 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21712 && decl
21713 && !DECL_EXTERNAL (decl)
21714 && (*targetm.binds_local_p) (decl))
21715 || (DEFAULT_ABI == ABI_V4
21716 && (!TARGET_SECURE_PLT
21717 || !flag_pic
21718 || (decl
21719 && (*targetm.binds_local_p) (decl)))))
21721 tree attr_list = TYPE_ATTRIBUTES (fntype);
21723 if (!lookup_attribute ("longcall", attr_list)
21724 || lookup_attribute ("shortcall", attr_list))
21725 return true;
21728 return false;
21731 static int
21732 rs6000_ra_ever_killed (void)
21734 rtx top;
21735 rtx reg;
21736 rtx insn;
21738 if (cfun->is_thunk)
21739 return 0;
21741 if (cfun->machine->lr_save_state)
21742 return cfun->machine->lr_save_state - 1;
21744 /* regs_ever_live has LR marked as used if any sibcalls are present,
21745 but this should not force saving and restoring in the
21746 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
21747 clobbers LR, so that is inappropriate. */
21749 /* Also, the prologue can generate a store into LR that
21750 doesn't really count, like this:
21752 move LR->R0
21753 bcl to set PIC register
21754 move LR->R31
21755 move R0->LR
21757 When we're called from the epilogue, we need to avoid counting
21758 this as a store. */
21760 push_topmost_sequence ();
21761 top = get_insns ();
21762 pop_topmost_sequence ();
21763 reg = gen_rtx_REG (Pmode, LR_REGNO);
21765 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
21767 if (INSN_P (insn))
21769 if (CALL_P (insn))
21771 if (!SIBLING_CALL_P (insn))
21772 return 1;
21774 else if (find_regno_note (insn, REG_INC, LR_REGNO))
21775 return 1;
21776 else if (set_of (reg, insn) != NULL_RTX
21777 && !prologue_epilogue_contains (insn))
21778 return 1;
21781 return 0;
21784 /* Emit instructions needed to load the TOC register.
21785 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
21786 there is a constant pool, or for SVR4 -fpic. */
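/* Sketch of the 32-bit SVR4 secure-plt -fPIC case handled first below
   (illustrative only; the label names are made up and the exact asm is
   determined by the load_toc_v4_PIC_* patterns):

       bcl 20,31,.L1
   .L1: mflr 30
       addis 30,30,.LCTOC1-.L1@ha
       addi 30,30,.LCTOC1-.L1@l  */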
21788 void
21789 rs6000_emit_load_toc_table (int fromprolog)
21791 rtx dest;
21792 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
21794 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
21796 char buf[30];
21797 rtx lab, tmp1, tmp2, got;
21799 lab = gen_label_rtx ();
21800 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
21801 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21802 if (flag_pic == 2)
21803 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21804 else
21805 got = rs6000_got_sym ();
21806 tmp1 = tmp2 = dest;
21807 if (!fromprolog)
21809 tmp1 = gen_reg_rtx (Pmode);
21810 tmp2 = gen_reg_rtx (Pmode);
21812 emit_insn (gen_load_toc_v4_PIC_1 (lab));
21813 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
21814 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
21815 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
21817 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
21819 emit_insn (gen_load_toc_v4_pic_si ());
21820 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21822 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
21824 char buf[30];
21825 rtx temp0 = (fromprolog
21826 ? gen_rtx_REG (Pmode, 0)
21827 : gen_reg_rtx (Pmode));
21829 if (fromprolog)
21831 rtx symF, symL;
21833 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21834 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21836 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
21837 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21839 emit_insn (gen_load_toc_v4_PIC_1 (symF));
21840 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21841 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
21843 else
21845 rtx tocsym, lab;
21847 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21848 lab = gen_label_rtx ();
21849 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
21850 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21851 if (TARGET_LINK_STACK)
21852 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
21853 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
21855 emit_insn (gen_addsi3 (dest, temp0, dest));
21857 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
21859 /* This is for AIX code running in non-PIC ELF32. */
21860 char buf[30];
21861 rtx realsym;
21862 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21863 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21865 emit_insn (gen_elf_high (dest, realsym));
21866 emit_insn (gen_elf_low (dest, dest, realsym));
21868 else
21870 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21872 if (TARGET_32BIT)
21873 emit_insn (gen_load_toc_aix_si (dest));
21874 else
21875 emit_insn (gen_load_toc_aix_di (dest));
21879 /* Emit instructions to restore the link register after determining where
21880 its value has been stored. */
21882 void
21883 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
21885 rs6000_stack_t *info = rs6000_stack_info ();
21886 rtx operands[2];
21888 operands[0] = source;
21889 operands[1] = scratch;
21891 if (info->lr_save_p)
21893 rtx frame_rtx = stack_pointer_rtx;
21894 HOST_WIDE_INT sp_offset = 0;
21895 rtx tmp;
21897 if (frame_pointer_needed
21898 || cfun->calls_alloca
21899 || info->total_size > 32767)
21901 tmp = gen_frame_mem (Pmode, frame_rtx);
21902 emit_move_insn (operands[1], tmp);
21903 frame_rtx = operands[1];
21905 else if (info->push_p)
21906 sp_offset = info->total_size;
21908 tmp = plus_constant (Pmode, frame_rtx,
21909 info->lr_save_offset + sp_offset);
21910 tmp = gen_frame_mem (Pmode, tmp);
21911 emit_move_insn (tmp, operands[0]);
21913 else
21914 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
21916 /* Freeze lr_save_p. We've just emitted rtl that depends on the
21917 state of lr_save_p so any change from here on would be a bug. In
21918 particular, stop rs6000_ra_ever_killed from considering the SET
21919 of lr we may have added just above. */
21920 cfun->machine->lr_save_state = info->lr_save_p + 1;
21923 static GTY(()) alias_set_type set = -1;
21925 alias_set_type
21926 get_TOC_alias_set (void)
21928 if (set == -1)
21929 set = new_alias_set ();
21930 return set;
21933 /* This returns nonzero if the current function uses the TOC. This is
21934 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
21935 is generated by the ABI_V4 load_toc_* patterns. */
21936 #if TARGET_ELF
21937 static int
21938 uses_TOC (void)
21940 rtx insn;
21942 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
21943 if (INSN_P (insn))
21945 rtx pat = PATTERN (insn);
21946 int i;
21948 if (GET_CODE (pat) == PARALLEL)
21949 for (i = 0; i < XVECLEN (pat, 0); i++)
21951 rtx sub = XVECEXP (pat, 0, i);
21952 if (GET_CODE (sub) == USE)
21954 sub = XEXP (sub, 0);
21955 if (GET_CODE (sub) == UNSPEC
21956 && XINT (sub, 1) == UNSPEC_TOC)
21957 return 1;
21961 return 0;
21963 #endif
21965 rtx
21966 create_TOC_reference (rtx symbol, rtx largetoc_reg)
21968 rtx tocrel, tocreg, hi;
21970 if (TARGET_DEBUG_ADDR)
21972 if (GET_CODE (symbol) == SYMBOL_REF)
21973 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
21974 XSTR (symbol, 0));
21975 else
21977 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
21978 GET_RTX_NAME (GET_CODE (symbol)));
21979 debug_rtx (symbol);
21983 if (!can_create_pseudo_p ())
21984 df_set_regs_ever_live (TOC_REGISTER, true);
21986 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
21987 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
21988 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
21989 return tocrel;
21991 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
21992 if (largetoc_reg != NULL)
21994 emit_move_insn (largetoc_reg, hi);
21995 hi = largetoc_reg;
21997 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
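/* Usage note (illustrative, not from the original source): with
   -mcmodel=small the bare UNSPEC_TOCREL returned above is used
   directly as a TOC-relative address, e.g. "lwz 9,var@toc(2)",
   while for medium/large code models the HIGH/LO_SUM pair becomes
   an addis off r2 followed by the low-part access.  */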
22000 /* Issue assembly directives that create a reference to the given DWARF
22001 FRAME_TABLE_LABEL from the current function section. */
22002 void
22003 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
22005 fprintf (asm_out_file, "\t.ref %s\n",
22006 (* targetm.strip_name_encoding) (frame_table_label));
22009 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
22010 and the change to the stack pointer. */
22012 static void
22013 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
22015 rtvec p;
22016 int i;
22017 rtx regs[3];
22019 i = 0;
22020 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22021 if (hard_frame_needed)
22022 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
22023 if (!(REGNO (fp) == STACK_POINTER_REGNUM
22024 || (hard_frame_needed
22025 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
22026 regs[i++] = fp;
22028 p = rtvec_alloc (i);
22029 while (--i >= 0)
22031 rtx mem = gen_frame_mem (BLKmode, regs[i]);
22032 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
22035 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
22038 /* Emit the correct code for allocating stack space, as insns.
22039 If COPY_REG, leave a copy of the old stack pointer there, offset by COPY_OFF.
22040 The generated code may use hard register 0 as a temporary. */
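/* Sketch of the typical output (illustrative): a small frame becomes a
   single store-with-update such as "stwu 1,-64(1)" (stdu on 64-bit);
   for frames larger than 32767 bytes, -SIZE is first loaded into r0
   and an indexed update like "stwux 1,1,0" is emitted instead.  */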
22042 static void
22043 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
22045 rtx insn;
22046 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22047 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
22048 rtx todec = gen_int_mode (-size, Pmode);
22049 rtx par, set, mem;
22051 if (INTVAL (todec) != -size)
22053 warning (0, "stack frame too large");
22054 emit_insn (gen_trap ());
22055 return;
22058 if (crtl->limit_stack)
22060 if (REG_P (stack_limit_rtx)
22061 && REGNO (stack_limit_rtx) > 1
22062 && REGNO (stack_limit_rtx) <= 31)
22064 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
22065 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22066 const0_rtx));
22068 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
22069 && TARGET_32BIT
22070 && DEFAULT_ABI == ABI_V4)
22072 rtx toload = gen_rtx_CONST (VOIDmode,
22073 gen_rtx_PLUS (Pmode,
22074 stack_limit_rtx,
22075 GEN_INT (size)));
22077 emit_insn (gen_elf_high (tmp_reg, toload));
22078 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
22079 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22080 const0_rtx));
22082 else
22083 warning (0, "stack limit expression is not supported");
22086 if (copy_reg)
22088 if (copy_off != 0)
22089 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
22090 else
22091 emit_move_insn (copy_reg, stack_reg);
22094 if (size > 32767)
22096 /* Need a note here so that try_split doesn't get confused. */
22097 if (get_last_insn () == NULL_RTX)
22098 emit_note (NOTE_INSN_DELETED);
22099 insn = emit_move_insn (tmp_reg, todec);
22100 try_split (PATTERN (insn), insn, 0);
22101 todec = tmp_reg;
22104 insn = emit_insn (TARGET_32BIT
22105 ? gen_movsi_update_stack (stack_reg, stack_reg,
22106 todec, stack_reg)
22107 : gen_movdi_di_update_stack (stack_reg, stack_reg,
22108 todec, stack_reg));
22109 /* Since we didn't use gen_frame_mem to generate the MEM, grab
22110 it now and set the alias set/attributes. The above gen_*_update
22111 calls will generate a PARALLEL with the MEM set being the first
22112 operation. */
22113 par = PATTERN (insn);
22114 gcc_assert (GET_CODE (par) == PARALLEL);
22115 set = XVECEXP (par, 0, 0);
22116 gcc_assert (GET_CODE (set) == SET);
22117 mem = SET_DEST (set);
22118 gcc_assert (MEM_P (mem));
22119 MEM_NOTRAP_P (mem) = 1;
22120 set_mem_alias_set (mem, get_frame_alias_set ());
22122 RTX_FRAME_RELATED_P (insn) = 1;
22123 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
22124 gen_rtx_SET (VOIDmode, stack_reg,
22125 gen_rtx_PLUS (Pmode, stack_reg,
22126 GEN_INT (-size))));
22129 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
22131 #if PROBE_INTERVAL > 32768
22132 #error Cannot use indexed addressing mode for stack probing
22133 #endif
22135 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
22136 inclusive. These are offsets from the current stack pointer. */
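/* Worked example (illustrative): with PROBE_INTERVAL == 4096,
   FIRST == 16384 and SIZE == 10000, the small-count path below probes
   at sp-20480 and sp-24576, then emits the final probe at sp-26384,
   which is -(FIRST + SIZE).  */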
22138 static void
22139 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
22141 /* See if we have a constant small number of probes to generate. If so,
22142 that's the easy case. */
22143 if (first + size <= 32768)
22145 HOST_WIDE_INT i;
22147 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
22148 it exceeds SIZE. If only one probe is needed, this will not
22149 generate any code. Then probe at FIRST + SIZE. */
22150 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
22151 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22152 -(first + i)));
22154 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22155 -(first + size)));
22158 /* Otherwise, do the same as above, but in a loop. Note that we must be
22159 extra careful with variables wrapping around because we might be at
22160 the very top (or the very bottom) of the address space and we have
22161 to be able to handle this case properly; in particular, we use an
22162 equality test for the loop condition. */
22163 else
22165 HOST_WIDE_INT rounded_size;
22166 rtx r12 = gen_rtx_REG (Pmode, 12);
22167 rtx r0 = gen_rtx_REG (Pmode, 0);
22169 /* Sanity check for the addressing mode we're going to use. */
22170 gcc_assert (first <= 32768);
22172 /* Step 1: round SIZE to the previous multiple of the interval. */
22174 rounded_size = size & -PROBE_INTERVAL;
22177 /* Step 2: compute initial and final value of the loop counter. */
22179 /* TEST_ADDR = SP + FIRST. */
22180 emit_insn (gen_rtx_SET (VOIDmode, r12,
22181 plus_constant (Pmode, stack_pointer_rtx,
22182 -first)));
22184 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
22185 if (rounded_size > 32768)
22187 emit_move_insn (r0, GEN_INT (-rounded_size));
22188 emit_insn (gen_rtx_SET (VOIDmode, r0,
22189 gen_rtx_PLUS (Pmode, r12, r0)));
22191 else
22192 emit_insn (gen_rtx_SET (VOIDmode, r0,
22193 plus_constant (Pmode, r12, -rounded_size)));
22196 /* Step 3: the loop
22198 while (TEST_ADDR != LAST_ADDR)
22200 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
22201 probe at TEST_ADDR
22204 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
22205 until it is equal to ROUNDED_SIZE. */
22207 if (TARGET_64BIT)
22208 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
22209 else
22210 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
22213 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
22214 that SIZE is equal to ROUNDED_SIZE. */
22216 if (size != rounded_size)
22217 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
22221 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
22222 absolute addresses. */
22224 const char *
22225 output_probe_stack_range (rtx reg1, rtx reg2)
22227 static int labelno = 0;
22228 char loop_lab[32], end_lab[32];
22229 rtx xops[2];
22231 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
22232 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
22234 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
22236 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
22237 xops[0] = reg1;
22238 xops[1] = reg2;
22239 if (TARGET_64BIT)
22240 output_asm_insn ("cmpd 0,%0,%1", xops);
22241 else
22242 output_asm_insn ("cmpw 0,%0,%1", xops);
22244 fputs ("\tbeq 0,", asm_out_file);
22245 assemble_name_raw (asm_out_file, end_lab);
22246 fputc ('\n', asm_out_file);
22248 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
22249 xops[1] = GEN_INT (-PROBE_INTERVAL);
22250 output_asm_insn ("addi %0,%0,%1", xops);
22252 /* Probe at TEST_ADDR and branch. */
22253 xops[1] = gen_rtx_REG (Pmode, 0);
22254 output_asm_insn ("stw %1,0(%0)", xops);
22255 fprintf (asm_out_file, "\tb ");
22256 assemble_name_raw (asm_out_file, loop_lab);
22257 fputc ('\n', asm_out_file);
22259 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
22261 return "";
22264 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
22265 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
22266 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
22267 deduce these equivalences by itself so it wasn't necessary to hold
22268 its hand so much. Don't be tempted to always supply d2_f_d_e with
22269 the actual cfa register, i.e. r31 when we are using a hard frame
22270 pointer. That fails when saving regs off r1, and sched moves the
22271 r31 setup past the reg saves. */
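/* Example of the rewrite performed here (sketch, not from the original
   source): for a save through r11 previously set to r1+200, the note
   turns (mem (plus (reg 11) OFF)) into
   (mem (plus (plus (reg 1) 200) OFF)), which simplify_rtx below folds
   so the unwinder sees a stack-pointer-relative address.  */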
22273 static rtx
22274 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
22275 rtx reg2, rtx rreg, rtx split_reg)
22277 rtx real, temp;
22279 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
22281 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
22282 int i;
22284 gcc_checking_assert (val == 0);
22285 real = PATTERN (insn);
22286 if (GET_CODE (real) == PARALLEL)
22287 for (i = 0; i < XVECLEN (real, 0); i++)
22288 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22290 rtx set = XVECEXP (real, 0, i);
22292 RTX_FRAME_RELATED_P (set) = 1;
22294 RTX_FRAME_RELATED_P (insn) = 1;
22295 return insn;
22298 /* copy_rtx will not make unique copies of registers, so we need to
22299 ensure we don't have unwanted sharing here. */
22300 if (reg == reg2)
22301 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22303 if (reg == rreg)
22304 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22306 real = copy_rtx (PATTERN (insn));
22308 if (reg2 != NULL_RTX)
22309 real = replace_rtx (real, reg2, rreg);
22311 if (REGNO (reg) == STACK_POINTER_REGNUM)
22312 gcc_checking_assert (val == 0);
22313 else
22314 real = replace_rtx (real, reg,
22315 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22316 STACK_POINTER_REGNUM),
22317 GEN_INT (val)));
22319 /* We expect that 'real' is either a SET or a PARALLEL containing
22320 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22321 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22323 if (GET_CODE (real) == SET)
22325 rtx set = real;
22327 temp = simplify_rtx (SET_SRC (set));
22328 if (temp)
22329 SET_SRC (set) = temp;
22330 temp = simplify_rtx (SET_DEST (set));
22331 if (temp)
22332 SET_DEST (set) = temp;
22333 if (GET_CODE (SET_DEST (set)) == MEM)
22335 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22336 if (temp)
22337 XEXP (SET_DEST (set), 0) = temp;
22340 else
22342 int i;
22344 gcc_assert (GET_CODE (real) == PARALLEL);
22345 for (i = 0; i < XVECLEN (real, 0); i++)
22346 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22348 rtx set = XVECEXP (real, 0, i);
22350 temp = simplify_rtx (SET_SRC (set));
22351 if (temp)
22352 SET_SRC (set) = temp;
22353 temp = simplify_rtx (SET_DEST (set));
22354 if (temp)
22355 SET_DEST (set) = temp;
22356 if (GET_CODE (SET_DEST (set)) == MEM)
22358 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22359 if (temp)
22360 XEXP (SET_DEST (set), 0) = temp;
22362 RTX_FRAME_RELATED_P (set) = 1;
22366 /* If a store insn has been split into multiple insns, the
22367 true source register is given by split_reg. */
22368 if (split_reg != NULL_RTX)
22369 real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);
22371 RTX_FRAME_RELATED_P (insn) = 1;
22372 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22374 return insn;
22377 /* Returns an insn that has a vrsave set operation with the
22378 appropriate CLOBBERs. */
22380 static rtx
22381 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22383 int nclobs, i;
22384 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22385 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22387 clobs[0]
22388 = gen_rtx_SET (VOIDmode,
22389 vrsave,
22390 gen_rtx_UNSPEC_VOLATILE (SImode,
22391 gen_rtvec (2, reg, vrsave),
22392 UNSPECV_SET_VRSAVE));
22394 nclobs = 1;
22396 /* We need to clobber the registers in the mask so the scheduler
22397 does not move sets to VRSAVE before sets of AltiVec registers.
22399 However, if the function receives nonlocal gotos, reload will set
22400 all call saved registers live. We will end up with:
22402 (set (reg 999) (mem))
22403 (parallel [ (set (reg vrsave) (unspec blah))
22404 (clobber (reg 999))])
22406 The clobber will cause the store into reg 999 to be dead, and
22407 flow will attempt to delete an epilogue insn. In this case, we
22408 need an unspec use/set of the register. */
22410 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22411 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22413 if (!epiloguep || call_used_regs [i])
22414 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22415 gen_rtx_REG (V4SImode, i));
22416 else
22418 rtx reg = gen_rtx_REG (V4SImode, i);
22420 clobs[nclobs++]
22421 = gen_rtx_SET (VOIDmode,
22422 reg,
22423 gen_rtx_UNSPEC (V4SImode,
22424 gen_rtvec (1, reg), 27));
22428 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
22430 for (i = 0; i < nclobs; ++i)
22431 XVECEXP (insn, 0, i) = clobs[i];
22433 return insn;
22436 static rtx
22437 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
22439 rtx addr, mem;
22441 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
22442 mem = gen_frame_mem (GET_MODE (reg), addr);
22443 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
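/* For instance (sketch only), gen_frame_store (r31, r1, 8) below
   builds (set (mem:P (plus:P (reg 1) (const_int 8))) (reg 31)).  */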
22446 static rtx
22447 gen_frame_load (rtx reg, rtx frame_reg, int offset)
22449 return gen_frame_set (reg, frame_reg, offset, false);
22452 static rtx
22453 gen_frame_store (rtx reg, rtx frame_reg, int offset)
22455 return gen_frame_set (reg, frame_reg, offset, true);
22458 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
22459 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
22461 static rtx
22462 emit_frame_save (rtx frame_reg, enum machine_mode mode,
22463 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
22465 rtx reg, insn;
22467 /* Some cases that need register indexed addressing. */
22468 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
22469 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
22470 || (TARGET_E500_DOUBLE && mode == DFmode)
22471 || (TARGET_SPE_ABI
22472 && SPE_VECTOR_MODE (mode)
22473 && !SPE_CONST_OFFSET_OK (offset))));
22475 reg = gen_rtx_REG (mode, regno);
22476 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
22477 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
22478 NULL_RTX, NULL_RTX, NULL_RTX);
22481 /* Emit an offset memory reference suitable for a frame store, while
22482 converting to a valid addressing mode. */
22484 static rtx
22485 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
22487 rtx int_rtx, offset_rtx;
22489 int_rtx = GEN_INT (offset);
22491 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
22492 || (TARGET_E500_DOUBLE && mode == DFmode))
22494 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
22495 emit_move_insn (offset_rtx, int_rtx);
22497 else
22498 offset_rtx = int_rtx;
22500 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
22503 #ifndef TARGET_FIX_AND_CONTINUE
22504 #define TARGET_FIX_AND_CONTINUE 0
22505 #endif
22507 /* The out-of-line routines can start at GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
22508 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
22509 #define LAST_SAVRES_REGISTER 31
22510 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
22512 enum {
22513 SAVRES_LR = 0x1,
22514 SAVRES_SAVE = 0x2,
22515 SAVRES_REG = 0x0c,
22516 SAVRES_GPR = 0,
22517 SAVRES_FPR = 4,
22518 SAVRES_VR = 8
22521 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
22523 /* Temporary holding space for an out-of-line register save/restore
22524 routine name. */
22525 static char savres_routine_name[30];
22527 /* Return the name for an out-of-line register save/restore routine.
22528 SEL selects the register class and whether we are saving or restoring. */
22530 static char *
22531 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
22533 const char *prefix = "";
22534 const char *suffix = "";
22536 /* Different targets are supposed to define
22537 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
22538 routine name could be defined with:
22540 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
22542 This is a nice idea in theory, but in reality, things are
22543 complicated in several ways:
22545 - ELF targets have save/restore routines for GPRs.
22547 - SPE targets use different prefixes for 32/64-bit registers, and
22548 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
22550 - PPC64 ELF targets have routines for save/restore of GPRs that
22551 differ in what they do with the link register, so having a set
22552 prefix doesn't work. (We only use one of the save routines at
22553 the moment, though.)
22555 - PPC32 ELF targets have "exit" versions of the restore routines
22556 that restore the link register and can save some extra space.
22557 These require an extra suffix. (There are also "tail" versions
22558 of the restore routines and "GOT" versions of the save routines,
22559 but we don't generate those at present. Same problems apply,
22560 though.)
22562 We deal with all this by synthesizing our own prefix/suffix and
22563 using that for the simple sprintf call shown above. */
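/* Examples of the names synthesized here (illustrative): 32-bit ELF
   uses _savegpr_29 / _restgpr_29, with the "exit" restore variant
   being _restgpr_29_x; the AIX-style names are _savegpr0_29 /
   _restgpr0_29 when the routine also handles LR and _savegpr1_29 /
   _restgpr1_29 when it does not.  */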
22564 if (TARGET_SPE)
22566 /* No floating point saves on the SPE. */
22567 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
22569 if ((sel & SAVRES_SAVE))
22570 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
22571 else
22572 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
22574 if ((sel & SAVRES_LR))
22575 suffix = "_x";
22577 else if (DEFAULT_ABI == ABI_V4)
22579 if (TARGET_64BIT)
22580 goto aix_names;
22582 if ((sel & SAVRES_REG) == SAVRES_GPR)
22583 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
22584 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22585 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
22586 else if ((sel & SAVRES_REG) == SAVRES_VR)
22587 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22588 else
22589 abort ();
22591 if ((sel & SAVRES_LR))
22592 suffix = "_x";
22594 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22596 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
22597 /* No out-of-line save/restore routines for GPRs on AIX. */
22598 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
22599 #endif
22601 aix_names:
22602 if ((sel & SAVRES_REG) == SAVRES_GPR)
22603 prefix = ((sel & SAVRES_SAVE)
22604 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
22605 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
22606 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22608 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
22609 if ((sel & SAVRES_LR))
22610 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
22611 else
22612 #endif
22614 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
22615 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
22618 else if ((sel & SAVRES_REG) == SAVRES_VR)
22619 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22620 else
22621 abort ();
22624 if (DEFAULT_ABI == ABI_DARWIN)
22626 /* The Darwin approach is (slightly) different, in order to be
22627 compatible with code generated by the system toolchain. There is a
22628 single symbol for the start of the save sequence, and the code here
22629 embeds an offset into that code on the basis of the first register
22630 to be saved. */
22631 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
22632 if ((sel & SAVRES_REG) == SAVRES_GPR)
22633 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
22634 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
22635 (regno - 13) * 4, prefix, regno);
22636 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22637 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
22638 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
22639 else if ((sel & SAVRES_REG) == SAVRES_VR)
22640 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
22641 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
22642 else
22643 abort ();
22645 else
22646 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
22648 return savres_routine_name;
22651 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
22652 SEL selects the register class and whether we are saving or restoring. */
22654 static rtx
22655 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
22657 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
22658 ? info->first_gp_reg_save
22659 : (sel & SAVRES_REG) == SAVRES_FPR
22660 ? info->first_fp_reg_save - 32
22661 : (sel & SAVRES_REG) == SAVRES_VR
22662 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
22663 : -1);
22664 rtx sym;
22665 int select = sel;
22667 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
22668 versions of the gpr routines. */
22669 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
22670 && info->spe_64bit_regs_used)
22671 select ^= SAVRES_FPR ^ SAVRES_GPR;
22673 /* Don't generate bogus routine names. */
22674 gcc_assert (FIRST_SAVRES_REGISTER <= regno
22675 && regno <= LAST_SAVRES_REGISTER
22676 && select >= 0 && select <= 12);
22678 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
22680 if (sym == NULL)
22682 char *name;
22684 name = rs6000_savres_routine_name (info, regno, sel);
22686 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
22687 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
22688 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
22691 return sym;
22694 /* Emit a sequence of insns, including a stack tie if needed, for
22695 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
22696 reset the stack pointer, but move the base of the frame into
22697 reg UPDT_REGNO for use by out-of-line register restore routines. */
22699 static rtx
22700 rs6000_emit_stack_reset (rs6000_stack_t *info,
22701 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
22702 unsigned updt_regno)
22704 rtx updt_reg_rtx;
22706 /* This blockage is needed so that sched doesn't decide to move
22707 the sp change before the register restores. */
22708 if (DEFAULT_ABI == ABI_V4
22709 || (TARGET_SPE_ABI
22710 && info->spe_64bit_regs_used != 0
22711 && info->first_gp_reg_save != 32))
22712 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
22714 /* If we are restoring registers out-of-line, we will be using the
22715 "exit" variants of the restore routines, which will reset the
22716 stack for us. But we do need to point updt_reg into the
22717 right place for those routines. */
22718 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
22720 if (frame_off != 0)
22721 return emit_insn (gen_add3_insn (updt_reg_rtx,
22722 frame_reg_rtx, GEN_INT (frame_off)));
22723 else if (REGNO (frame_reg_rtx) != updt_regno)
22724 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
22726 return NULL_RTX;
22729 /* Return the register number used as a pointer by out-of-line
22730 save/restore functions. */
22732 static inline unsigned
22733 ptr_regno_for_savres (int sel)
22735 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22736 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
22737 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
22740 /* Construct a parallel rtx describing the effect of a call to an
22741 out-of-line register save/restore routine, and emit the insn
22742 or jump_insn as appropriate. */
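/* Schematic shape of the PARALLEL built here (sketch only):
     (parallel [(return)                   ;; restore + SAVRES_LR only
                (clobber (reg:P LR))
                (use (symbol_ref "_savegpr0_29"))   ;; example name
                (use (reg:P ptr))          ;; clobber ptr, use r0 for VRs
                (set ...) ...              ;; one per register
                (set (mem ...) (reg 0))])  ;; save + SAVRES_LR only  */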
22744 static rtx
22745 rs6000_emit_savres_rtx (rs6000_stack_t *info,
22746 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
22747 enum machine_mode reg_mode, int sel)
22749 int i;
22750 int offset, start_reg, end_reg, n_regs, use_reg;
22751 int reg_size = GET_MODE_SIZE (reg_mode);
22752 rtx sym;
22753 rtvec p;
22754 rtx par, insn;
22756 offset = 0;
22757 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22758 ? info->first_gp_reg_save
22759 : (sel & SAVRES_REG) == SAVRES_FPR
22760 ? info->first_fp_reg_save
22761 : (sel & SAVRES_REG) == SAVRES_VR
22762 ? info->first_altivec_reg_save
22763 : -1);
22764 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22765 ? 32
22766 : (sel & SAVRES_REG) == SAVRES_FPR
22767 ? 64
22768 : (sel & SAVRES_REG) == SAVRES_VR
22769 ? LAST_ALTIVEC_REGNO + 1
22770 : -1);
22771 n_regs = end_reg - start_reg;
22772 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
22773 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
22774 + n_regs);
22776 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22777 RTVEC_ELT (p, offset++) = ret_rtx;
22779 RTVEC_ELT (p, offset++)
22780 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
22782 sym = rs6000_savres_routine_sym (info, sel);
22783 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
22785 use_reg = ptr_regno_for_savres (sel);
22786 if ((sel & SAVRES_REG) == SAVRES_VR)
22788 /* Vector regs are saved/restored using [reg+reg] addressing. */
22789 RTVEC_ELT (p, offset++)
22790 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22791 RTVEC_ELT (p, offset++)
22792 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
22794 else
22795 RTVEC_ELT (p, offset++)
22796 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22798 for (i = 0; i < end_reg - start_reg; i++)
22799 RTVEC_ELT (p, i + offset)
22800 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
22801 frame_reg_rtx, save_area_offset + reg_size * i,
22802 (sel & SAVRES_SAVE) != 0);
22804 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22805 RTVEC_ELT (p, i + offset)
22806 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
22808 par = gen_rtx_PARALLEL (VOIDmode, p);
22810 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22812 insn = emit_jump_insn (par);
22813 JUMP_LABEL (insn) = ret_rtx;
22815 else
22816 insn = emit_insn (par);
22817 return insn;
22820 /* Emit code to store CR fields that need to be saved into REG. */
22822 static void
22823 rs6000_emit_move_from_cr (rtx reg)
22825 /* Only the ELFv2 ABI allows storing only selected fields. */
22826 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
22828 int i, cr_reg[8], count = 0;
22830 /* Collect CR fields that must be saved. */
22831 for (i = 0; i < 8; i++)
22832 if (save_reg_p (CR0_REGNO + i))
22833 cr_reg[count++] = i;
22835 /* If it's just a single one, use mfcrf. */
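/* E.g. if only CR2 must be saved, this emits roughly "mfcrf Rn,0x20",
   the field mask being 1 << (7 - 2) (illustrative).  */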
22836 if (count == 1)
22838 rtvec p = rtvec_alloc (1);
22839 rtvec r = rtvec_alloc (2);
22840 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
22841 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
22842 RTVEC_ELT (p, 0)
22843 = gen_rtx_SET (VOIDmode, reg,
22844 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
22846 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22847 return;
22850 /* ??? It might be better to handle count == 2 / 3 cases here
22851 as well, using logical operations to combine the values. */
22854 emit_insn (gen_movesi_from_cr (reg));
22857 /* Determine whether the gp REG is really used. */
22859 static bool
22860 rs6000_reg_live_or_pic_offset_p (int reg)
22862 /* If the function calls eh_return, claim that every register that would
22863 otherwise be checked for liveness is used. This is required for the PIC offset
22864 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
22865 register allocation purposes in this case. */
22867 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
22868 && (!call_used_regs[reg]
22869 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22870 && !TARGET_SINGLE_PIC_BASE
22871 && TARGET_TOC && TARGET_MINIMAL_TOC)))
22872 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22873 && !TARGET_SINGLE_PIC_BASE
22874 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
22875 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
22878 /* Emit function prologue as insns. */
22880 void
22881 rs6000_emit_prologue (void)
22883 rs6000_stack_t *info = rs6000_stack_info ();
22884 enum machine_mode reg_mode = Pmode;
22885 int reg_size = TARGET_32BIT ? 4 : 8;
22886 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22887 rtx frame_reg_rtx = sp_reg_rtx;
22888 unsigned int cr_save_regno;
22889 rtx cr_save_rtx = NULL_RTX;
22890 rtx insn;
22891 int strategy;
22892 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
22893 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
22894 && call_used_regs[STATIC_CHAIN_REGNUM]);
22895 /* Offset to top of frame for frame_reg and sp respectively. */
22896 HOST_WIDE_INT frame_off = 0;
22897 HOST_WIDE_INT sp_off = 0;
22899 #ifdef ENABLE_CHECKING
22900 /* Track and check usage of r0, r11, r12. */
22901 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
22902 #define START_USE(R) do \
22904 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22905 reg_inuse |= 1 << (R); \
22906 } while (0)
22907 #define END_USE(R) do \
22909 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
22910 reg_inuse &= ~(1 << (R)); \
22911 } while (0)
22912 #define NOT_INUSE(R) do \
22914 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22915 } while (0)
22916 #else
22917 #define START_USE(R) do {} while (0)
22918 #define END_USE(R) do {} while (0)
22919 #define NOT_INUSE(R) do {} while (0)
22920 #endif
22922 if (DEFAULT_ABI == ABI_ELFv2)
22924 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
22926 /* With -mminimal-toc we may generate an extra use of r2 below. */
22927 if (!TARGET_SINGLE_PIC_BASE
22928 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
22929 cfun->machine->r2_setup_needed = true;
22933 if (flag_stack_usage_info)
22934 current_function_static_stack_size = info->total_size;
22936 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
22938 HOST_WIDE_INT size = info->total_size;
22940 if (crtl->is_leaf && !cfun->calls_alloca)
22942 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
22943 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
22944 size - STACK_CHECK_PROTECT);
22946 else if (size > 0)
22947 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
22950 if (TARGET_FIX_AND_CONTINUE)
22952 /* gdb on darwin arranges to forward a function from the old
22953 address by modifying the first 5 instructions of the function
22954 to branch to the overriding function. This is necessary to
22955 permit function pointers that point to the old function to
22956 actually forward to the new function. */
22957 emit_insn (gen_nop ());
22958 emit_insn (gen_nop ());
22959 emit_insn (gen_nop ());
22960 emit_insn (gen_nop ());
22961 emit_insn (gen_nop ());
22964 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
22966 reg_mode = V2SImode;
22967 reg_size = 8;
22970 /* Handle world saves specially here. */
22971 if (WORLD_SAVE_P (info))
22973 int i, j, sz;
22974 rtx treg;
22975 rtvec p;
22976 rtx reg0;
22978 /* save_world expects lr in r0. */
22979 reg0 = gen_rtx_REG (Pmode, 0);
22980 if (info->lr_save_p)
22982 insn = emit_move_insn (reg0,
22983 gen_rtx_REG (Pmode, LR_REGNO));
22984 RTX_FRAME_RELATED_P (insn) = 1;
22987 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
22988 assumptions about the offsets of various bits of the stack
22989 frame. */
22990 gcc_assert (info->gp_save_offset == -220
22991 && info->fp_save_offset == -144
22992 && info->lr_save_offset == 8
22993 && info->cr_save_offset == 4
22994 && info->push_p
22995 && info->lr_save_p
22996 && (!crtl->calls_eh_return
22997 || info->ehrd_offset == -432)
22998 && info->vrsave_save_offset == -224
22999 && info->altivec_save_offset == -416);
23001 treg = gen_rtx_REG (SImode, 11);
23002 emit_move_insn (treg, GEN_INT (-info->total_size));
23004 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
23005 in R11. It also clobbers R12, so beware! */
23007 /* Preserve CR2 for save_world prologues */
23008 sz = 5;
23009 sz += 32 - info->first_gp_reg_save;
23010 sz += 64 - info->first_fp_reg_save;
23011 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
23012 p = rtvec_alloc (sz);
23013 j = 0;
23014 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
23015 gen_rtx_REG (SImode,
23016 LR_REGNO));
23017 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
23018 gen_rtx_SYMBOL_REF (Pmode,
23019 "*save_world"));
23020 /* We do floats first so that the instruction pattern matches
23021 properly. */
23022 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23023 RTVEC_ELT (p, j++)
23024 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23025 ? DFmode : SFmode,
23026 info->first_fp_reg_save + i),
23027 frame_reg_rtx,
23028 info->fp_save_offset + frame_off + 8 * i);
23029 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
23030 RTVEC_ELT (p, j++)
23031 = gen_frame_store (gen_rtx_REG (V4SImode,
23032 info->first_altivec_reg_save + i),
23033 frame_reg_rtx,
23034 info->altivec_save_offset + frame_off + 16 * i);
23035 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23036 RTVEC_ELT (p, j++)
23037 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23038 frame_reg_rtx,
23039 info->gp_save_offset + frame_off + reg_size * i);
23041 /* CR register traditionally saved as CR2. */
23042 RTVEC_ELT (p, j++)
23043 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
23044 frame_reg_rtx, info->cr_save_offset + frame_off);
23045 /* Explain the use of R0. */
23046 if (info->lr_save_p)
23047 RTVEC_ELT (p, j++)
23048 = gen_frame_store (reg0,
23049 frame_reg_rtx, info->lr_save_offset + frame_off);
23050 /* Explain what happens to the stack pointer. */
23052 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
23053 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
23056 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23057 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23058 treg, GEN_INT (-info->total_size), NULL_RTX);
23059 sp_off = frame_off = info->total_size;
23062 strategy = info->savres_strategy;
23064 /* For V.4, update stack before we do any saving and set back pointer. */
23065 if (! WORLD_SAVE_P (info)
23066 && info->push_p
23067 && (DEFAULT_ABI == ABI_V4
23068 || crtl->calls_eh_return))
23070 bool need_r11 = (TARGET_SPE
23071 ? (!(strategy & SAVE_INLINE_GPRS)
23072 && info->spe_64bit_regs_used == 0)
23073 : (!(strategy & SAVE_INLINE_FPRS)
23074 || !(strategy & SAVE_INLINE_GPRS)
23075 || !(strategy & SAVE_INLINE_VRS)));
23076 int ptr_regno = -1;
23077 rtx ptr_reg = NULL_RTX;
23078 int ptr_off = 0;
23080 if (info->total_size < 32767)
23081 frame_off = info->total_size;
23082 else if (need_r11)
23083 ptr_regno = 11;
23084 else if (info->cr_save_p
23085 || info->lr_save_p
23086 || info->first_fp_reg_save < 64
23087 || info->first_gp_reg_save < 32
23088 || info->altivec_size != 0
23089 || info->vrsave_mask != 0
23090 || crtl->calls_eh_return)
23091 ptr_regno = 12;
23092 else
23094 /* The prologue won't be saving any regs so there is no need
23095 to set up a frame register to access any frame save area.
23096 We also won't be using frame_off anywhere below, but set
23097 the correct value anyway to protect against future
23098 changes to this function. */
23099 frame_off = info->total_size;
23101 if (ptr_regno != -1)
23103 /* Set up the frame offset to that needed by the first
23104 out-of-line save function. */
23105 START_USE (ptr_regno);
23106 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23107 frame_reg_rtx = ptr_reg;
23108 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
23109 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
23110 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
23111 ptr_off = info->gp_save_offset + info->gp_size;
23112 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
23113 ptr_off = info->altivec_save_offset + info->altivec_size;
23114 frame_off = -ptr_off;
23116 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23117 sp_off = info->total_size;
23118 if (frame_reg_rtx != sp_reg_rtx)
23119 rs6000_emit_stack_tie (frame_reg_rtx, false);
23122 /* If we use the link register, get it into r0. */
23123 if (!WORLD_SAVE_P (info) && info->lr_save_p)
23125 rtx addr, reg, mem;
23127 reg = gen_rtx_REG (Pmode, 0);
23128 START_USE (0);
23129 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
23130 RTX_FRAME_RELATED_P (insn) = 1;
23132 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
23133 | SAVE_NOINLINE_FPRS_SAVES_LR)))
23135 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23136 GEN_INT (info->lr_save_offset + frame_off));
23137 mem = gen_rtx_MEM (Pmode, addr);
23138 /* This should not be of rs6000_sr_alias_set, because of
23139 __builtin_return_address. */
23141 insn = emit_move_insn (mem, reg);
23142 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23143 NULL_RTX, NULL_RTX, NULL_RTX);
23144 END_USE (0);
23148 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
23149 r12 will be needed by the out-of-line gpr save. */
23150 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23151 && !(strategy & (SAVE_INLINE_GPRS
23152 | SAVE_NOINLINE_GPRS_SAVES_LR))
23153 ? 11 : 12);
23154 if (!WORLD_SAVE_P (info)
23155 && info->cr_save_p
23156 && REGNO (frame_reg_rtx) != cr_save_regno
23157 && !(using_static_chain_p && cr_save_regno == 11))
23159 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
23160 START_USE (cr_save_regno);
23161 rs6000_emit_move_from_cr (cr_save_rtx);
23164 /* Do any required saving of fpr's. If only one or two to save, do
23165 it ourselves. Otherwise, call function. */
23166 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
23168 int i;
23169 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23170 if (save_reg_p (info->first_fp_reg_save + i))
23171 emit_frame_save (frame_reg_rtx,
23172 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23173 ? DFmode : SFmode),
23174 info->first_fp_reg_save + i,
23175 info->fp_save_offset + frame_off + 8 * i,
23176 sp_off - frame_off);
23178 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
23180 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23181 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23182 unsigned ptr_regno = ptr_regno_for_savres (sel);
23183 rtx ptr_reg = frame_reg_rtx;
23185 if (REGNO (frame_reg_rtx) == ptr_regno)
23186 gcc_checking_assert (frame_off == 0);
23187 else
23189 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23190 NOT_INUSE (ptr_regno);
23191 emit_insn (gen_add3_insn (ptr_reg,
23192 frame_reg_rtx, GEN_INT (frame_off)));
23194 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23195 info->fp_save_offset,
23196 info->lr_save_offset,
23197 DFmode, sel);
23198 rs6000_frame_related (insn, ptr_reg, sp_off,
23199 NULL_RTX, NULL_RTX, NULL_RTX);
23200 if (lr)
23201 END_USE (0);
23204 /* Save GPRs. This is done as a PARALLEL if we are using
23205 the store-multiple instructions. */
23206 if (!WORLD_SAVE_P (info)
23207 && TARGET_SPE_ABI
23208 && info->spe_64bit_regs_used != 0
23209 && info->first_gp_reg_save != 32)
23211 int i;
23212 rtx spe_save_area_ptr;
23213 HOST_WIDE_INT save_off;
23214 int ool_adjust = 0;
23216 /* Determine whether we can address all of the registers that need
23217 to be saved with an offset from frame_reg_rtx that fits in
23218 the small const field for SPE memory instructions. */
23219 int spe_regs_addressable
23220 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
23221 + reg_size * (32 - info->first_gp_reg_save - 1))
23222 && (strategy & SAVE_INLINE_GPRS));
23224 if (spe_regs_addressable)
23226 spe_save_area_ptr = frame_reg_rtx;
23227 save_off = frame_off;
23229 else
23231 /* Make r11 point to the start of the SPE save area. We need
23232 to be careful here if r11 is holding the static chain. If
23233 it is, then temporarily save it in r0. */
23234 HOST_WIDE_INT offset;
23236 if (!(strategy & SAVE_INLINE_GPRS))
23237 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
23238 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
23239 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
23240 save_off = frame_off - offset;
23242 if (using_static_chain_p)
23244 rtx r0 = gen_rtx_REG (Pmode, 0);
23246 START_USE (0);
23247 gcc_assert (info->first_gp_reg_save > 11);
23249 emit_move_insn (r0, spe_save_area_ptr);
23251 else if (REGNO (frame_reg_rtx) != 11)
23252 START_USE (11);
23254 emit_insn (gen_addsi3 (spe_save_area_ptr,
23255 frame_reg_rtx, GEN_INT (offset)));
23256 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
23257 frame_off = -info->spe_gp_save_offset + ool_adjust;
23260 if ((strategy & SAVE_INLINE_GPRS))
23262 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23263 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23264 emit_frame_save (spe_save_area_ptr, reg_mode,
23265 info->first_gp_reg_save + i,
23266 (info->spe_gp_save_offset + save_off
23267 + reg_size * i),
23268 sp_off - save_off);
23270 else
23272 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
23273 info->spe_gp_save_offset + save_off,
23274 0, reg_mode,
23275 SAVRES_SAVE | SAVRES_GPR);
23277 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
23278 NULL_RTX, NULL_RTX, NULL_RTX);
23281 /* Move the static chain pointer back. */
23282 if (!spe_regs_addressable)
23284 if (using_static_chain_p)
23286 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
23287 END_USE (0);
23289 else if (REGNO (frame_reg_rtx) != 11)
23290 END_USE (11);
23293 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
23295 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23296 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23297 unsigned ptr_regno = ptr_regno_for_savres (sel);
23298 rtx ptr_reg = frame_reg_rtx;
23299 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23300 int end_save = info->gp_save_offset + info->gp_size;
23301 int ptr_off;
23303 if (!ptr_set_up)
23304 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23306 /* Need to adjust r11 (r12) if we saved any FPRs. */
23307 if (end_save + frame_off != 0)
23309 rtx offset = GEN_INT (end_save + frame_off);
23311 if (ptr_set_up)
23312 frame_off = -end_save;
23313 else
23314 NOT_INUSE (ptr_regno);
23315 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23317 else if (!ptr_set_up)
23319 NOT_INUSE (ptr_regno);
23320 emit_move_insn (ptr_reg, frame_reg_rtx);
23322 ptr_off = -end_save;
23323 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23324 info->gp_save_offset + ptr_off,
23325 info->lr_save_offset + ptr_off,
23326 reg_mode, sel);
23327 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23328 NULL_RTX, NULL_RTX, NULL_RTX);
23329 if (lr)
23330 END_USE (0);
23332 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23334 rtvec p;
23335 int i;
23336 p = rtvec_alloc (32 - info->first_gp_reg_save);
23337 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23338 RTVEC_ELT (p, i)
23339 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23340 frame_reg_rtx,
23341 info->gp_save_offset + frame_off + reg_size * i);
23342 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23343 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23344 NULL_RTX, NULL_RTX, NULL_RTX);
23346 else if (!WORLD_SAVE_P (info))
23348 int i;
23349 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23350 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23351 emit_frame_save (frame_reg_rtx, reg_mode,
23352 info->first_gp_reg_save + i,
23353 info->gp_save_offset + frame_off + reg_size * i,
23354 sp_off - frame_off);
23357 if (crtl->calls_eh_return)
23359 unsigned int i;
23360 rtvec p;
23362 for (i = 0; ; ++i)
23364 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23365 if (regno == INVALID_REGNUM)
23366 break;
23369 p = rtvec_alloc (i);
23371 for (i = 0; ; ++i)
23373 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23374 if (regno == INVALID_REGNUM)
23375 break;
23377 insn
23378 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23379 sp_reg_rtx,
23380 info->ehrd_offset + sp_off + reg_size * (int) i);
23381 RTVEC_ELT (p, i) = insn;
23382 RTX_FRAME_RELATED_P (insn) = 1;
23385 insn = emit_insn (gen_blockage ());
23386 RTX_FRAME_RELATED_P (insn) = 1;
23387 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23390 /* In AIX ABI we need to make sure r2 is really saved. */
23391 if (TARGET_AIX && crtl->calls_eh_return)
23393 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23394 rtx save_insn, join_insn, note;
23395 long toc_restore_insn;
23397 tmp_reg = gen_rtx_REG (Pmode, 11);
23398 tmp_reg_si = gen_rtx_REG (SImode, 11);
23399 if (using_static_chain_p)
23401 START_USE (0);
23402 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23404 else
23405 START_USE (11);
23406 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23407 /* Peek at the instruction to which this function returns. If it's
23408 restoring r2, then we know we've already saved r2. We can't
23409 unconditionally save r2 because the value we have will already
23410 be updated if we arrived at this function via a plt call or
23411 toc adjusting stub. */
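/* Decoded example (illustrative; assumes the usual 64-bit
   RS6000_TOC_SAVE_SLOT of 40): the expected return-site word is then
   0xE8410028, i.e. "ld 2,40(1)"; the 32-bit form 0x8041xxxx is the
   matching "lwz 2,N(1)".  */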
23412 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23413 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23414 + RS6000_TOC_SAVE_SLOT);
23415 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23416 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23417 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23418 validate_condition_mode (EQ, CCUNSmode);
23419 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23420 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
23421 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
23422 toc_save_done = gen_label_rtx ();
23423 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
23424 gen_rtx_EQ (VOIDmode, compare_result,
23425 const0_rtx),
23426 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
23427 pc_rtx);
23428 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
23429 JUMP_LABEL (jump) = toc_save_done;
23430 LABEL_NUSES (toc_save_done) += 1;
23432 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
23433 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
23434 sp_off - frame_off);
23436 emit_label (toc_save_done);
23438 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
23439 have a CFG that has different saves along different paths.
23440 Move the note to a dummy blockage insn, which describes that
23441 R2 is unconditionally saved after the label. */
23442 /* ??? An alternate representation might be a special insn pattern
23443 containing both the branch and the store. That might let the
23444 code that minimizes the number of DW_CFA_advance opcodes better
23445 freedom in placing the annotations. */
23446 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
23447 if (note)
23448 remove_note (save_insn, note);
23449 else
23450 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
23451 copy_rtx (PATTERN (save_insn)), NULL_RTX);
23452 RTX_FRAME_RELATED_P (save_insn) = 0;
23454 join_insn = emit_insn (gen_blockage ());
23455 REG_NOTES (join_insn) = note;
23456 RTX_FRAME_RELATED_P (join_insn) = 1;
23458 if (using_static_chain_p)
23460 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
23461 END_USE (0);
23463 else
23464 END_USE (11);
23467 /* Save CR if we use any that must be preserved. */
23468 if (!WORLD_SAVE_P (info) && info->cr_save_p)
23470 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23471 GEN_INT (info->cr_save_offset + frame_off));
23472 rtx mem = gen_frame_mem (SImode, addr);
23474 /* If we didn't copy cr before, do so now using r0. */
23475 if (cr_save_rtx == NULL_RTX)
23477 START_USE (0);
23478 cr_save_rtx = gen_rtx_REG (SImode, 0);
23479 rs6000_emit_move_from_cr (cr_save_rtx);
23482 /* Saving CR requires a two-instruction sequence: one instruction
23483 to move the CR to a general-purpose register, and a second
23484 instruction that stores the GPR to memory.
23486 We do not emit any DWARF CFI records for the first of these,
23487 because we cannot properly represent the fact that CR is saved in
23488 a register. One reason is that we cannot express that multiple
23489 CR fields are saved; another reason is that on 64-bit, the size
23490 of the CR register in DWARF (4 bytes) differs from the size of
23491 a general-purpose register.
23493 This means if any intervening instruction were to clobber one of
23494 the call-saved CR fields, we'd have incorrect CFI. To prevent
23495 this from happening, we mark the store to memory as a use of
23496 those CR fields, which prevents any such instruction from being
23497 scheduled in between the two instructions. */
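/* Editor's sketch of the sequence this emits (register numbers and
   offset illustrative, not from the original source):

	mfcr  rN              # move all CR fields into a GPR
	stw   rN,off(rFRAME)  # store the GPR to the save slot

   The PARALLEL built below attaches a (use (reg:CC crK)) for each
   call-saved field to the store, so the scheduler cannot move a
   CR-clobbering instruction between the two insns.  */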
23498 rtx crsave_v[9];
23499 int n_crsave = 0;
23500 int i;
23502 crsave_v[n_crsave++] = gen_rtx_SET (VOIDmode, mem, cr_save_rtx);
23503 for (i = 0; i < 8; i++)
23504 if (save_reg_p (CR0_REGNO + i))
23505 crsave_v[n_crsave++]
23506 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23508 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
23509 gen_rtvec_v (n_crsave, crsave_v)));
23510 END_USE (REGNO (cr_save_rtx));
23512 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23513 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23514 so we need to construct a frame expression manually. */
23515 RTX_FRAME_RELATED_P (insn) = 1;
23517 /* Update address to be stack-pointer relative, like
23518 rs6000_frame_related would do. */
23519 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
23520 GEN_INT (info->cr_save_offset + sp_off));
23521 mem = gen_frame_mem (SImode, addr);
23523 if (DEFAULT_ABI == ABI_ELFv2)
23525 /* In the ELFv2 ABI we generate separate CFI records for each
23526 CR field that was actually saved. They all point to the
23527 same 32-bit stack slot. */
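/* Editor's note (a sketch of the resulting CFI, not from the
   original source): for a function saving the call-saved fields
   CR2..CR4 this produces three records of the form "CR2 saved at
   slot S", "CR3 saved at slot S", "CR4 saved at slot S", all naming
   the same 32-bit slot, which lets the ELFv2 unwinder restore each
   field independently.  */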
23528 rtx crframe[8];
23529 int n_crframe = 0;
23531 for (i = 0; i < 8; i++)
23532 if (save_reg_p (CR0_REGNO + i))
23534 crframe[n_crframe]
23535 = gen_rtx_SET (VOIDmode, mem,
23536 gen_rtx_REG (SImode, CR0_REGNO + i));
23538 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
23539 n_crframe++;
23542 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23543 gen_rtx_PARALLEL (VOIDmode,
23544 gen_rtvec_v (n_crframe, crframe)));
23546 else
23548 /* In other ABIs, by convention, we use a single CR regnum to
23549 represent the fact that all call-saved CR fields are saved.
23550 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23551 rtx set = gen_rtx_SET (VOIDmode, mem,
23552 gen_rtx_REG (SImode, CR2_REGNO));
23553 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
23557 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23558 *separate* slots if the routine calls __builtin_eh_return, so
23559 that they can be independently restored by the unwinder. */
23560 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23562 int i, cr_off = info->ehcr_offset;
23563 rtx crsave;
23565 /* ??? We might get better performance by using multiple mfocrf
23566 instructions. */
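/* Editor's note on the ??? above: mfocrf copies a single CR field
   selected by its FXM mask (e.g. "mfocrf rN,8" for CR4) rather than
   the whole CR, which can avoid the serialization cost of a full
   mfcr on some processors.  A sketch only; the code below keeps the
   single mfcr.  */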
23567 crsave = gen_rtx_REG (SImode, 0);
23568 emit_insn (gen_movesi_from_cr (crsave));
23570 for (i = 0; i < 8; i++)
23571 if (!call_used_regs[CR0_REGNO + i])
23573 rtvec p = rtvec_alloc (2);
23574 RTVEC_ELT (p, 0)
23575 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
23576 RTVEC_ELT (p, 1)
23577 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23579 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23581 RTX_FRAME_RELATED_P (insn) = 1;
23582 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23583 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
23584 sp_reg_rtx, cr_off + sp_off));
23586 cr_off += reg_size;
23590 /* Update stack and set back pointer unless this is V.4,
23591 for which it was done previously. */
23592 if (!WORLD_SAVE_P (info) && info->push_p
23593 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
23595 rtx ptr_reg = NULL;
23596 int ptr_off = 0;
23598 /* If saving altivec regs we need to be able to address all save
23599 locations using a 16-bit offset. */
23600 if ((strategy & SAVE_INLINE_VRS) == 0
23601 || (info->altivec_size != 0
23602 && (info->altivec_save_offset + info->altivec_size - 16
23603 + info->total_size - frame_off) > 32767)
23604 || (info->vrsave_size != 0
23605 && (info->vrsave_save_offset
23606 + info->total_size - frame_off) > 32767))
23608 int sel = SAVRES_SAVE | SAVRES_VR;
23609 unsigned ptr_regno = ptr_regno_for_savres (sel);
23611 if (using_static_chain_p
23612 && ptr_regno == STATIC_CHAIN_REGNUM)
23613 ptr_regno = 12;
23614 if (REGNO (frame_reg_rtx) != ptr_regno)
23615 START_USE (ptr_regno);
23616 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23617 frame_reg_rtx = ptr_reg;
23618 ptr_off = info->altivec_save_offset + info->altivec_size;
23619 frame_off = -ptr_off;
23621 else if (REGNO (frame_reg_rtx) == 1)
23622 frame_off = info->total_size;
23623 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23624 sp_off = info->total_size;
23625 if (frame_reg_rtx != sp_reg_rtx)
23626 rs6000_emit_stack_tie (frame_reg_rtx, false);
23629 /* Set frame pointer, if needed. */
23630 if (frame_pointer_needed)
23632 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
23633 sp_reg_rtx);
23634 RTX_FRAME_RELATED_P (insn) = 1;
23637 /* Save AltiVec registers if needed. Save here because the red zone does
23638 not always include AltiVec registers. */
23639 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23640 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
23642 int end_save = info->altivec_save_offset + info->altivec_size;
23643 int ptr_off;
23644 /* Oddly, the vector save/restore functions point r0 at the end
23645 of the save area, then use r11 or r12 to load offsets for
23646 [reg+reg] addressing. */
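/* Editor's sketch of the resulting shape (register numbers and the
   routine name are illustrative only, not from the original source):

	addi  r0,rFRAME,end_save   # r0 -> just past the VR save area
	bl    _savevr_20           # out-of-line save routine

   where the routine stores v20..v31 at negative offsets from r0,
   materializing each offset in r11 or r12 for [reg+reg] stvx
   addressing and clobbering that register in the process.  */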
23647 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
23648 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
23649 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
23651 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23652 NOT_INUSE (0);
23653 if (end_save + frame_off != 0)
23655 rtx offset = GEN_INT (end_save + frame_off);
23657 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23659 else
23660 emit_move_insn (ptr_reg, frame_reg_rtx);
23662 ptr_off = -end_save;
23663 insn = rs6000_emit_savres_rtx (info, scratch_reg,
23664 info->altivec_save_offset + ptr_off,
23665 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
23666 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
23667 NULL_RTX, NULL_RTX, NULL_RTX);
23668 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
23670 /* The oddity mentioned above clobbered our frame reg. */
23671 emit_move_insn (frame_reg_rtx, ptr_reg);
23672 frame_off = ptr_off;
23675 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23676 && info->altivec_size != 0)
23678 int i;
23680 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
23681 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
23683 rtx areg, savereg, mem, split_reg;
23684 int offset;
23686 offset = (info->altivec_save_offset + frame_off
23687 + 16 * (i - info->first_altivec_reg_save));
23689 savereg = gen_rtx_REG (V4SImode, i);
23691 NOT_INUSE (0);
23692 areg = gen_rtx_REG (Pmode, 0);
23693 emit_move_insn (areg, GEN_INT (offset));
23695 /* AltiVec addressing mode is [reg+reg]. */
23696 mem = gen_frame_mem (V4SImode,
23697 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
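/* Editor's sketch of the code this emits (offset illustrative):

	li    r0,OFFSET       # vector loads/stores have no reg+disp
	stvx  vN,rFRAME,r0    # form, only [reg+reg]  */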
23699 insn = emit_move_insn (mem, savereg);
23701 /* When we split a VSX store into two insns, we need to make
23702 sure the DWARF info knows which register we are storing.
23703 Pass it in to be used on the appropriate note. */
23704 if (!BYTES_BIG_ENDIAN
23705 && GET_CODE (PATTERN (insn)) == SET
23706 && GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
23707 split_reg = savereg;
23708 else
23709 split_reg = NULL_RTX;
23711 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23712 areg, GEN_INT (offset), split_reg);
23716 /* VRSAVE is a bit vector representing which AltiVec registers
23717 are used. The OS uses this to determine which vector
23718 registers to save on a context switch. We need to save
23719 VRSAVE on the stack frame, add whatever AltiVec registers we
23720 used in this function, and do the corresponding magic in the
23721 epilogue. */
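/* Editor's sketch of the VRSAVE update emitted below, assuming a
   non-Darwin target (VRSAVE is SPR 256; mask value illustrative):

	mfspr rN,256          # read the caller's VRSAVE
	stw   rN,off(rFRAME)  # save it in the frame
	oris  rN,rN,mask      # OR in the AltiVec regs we use
	mtspr 256,rN          # install the new mask  */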
23723 if (!WORLD_SAVE_P (info)
23724 && TARGET_ALTIVEC
23725 && TARGET_ALTIVEC_VRSAVE
23726 && info->vrsave_mask != 0)
23728 rtx reg, vrsave;
23729 int offset;
23730 int save_regno;
23732 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
23733 be using r12 as frame_reg_rtx and r11 as the static chain
23734 pointer for nested functions. */
23735 save_regno = 12;
23736 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23737 && !using_static_chain_p)
23738 save_regno = 11;
23739 else if (REGNO (frame_reg_rtx) == 12)
23741 save_regno = 11;
23742 if (using_static_chain_p)
23743 save_regno = 0;
23746 NOT_INUSE (save_regno);
23747 reg = gen_rtx_REG (SImode, save_regno);
23748 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
23749 if (TARGET_MACHO)
23750 emit_insn (gen_get_vrsave_internal (reg));
23751 else
23752 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
23754 /* Save VRSAVE. */
23755 offset = info->vrsave_save_offset + frame_off;
23756 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
23758 /* Include the registers in the mask. */
23759 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
23761 insn = emit_insn (generate_set_vrsave (reg, info, 0));
23764 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
23765 if (!TARGET_SINGLE_PIC_BASE
23766 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23767 || (DEFAULT_ABI == ABI_V4
23768 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23769 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
23771 /* If emit_load_toc_table will use the link register, we need to save
23772 it. We use R12 for this purpose because emit_load_toc_table
23773 can use register 0. This allows us to use a plain 'blr' to return
23774 from the procedure more often. */
23775 int save_LR_around_toc_setup = (TARGET_ELF
23776 && DEFAULT_ABI == ABI_V4
23777 && flag_pic
23778 && ! info->lr_save_p
23779 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
23780 if (save_LR_around_toc_setup)
23782 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23783 rtx tmp = gen_rtx_REG (Pmode, 12);
23785 insn = emit_move_insn (tmp, lr);
23786 RTX_FRAME_RELATED_P (insn) = 1;
23788 rs6000_emit_load_toc_table (TRUE);
23790 insn = emit_move_insn (lr, tmp);
23791 add_reg_note (insn, REG_CFA_RESTORE, lr);
23792 RTX_FRAME_RELATED_P (insn) = 1;
23794 else
23795 rs6000_emit_load_toc_table (TRUE);
23798 #if TARGET_MACHO
23799 if (!TARGET_SINGLE_PIC_BASE
23800 && DEFAULT_ABI == ABI_DARWIN
23801 && flag_pic && crtl->uses_pic_offset_table)
23803 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23804 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
23806 /* Save and restore LR locally around this call (in R0). */
23807 if (!info->lr_save_p)
23808 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
23810 emit_insn (gen_load_macho_picbase (src));
23812 emit_move_insn (gen_rtx_REG (Pmode,
23813 RS6000_PIC_OFFSET_TABLE_REGNUM),
23814 lr);
23816 if (!info->lr_save_p)
23817 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
23819 #endif
23821 /* If we need to, save the TOC register after doing the stack setup.
23822 Do not emit eh frame info for this save. The unwinder wants info,
23823 conceptually attached to instructions in this function, about
23824 register values in the caller of this function. This R2 may have
23825 already been changed from the value in the caller.
23826 We don't attempt to write accurate DWARF EH frame info for R2
23827 because code emitted by gcc for a (non-pointer) function call
23828 doesn't save and restore R2. Instead, R2 is managed out-of-line
23829 by a linker generated plt call stub when the function resides in
23830 a shared library. This behaviour is costly to describe in DWARF,
23831 both in terms of the size of DWARF info and the time taken in the
23832 unwinder to interpret it. R2 changes, apart from the
23833 calls_eh_return case earlier in this function, are handled by
23834 linux-unwind.h frob_update_context. */
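/* Editor's illustration (standard 64-bit ELFv1 linkage, not from
   the original source): a cross-module call compiles as

	bl    foo
	nop

   and when the linker routes the call through an r2-switching plt
   stub, the stub stores r2 at 40(r1) -- RS6000_TOC_SAVE_SLOT -- and
   the nop is rewritten to "ld r2,40(r1)", so R2 is managed entirely
   out of line rather than described in DWARF.  */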
23835 if (rs6000_save_toc_in_prologue_p ())
23837 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
23838 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
23842 /* Write function prologue. */
23844 static void
23845 rs6000_output_function_prologue (FILE *file,
23846 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23848 rs6000_stack_t *info = rs6000_stack_info ();
23850 if (TARGET_DEBUG_STACK)
23851 debug_stack_info (info);
23853 /* Write .extern for any function we will call to save and restore
23854 fp values. */
23855 if (info->first_fp_reg_save < 64
23856 && !TARGET_MACHO
23857 && !TARGET_ELF)
23859 char *name;
23860 int regno = info->first_fp_reg_save - 32;
23862 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
23864 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23865 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23866 name = rs6000_savres_routine_name (info, regno, sel);
23867 fprintf (file, "\t.extern %s\n", name);
23869 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
23871 bool lr = (info->savres_strategy
23872 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
23873 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
23874 name = rs6000_savres_routine_name (info, regno, sel);
23875 fprintf (file, "\t.extern %s\n", name);
23879 /* ELFv2 ABI r2 setup code and local entry point. This must follow
23880 immediately after the global entry point label. */
23881 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
23883 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
23885 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
23886 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
23888 fputs ("\t.localentry\t", file);
23889 assemble_name (file, name);
23890 fputs (",.-", file);
23891 assemble_name (file, name);
23892 fputs ("\n", file);
23895 /* Output -mprofile-kernel code. This needs to be done here instead of
23896 in output_function_profile since it must go after the ELFv2 ABI
23897 local entry point. */
23898 if (TARGET_PROFILE_KERNEL && crtl->profile)
23900 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23901 gcc_assert (!TARGET_32BIT);
23903 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
23904 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
23906 /* In the ELFv2 ABI we have no compiler stack word. It must be
23907 the responsibility of _mcount to preserve the static chain
23908 register if required. */
23909 if (DEFAULT_ABI != ABI_ELFv2
23910 && cfun->static_chain_decl != NULL)
23912 asm_fprintf (file, "\tstd %s,24(%s)\n",
23913 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23914 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23915 asm_fprintf (file, "\tld %s,24(%s)\n",
23916 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23918 else
23919 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23922 rs6000_pic_labelno++;
23925 /* Non-zero if vmx regs are restored before the frame pop, zero if
23926 we restore after the pop when possible. */
23927 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
23929 /* Restoring cr is a two-step process: loading a reg from the frame
23930 save, then moving the reg to cr. For ABI_V4 we must let the
23931 unwinder know that the stack location is no longer valid at or
23932 before the stack deallocation, but we can't emit a cfa_restore for
23933 cr at the stack deallocation like we do for other registers.
23934 The trouble is that it is possible for the move to cr to be
23935 scheduled after the stack deallocation. So say exactly where cr
23936 is located on each of the two insns. */
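/* Editor's sketch of the two insns in question (offset and register
   number illustrative):

	lwz   rN,off(r1)      # load the saved CR image from the frame
	mtcrf 0xFF,rN         # (or per-field mtocrf) move it to CR

   The REG_CFA_REGISTER / REG_CFA_RESTORE notes added below pin CR's
   location to each insn, so the unwinder stays correct even if the
   mtcrf is scheduled past the stack deallocation.  */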
23938 static rtx
23939 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
23941 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
23942 rtx reg = gen_rtx_REG (SImode, regno);
23943 rtx insn = emit_move_insn (reg, mem);
23945 if (!exit_func && DEFAULT_ABI == ABI_V4)
23947 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
23948 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
23950 add_reg_note (insn, REG_CFA_REGISTER, set);
23951 RTX_FRAME_RELATED_P (insn) = 1;
23953 return reg;
23956 /* Reload CR from REG. */
23958 static void
23959 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
23961 int count = 0;
23962 int i;
23964 if (using_mfcr_multiple)
23966 for (i = 0; i < 8; i++)
23967 if (save_reg_p (CR0_REGNO + i))
23968 count++;
23969 gcc_assert (count);
23972 if (using_mfcr_multiple && count > 1)
23974 rtx insn;
23975 rtvec p;
23976 int ndx;
23978 p = rtvec_alloc (count);
23980 ndx = 0;
23981 for (i = 0; i < 8; i++)
23982 if (save_reg_p (CR0_REGNO + i))
23984 rtvec r = rtvec_alloc (2);
23985 RTVEC_ELT (r, 0) = reg;
23986 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
23987 RTVEC_ELT (p, ndx) =
23988 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
23989 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
23990 ndx++;
23992 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23993 gcc_assert (ndx == count);
23995 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23996 CR field separately. */
23997 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
23999 for (i = 0; i < 8; i++)
24000 if (save_reg_p (CR0_REGNO + i))
24001 add_reg_note (insn, REG_CFA_RESTORE,
24002 gen_rtx_REG (SImode, CR0_REGNO + i));
24004 RTX_FRAME_RELATED_P (insn) = 1;
24007 else
24008 for (i = 0; i < 8; i++)
24009 if (save_reg_p (CR0_REGNO + i))
24011 rtx insn = emit_insn (gen_movsi_to_cr_one
24012 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24014 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24015 CR field separately, attached to the insn that in fact
24016 restores this particular CR field. */
24017 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24019 add_reg_note (insn, REG_CFA_RESTORE,
24020 gen_rtx_REG (SImode, CR0_REGNO + i));
24022 RTX_FRAME_RELATED_P (insn) = 1;
24026 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
24027 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
24028 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24030 rtx insn = get_last_insn ();
24031 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24033 add_reg_note (insn, REG_CFA_RESTORE, cr);
24034 RTX_FRAME_RELATED_P (insn) = 1;
24038 /* Like cr, the move to lr instruction can be scheduled after the
24039 stack deallocation, but unlike cr, its stack frame save is still
24040 valid. So we only need to emit the cfa_restore on the correct
24041 instruction. */
24043 static void
24044 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
24046 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
24047 rtx reg = gen_rtx_REG (Pmode, regno);
24049 emit_move_insn (reg, mem);
24052 static void
24053 restore_saved_lr (int regno, bool exit_func)
24055 rtx reg = gen_rtx_REG (Pmode, regno);
24056 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24057 rtx insn = emit_move_insn (lr, reg);
24059 if (!exit_func && flag_shrink_wrap)
24061 add_reg_note (insn, REG_CFA_RESTORE, lr);
24062 RTX_FRAME_RELATED_P (insn) = 1;
24066 static rtx
24067 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
24069 if (DEFAULT_ABI == ABI_ELFv2)
24071 int i;
24072 for (i = 0; i < 8; i++)
24073 if (save_reg_p (CR0_REGNO + i))
24075 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
24076 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
24077 cfa_restores);
24080 else if (info->cr_save_p)
24081 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24082 gen_rtx_REG (SImode, CR2_REGNO),
24083 cfa_restores);
24085 if (info->lr_save_p)
24086 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24087 gen_rtx_REG (Pmode, LR_REGNO),
24088 cfa_restores);
24089 return cfa_restores;
24092 /* Return true if OFFSET from stack pointer can be clobbered by signals.
24093 V.4 doesn't have any stack cushion; AIX ABIs have 220 or 288 bytes
24094 below the stack pointer not clobbered by signals.  */
24096 static inline bool
24097 offset_below_red_zone_p (HOST_WIDE_INT offset)
24099 return offset < (DEFAULT_ABI == ABI_V4
24100 ? 0
24101 : TARGET_32BIT ? -220 : -288);
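/* Editor's worked example (values from the ABIs named above): on
   64-bit AIX/ELFv1 the 288 bytes below the stack pointer are
   protected, so a slot at sp-320 returns true (clobberable) while
   one at sp-200 returns false; under V.4 every negative offset
   returns true because there is no red zone at all.  */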
24104 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
24106 static void
24107 emit_cfa_restores (rtx cfa_restores)
24109 rtx insn = get_last_insn ();
24110 rtx *loc = &REG_NOTES (insn);
24112 while (*loc)
24113 loc = &XEXP (*loc, 1);
24114 *loc = cfa_restores;
24115 RTX_FRAME_RELATED_P (insn) = 1;
24118 /* Emit function epilogue as insns. */
24120 void
24121 rs6000_emit_epilogue (int sibcall)
24123 rs6000_stack_t *info;
24124 int restoring_GPRs_inline;
24125 int restoring_FPRs_inline;
24126 int using_load_multiple;
24127 int using_mtcr_multiple;
24128 int use_backchain_to_restore_sp;
24129 int restore_lr;
24130 int strategy;
24131 HOST_WIDE_INT frame_off = 0;
24132 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
24133 rtx frame_reg_rtx = sp_reg_rtx;
24134 rtx cfa_restores = NULL_RTX;
24135 rtx insn;
24136 rtx cr_save_reg = NULL_RTX;
24137 enum machine_mode reg_mode = Pmode;
24138 int reg_size = TARGET_32BIT ? 4 : 8;
24139 int i;
24140 bool exit_func;
24141 unsigned ptr_regno;
24143 info = rs6000_stack_info ();
24145 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24147 reg_mode = V2SImode;
24148 reg_size = 8;
24151 strategy = info->savres_strategy;
24152 using_load_multiple = strategy & SAVRES_MULTIPLE;
24153 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
24154 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
24155 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
24156 || rs6000_cpu == PROCESSOR_PPC603
24157 || rs6000_cpu == PROCESSOR_PPC750
24158 || optimize_size);
24159 /* Restore via the backchain when we have a large frame, since this
24160 is more efficient than an addis, addi pair. The second condition
24161 here will not trigger at the moment; we don't actually need a
24162 frame pointer for alloca, but the generic parts of the compiler
24163 give us one anyway. */
24164 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
24165 || (cfun->calls_alloca
24166 && !frame_pointer_needed));
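/* Editor's note: the backchain word lives at offset 0 from the
   stack pointer, so a single load such as

	ld r11,0(r1)          # (lwz on 32-bit)

   recovers the caller's SP for any frame size, whereas adding
   info->total_size back to r1 would need an addis/addi pair once
   the size exceeds 32767.  */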
24167 restore_lr = (info->lr_save_p
24168 && (restoring_FPRs_inline
24169 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
24170 && (restoring_GPRs_inline
24171 || info->first_fp_reg_save < 64));
24173 if (WORLD_SAVE_P (info))
24175 int i, j;
24176 char rname[30];
24177 const char *alloc_rname;
24178 rtvec p;
24180 /* eh_rest_world_r10 will return to the location saved in the LR
24181 stack slot (which is not likely to be our caller.)
24182 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24183 rest_world is similar, except any R10 parameter is ignored.
24184 The exception-handling stuff that was here in 2.95 is no
24185 longer necessary. */
24187 p = rtvec_alloc (9
24189 + 32 - info->first_gp_reg_save
24190 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
24191 + 63 + 1 - info->first_fp_reg_save);
24193 strcpy (rname, ((crtl->calls_eh_return) ?
24194 "*eh_rest_world_r10" : "*rest_world"));
24195 alloc_rname = ggc_strdup (rname);
24197 j = 0;
24198 RTVEC_ELT (p, j++) = ret_rtx;
24199 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
24200 gen_rtx_REG (Pmode,
24201 LR_REGNO));
24202 RTVEC_ELT (p, j++)
24203 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
24204 /* The instruction pattern requires a clobber here;
24205 it is shared with the restVEC helper. */
24206 RTVEC_ELT (p, j++)
24207 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
24210 /* CR register traditionally saved as CR2. */
24211 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
24212 RTVEC_ELT (p, j++)
24213 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
24214 if (flag_shrink_wrap)
24216 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24217 gen_rtx_REG (Pmode, LR_REGNO),
24218 cfa_restores);
24219 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24223 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24225 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24226 RTVEC_ELT (p, j++)
24227 = gen_frame_load (reg,
24228 frame_reg_rtx, info->gp_save_offset + reg_size * i);
24229 if (flag_shrink_wrap)
24230 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24232 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
24234 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
24235 RTVEC_ELT (p, j++)
24236 = gen_frame_load (reg,
24237 frame_reg_rtx, info->altivec_save_offset + 16 * i);
24238 if (flag_shrink_wrap)
24239 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24241 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
24243 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24244 ? DFmode : SFmode),
24245 info->first_fp_reg_save + i);
24246 RTVEC_ELT (p, j++)
24247 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
24248 if (flag_shrink_wrap)
24249 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24251 RTVEC_ELT (p, j++)
24252 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
24253 RTVEC_ELT (p, j++)
24254 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
24255 RTVEC_ELT (p, j++)
24256 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
24257 RTVEC_ELT (p, j++)
24258 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
24259 RTVEC_ELT (p, j++)
24260 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
24261 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24263 if (flag_shrink_wrap)
24265 REG_NOTES (insn) = cfa_restores;
24266 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24267 RTX_FRAME_RELATED_P (insn) = 1;
24269 return;
24272 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24273 if (info->push_p)
24274 frame_off = info->total_size;
24276 /* Restore AltiVec registers if we must do so before adjusting the
24277 stack. */
24278 if (TARGET_ALTIVEC_ABI
24279 && info->altivec_size != 0
24280 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24281 || (DEFAULT_ABI != ABI_V4
24282 && offset_below_red_zone_p (info->altivec_save_offset))))
24284 int i;
24285 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24287 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24288 if (use_backchain_to_restore_sp)
24290 int frame_regno = 11;
24292 if ((strategy & REST_INLINE_VRS) == 0)
24294 /* Of r11 and r12, select the one not clobbered by an
24295 out-of-line restore function for the frame register. */
24296 frame_regno = 11 + 12 - scratch_regno;
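/* Editor's note: since scratch_regno is known to be 11 or 12
   (asserted above), 11 + 12 - scratch_regno simply yields the
   other register: 12 -> 11 and 11 -> 12.  */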
24298 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24299 emit_move_insn (frame_reg_rtx,
24300 gen_rtx_MEM (Pmode, sp_reg_rtx));
24301 frame_off = 0;
24303 else if (frame_pointer_needed)
24304 frame_reg_rtx = hard_frame_pointer_rtx;
24306 if ((strategy & REST_INLINE_VRS) == 0)
24308 int end_save = info->altivec_save_offset + info->altivec_size;
24309 int ptr_off;
24310 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24311 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24313 if (end_save + frame_off != 0)
24315 rtx offset = GEN_INT (end_save + frame_off);
24317 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24319 else
24320 emit_move_insn (ptr_reg, frame_reg_rtx);
24322 ptr_off = -end_save;
24323 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24324 info->altivec_save_offset + ptr_off,
24325 0, V4SImode, SAVRES_VR);
24327 else
24329 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24330 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24332 rtx addr, areg, mem, reg;
24334 areg = gen_rtx_REG (Pmode, 0);
24335 emit_move_insn
24336 (areg, GEN_INT (info->altivec_save_offset
24337 + frame_off
24338 + 16 * (i - info->first_altivec_reg_save)));
24340 /* AltiVec addressing mode is [reg+reg]. */
24341 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24342 mem = gen_frame_mem (V4SImode, addr);
24344 reg = gen_rtx_REG (V4SImode, i);
24345 emit_move_insn (reg, mem);
24349 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24350 if (((strategy & REST_INLINE_VRS) == 0
24351 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24352 && (flag_shrink_wrap
24353 || (offset_below_red_zone_p
24354 (info->altivec_save_offset
24355 + 16 * (i - info->first_altivec_reg_save)))))
24357 rtx reg = gen_rtx_REG (V4SImode, i);
24358 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24362 /* Restore VRSAVE if we must do so before adjusting the stack. */
24363 if (TARGET_ALTIVEC
24364 && TARGET_ALTIVEC_VRSAVE
24365 && info->vrsave_mask != 0
24366 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24367 || (DEFAULT_ABI != ABI_V4
24368 && offset_below_red_zone_p (info->vrsave_save_offset))))
24370 rtx reg;
24372 if (frame_reg_rtx == sp_reg_rtx)
24374 if (use_backchain_to_restore_sp)
24376 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24377 emit_move_insn (frame_reg_rtx,
24378 gen_rtx_MEM (Pmode, sp_reg_rtx));
24379 frame_off = 0;
24381 else if (frame_pointer_needed)
24382 frame_reg_rtx = hard_frame_pointer_rtx;
24385 reg = gen_rtx_REG (SImode, 12);
24386 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24387 info->vrsave_save_offset + frame_off));
24389 emit_insn (generate_set_vrsave (reg, info, 1));
24392 insn = NULL_RTX;
24393 /* If we have a large stack frame, restore the old stack pointer
24394 using the backchain. */
24395 if (use_backchain_to_restore_sp)
24397 if (frame_reg_rtx == sp_reg_rtx)
24399 /* Under V.4, don't reset the stack pointer until after we're done
24400 loading the saved registers. */
24401 if (DEFAULT_ABI == ABI_V4)
24402 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24404 insn = emit_move_insn (frame_reg_rtx,
24405 gen_rtx_MEM (Pmode, sp_reg_rtx));
24406 frame_off = 0;
24408 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24409 && DEFAULT_ABI == ABI_V4)
24410 /* frame_reg_rtx has been set up by the altivec restore.  */
24411 ;
24412 else
24414 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24415 frame_reg_rtx = sp_reg_rtx;
24418 /* If we have a frame pointer, we can restore the old stack pointer
24419 from it. */
24420 else if (frame_pointer_needed)
24422 frame_reg_rtx = sp_reg_rtx;
24423 if (DEFAULT_ABI == ABI_V4)
24424 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24425 /* Prevent reordering memory accesses against stack pointer restore. */
24426 else if (cfun->calls_alloca
24427 || offset_below_red_zone_p (-info->total_size))
24428 rs6000_emit_stack_tie (frame_reg_rtx, true);
24430 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
24431 GEN_INT (info->total_size)));
24432 frame_off = 0;
24434 else if (info->push_p
24435 && DEFAULT_ABI != ABI_V4
24436 && !crtl->calls_eh_return)
24438 /* Prevent reordering memory accesses against stack pointer restore. */
24439 if (cfun->calls_alloca
24440 || offset_below_red_zone_p (-info->total_size))
24441 rs6000_emit_stack_tie (frame_reg_rtx, false);
24442 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
24443 GEN_INT (info->total_size)));
24444 frame_off = 0;
24446 if (insn && frame_reg_rtx == sp_reg_rtx)
24448 if (cfa_restores)
24450 REG_NOTES (insn) = cfa_restores;
24451 cfa_restores = NULL_RTX;
24453 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24454 RTX_FRAME_RELATED_P (insn) = 1;
24457 /* Restore AltiVec registers if we have not done so already. */
24458 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24459 && TARGET_ALTIVEC_ABI
24460 && info->altivec_size != 0
24461 && (DEFAULT_ABI == ABI_V4
24462 || !offset_below_red_zone_p (info->altivec_save_offset)))
24464 int i;
24466 if ((strategy & REST_INLINE_VRS) == 0)
24468 int end_save = info->altivec_save_offset + info->altivec_size;
24469 int ptr_off;
24470 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24471 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24472 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24474 if (end_save + frame_off != 0)
24476 rtx offset = GEN_INT (end_save + frame_off);
24478 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24480 else
24481 emit_move_insn (ptr_reg, frame_reg_rtx);
24483 ptr_off = -end_save;
24484 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24485 info->altivec_save_offset + ptr_off,
24486 0, V4SImode, SAVRES_VR);
24487 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24489 /* Frame reg was clobbered by out-of-line save. Restore it
24490 from ptr_reg, and if we are calling out-of-line gpr or
24491 fpr restore set up the correct pointer and offset. */
24492 unsigned newptr_regno = 1;
24493 if (!restoring_GPRs_inline)
24495 bool lr = info->gp_save_offset + info->gp_size == 0;
24496 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24497 newptr_regno = ptr_regno_for_savres (sel);
24498 end_save = info->gp_save_offset + info->gp_size;
24500 else if (!restoring_FPRs_inline)
24502 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
24503 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24504 newptr_regno = ptr_regno_for_savres (sel);
24505 end_save = info->gp_save_offset + info->gp_size;
24508 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
24509 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
24511 if (end_save + ptr_off != 0)
24513 rtx offset = GEN_INT (end_save + ptr_off);
24515 frame_off = -end_save;
24516 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
24518 else
24520 frame_off = ptr_off;
24521 emit_move_insn (frame_reg_rtx, ptr_reg);
24525 else
24527 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24528 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24530 rtx addr, areg, mem, reg;
24532 areg = gen_rtx_REG (Pmode, 0);
24533 emit_move_insn
24534 (areg, GEN_INT (info->altivec_save_offset
24535 + frame_off
24536 + 16 * (i - info->first_altivec_reg_save)));
24538 /* AltiVec addressing mode is [reg+reg]. */
24539 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24540 mem = gen_frame_mem (V4SImode, addr);
24542 reg = gen_rtx_REG (V4SImode, i);
24543 emit_move_insn (reg, mem);
24547 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24548 if (((strategy & REST_INLINE_VRS) == 0
24549 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24550 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24552 rtx reg = gen_rtx_REG (V4SImode, i);
24553 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24557 /* Restore VRSAVE if we have not done so already. */
24558 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24559 && TARGET_ALTIVEC
24560 && TARGET_ALTIVEC_VRSAVE
24561 && info->vrsave_mask != 0
24562 && (DEFAULT_ABI == ABI_V4
24563 || !offset_below_red_zone_p (info->vrsave_save_offset)))
24565 rtx reg;
24567 reg = gen_rtx_REG (SImode, 12);
24568 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24569 info->vrsave_save_offset + frame_off));
24571 emit_insn (generate_set_vrsave (reg, info, 1));
24574 /* If we exit by an out-of-line restore function on ABI_V4 then that
24575 function will deallocate the stack, so we don't need to worry
24576 about the unwinder restoring cr from an invalid stack frame
24577 location. */
24578 exit_func = (!restoring_FPRs_inline
24579 || (!restoring_GPRs_inline
24580 && info->first_fp_reg_save == 64));
24582 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24583 *separate* slots if the routine calls __builtin_eh_return, so
24584 that they can be independently restored by the unwinder. */
24585 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24587 int i, cr_off = info->ehcr_offset;
24589 for (i = 0; i < 8; i++)
24590 if (!call_used_regs[CR0_REGNO + i])
24592 rtx reg = gen_rtx_REG (SImode, 0);
24593 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24594 cr_off + frame_off));
24596 insn = emit_insn (gen_movsi_to_cr_one
24597 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24599 if (!exit_func && flag_shrink_wrap)
24601 add_reg_note (insn, REG_CFA_RESTORE,
24602 gen_rtx_REG (SImode, CR0_REGNO + i));
24604 RTX_FRAME_RELATED_P (insn) = 1;
24607 cr_off += reg_size;
24611 /* Get the old lr if we saved it. If we are restoring registers
24612 out-of-line, then the out-of-line routines can do this for us. */
24613 if (restore_lr && restoring_GPRs_inline)
24614 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24616 /* Get the old cr if we saved it. */
24617 if (info->cr_save_p)
24619 unsigned cr_save_regno = 12;
24621 if (!restoring_GPRs_inline)
24623 /* Ensure we don't use the register used by the out-of-line
24624 gpr register restore below. */
24625 bool lr = info->gp_save_offset + info->gp_size == 0;
24626 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24627 int gpr_ptr_regno = ptr_regno_for_savres (sel);
24629 if (gpr_ptr_regno == 12)
24630 cr_save_regno = 11;
24631 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
24633 else if (REGNO (frame_reg_rtx) == 12)
24634 cr_save_regno = 11;
24636 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
24637 info->cr_save_offset + frame_off,
24638 exit_func);
24641 /* Set LR here to try to overlap restores below. */
24642 if (restore_lr && restoring_GPRs_inline)
24643 restore_saved_lr (0, exit_func);
24645 /* Load exception handler data registers, if needed. */
24646 if (crtl->calls_eh_return)
24648 unsigned int i, regno;
24650 if (TARGET_AIX)
24652 rtx reg = gen_rtx_REG (reg_mode, 2);
24653 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24654 frame_off + RS6000_TOC_SAVE_SLOT));
24657 for (i = 0; ; ++i)
24659 rtx mem;
24661 regno = EH_RETURN_DATA_REGNO (i);
24662 if (regno == INVALID_REGNUM)
24663 break;
24665 /* Note: possible use of r0 here to address SPE regs. */
24666 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
24667 info->ehrd_offset + frame_off
24668 + reg_size * (int) i);
24670 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
24674 /* Restore GPRs. This is done as a PARALLEL if we are using
24675 the load-multiple instructions. */
24676 if (TARGET_SPE_ABI
24677 && info->spe_64bit_regs_used
24678 && info->first_gp_reg_save != 32)
24680 /* Determine whether we can address all of the registers that need
24681 to be saved with an offset from frame_reg_rtx that fits in
24682 the small const field for SPE memory instructions. */
24683 int spe_regs_addressable
24684 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
24685 + reg_size * (32 - info->first_gp_reg_save - 1))
24686 && restoring_GPRs_inline);
24688 if (!spe_regs_addressable)
24690 int ool_adjust = 0;
24691 rtx old_frame_reg_rtx = frame_reg_rtx;
24692 /* Make r11 point to the start of the SPE save area. We worried about
24693 not clobbering it when we were saving registers in the prologue.
24694 There's no need to worry here because the static chain is passed
24695 anew to every function. */
24697 if (!restoring_GPRs_inline)
24698 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
24699 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24700 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
24701 GEN_INT (info->spe_gp_save_offset
24702 + frame_off
24703 - ool_adjust)));
24704 /* Keep the invariant that frame_reg_rtx + frame_off points
24705 at the top of the stack frame. */
24706 frame_off = -info->spe_gp_save_offset + ool_adjust;
24709 if (restoring_GPRs_inline)
24711 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
24713 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24714 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24716 rtx offset, addr, mem, reg;
24718 /* We're doing all this to ensure that the immediate offset
24719 fits into the immediate field of 'evldd'. */
24720 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
24722 offset = GEN_INT (spe_offset + reg_size * i);
24723 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
24724 mem = gen_rtx_MEM (V2SImode, addr);
24725 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24727 emit_move_insn (reg, mem);
24730 else
24731 rs6000_emit_savres_rtx (info, frame_reg_rtx,
24732 info->spe_gp_save_offset + frame_off,
24733 info->lr_save_offset + frame_off,
24734 reg_mode,
24735 SAVRES_GPR | SAVRES_LR);
24737 else if (!restoring_GPRs_inline)
24739 /* We are jumping to an out-of-line function. */
24740 rtx ptr_reg;
24741 int end_save = info->gp_save_offset + info->gp_size;
24742 bool can_use_exit = end_save == 0;
24743 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
24744 int ptr_off;
24746 /* Emit stack reset code if we need it. */
24747 ptr_regno = ptr_regno_for_savres (sel);
24748 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24749 if (can_use_exit)
24750 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24751 else if (end_save + frame_off != 0)
24752 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
24753 GEN_INT (end_save + frame_off)));
24754 else if (REGNO (frame_reg_rtx) != ptr_regno)
24755 emit_move_insn (ptr_reg, frame_reg_rtx);
24756 if (REGNO (frame_reg_rtx) == ptr_regno)
24757 frame_off = -end_save;
24759 if (can_use_exit && info->cr_save_p)
24760 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
24762 ptr_off = -end_save;
24763 rs6000_emit_savres_rtx (info, ptr_reg,
24764 info->gp_save_offset + ptr_off,
24765 info->lr_save_offset + ptr_off,
24766 reg_mode, sel);
24768 else if (using_load_multiple)
24770 rtvec p;
24771 p = rtvec_alloc (32 - info->first_gp_reg_save);
24772 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24773 RTVEC_ELT (p, i)
24774 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24775 frame_reg_rtx,
24776 info->gp_save_offset + frame_off + reg_size * i);
24777 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24779 else
24781 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24782 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24783 emit_insn (gen_frame_load
24784 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24785 frame_reg_rtx,
24786 info->gp_save_offset + frame_off + reg_size * i));
24789 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24791 /* If the frame pointer was used then we can't delay emitting
24792 a REG_CFA_DEF_CFA note. This must happen on the insn that
24793 restores the frame pointer, r31. We may have already emitted
24794 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
24795 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
24796 be harmless if emitted. */
24797 if (frame_pointer_needed)
24799 insn = get_last_insn ();
24800 add_reg_note (insn, REG_CFA_DEF_CFA,
24801 plus_constant (Pmode, frame_reg_rtx, frame_off));
24802 RTX_FRAME_RELATED_P (insn) = 1;
24805 /* Set up cfa_restores. We always need these when
24806 shrink-wrapping. If not shrink-wrapping then we only need
24807 the cfa_restore when the stack location is no longer valid.
24808 The cfa_restores must be emitted on or before the insn that
24809 invalidates the stack, and of course must not be emitted
24810 before the insn that actually does the restore. The latter
24811 is why it is a bad idea to emit the cfa_restores as a group
24812 on the last instruction here that actually does a restore:
24813 That insn may be reordered with respect to others doing
24814 restores. */
24815 if (flag_shrink_wrap
24816 && !restoring_GPRs_inline
24817 && info->first_fp_reg_save == 64)
24818 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24820 for (i = info->first_gp_reg_save; i < 32; i++)
24821 if (!restoring_GPRs_inline
24822 || using_load_multiple
24823 || rs6000_reg_live_or_pic_offset_p (i))
24825 rtx reg = gen_rtx_REG (reg_mode, i);
24827 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24831 if (!restoring_GPRs_inline
24832 && info->first_fp_reg_save == 64)
24834 /* We are jumping to an out-of-line function. */
24835 if (cfa_restores)
24836 emit_cfa_restores (cfa_restores);
24837 return;
24840 if (restore_lr && !restoring_GPRs_inline)
24842 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24843 restore_saved_lr (0, exit_func);
24846 /* Restore fpr's if we need to do it without calling a function. */
24847 if (restoring_FPRs_inline)
24848 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24849 if (save_reg_p (info->first_fp_reg_save + i))
24851 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24852 ? DFmode : SFmode),
24853 info->first_fp_reg_save + i);
24854 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24855 info->fp_save_offset + frame_off + 8 * i));
24856 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24857 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24860 /* If we saved cr, restore it here. Just those that were used. */
24861 if (info->cr_save_p)
24862 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
24864 /* If this is V.4, unwind the stack pointer after all of the loads
24865 have been done, or set up r11 if we are restoring fp out of line. */
24866 ptr_regno = 1;
24867 if (!restoring_FPRs_inline)
24869 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24870 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24871 ptr_regno = ptr_regno_for_savres (sel);
24874 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24875 if (REGNO (frame_reg_rtx) == ptr_regno)
24876 frame_off = 0;
24878 if (insn && restoring_FPRs_inline)
24880 if (cfa_restores)
24882 REG_NOTES (insn) = cfa_restores;
24883 cfa_restores = NULL_RTX;
24885 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24886 RTX_FRAME_RELATED_P (insn) = 1;
24889 if (crtl->calls_eh_return)
24891 rtx sa = EH_RETURN_STACKADJ_RTX;
24892 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
24895 if (!sibcall)
24897 rtvec p;
24898 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24899 if (! restoring_FPRs_inline)
24901 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
24902 RTVEC_ELT (p, 0) = ret_rtx;
24904 else
24906 if (cfa_restores)
24908 /* We can't hang the cfa_restores off a simple return,
24909 since the shrink-wrap code sometimes uses an existing
24910 return. This means there might be a path from
24911 pre-prologue code to this return, and dwarf2cfi code
24912 wants the eh_frame unwinder state to be the same on
24913 all paths to any point. So we need to emit the
24914 cfa_restores before the return. For -m64 we really
24915 don't need epilogue cfa_restores at all, except for
24916 this irritating dwarf2cfi-with-shrink-wrap
24917 requirement; the stack red-zone means eh_frame info
24918 from the prologue telling the unwinder to restore
24919 from the stack is perfectly good right to the end of
24920 the function. */
24921 emit_insn (gen_blockage ());
24922 emit_cfa_restores (cfa_restores);
24923 cfa_restores = NULL_RTX;
24925 p = rtvec_alloc (2);
24926 RTVEC_ELT (p, 0) = simple_return_rtx;
24929 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
24930 ? gen_rtx_USE (VOIDmode,
24931 gen_rtx_REG (Pmode, LR_REGNO))
24932 : gen_rtx_CLOBBER (VOIDmode,
24933 gen_rtx_REG (Pmode, LR_REGNO)));
24935 /* If we have to restore more than two FP registers, branch to the
24936 restore function. It will return to our caller. */
24937 if (! restoring_FPRs_inline)
24939 int i;
24940 int reg;
24941 rtx sym;
24943 if (flag_shrink_wrap)
24944 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24946 sym = rs6000_savres_routine_sym (info,
24947 SAVRES_FPR | (lr ? SAVRES_LR : 0));
24948 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
24949 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
24950 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
24952 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24954 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
24956 RTVEC_ELT (p, i + 4)
24957 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
24958 if (flag_shrink_wrap)
24959 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
24960 cfa_restores);
24964 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24967 if (cfa_restores)
24969 if (sibcall)
24970 /* Ensure the cfa_restores are hung off an insn that won't
24971 be reordered above other restores. */
24972 emit_insn (gen_blockage ());
24974 emit_cfa_restores (cfa_restores);
24978 /* Write function epilogue. */
24980 static void
24981 rs6000_output_function_epilogue (FILE *file,
24982 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
24984 #if TARGET_MACHO
24985 macho_branch_islands ();
24986 /* Mach-O doesn't support labels at the end of objects, so if
24987 it looks like we might want one, insert a NOP. */
24989 rtx insn = get_last_insn ();
24990 rtx deleted_debug_label = NULL_RTX;
24991 while (insn
24992 && NOTE_P (insn)
24993 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
24995 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
24996 notes; instead set their CODE_LABEL_NUMBER to -1,
24997 otherwise there would be code generation differences
24998 between -g and -g0.  */
24999 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25000 deleted_debug_label = insn;
25001 insn = PREV_INSN (insn);
25003 if (insn
25004 && (LABEL_P (insn)
25005 || (NOTE_P (insn)
25006 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
25007 fputs ("\tnop\n", file);
25008 else if (deleted_debug_label)
25009 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
25010 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25011 CODE_LABEL_NUMBER (insn) = -1;
25013 #endif
25015 /* Output a traceback table here. See /usr/include/sys/debug.h for info
25016 on its format.
25018 We don't output a traceback table if -finhibit-size-directive was
25019 used. The documentation for -finhibit-size-directive reads
25020 ``don't output a @code{.size} assembler directive, or anything
25021 else that would cause trouble if the function is split in the
25022 middle, and the two halves are placed at locations far apart in
25023 memory.'' The traceback table has this property, since it
25024 includes the offset from the start of the function to the
25025 traceback table itself.
25027 System V.4 PowerPC (and the embedded ABI derived from it) uses a
25028 different traceback table.  */
25029 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25030 && ! flag_inhibit_size_directive
25031 && rs6000_traceback != traceback_none && !cfun->is_thunk)
25033 const char *fname = NULL;
25034 const char *language_string = lang_hooks.name;
25035 int fixed_parms = 0, float_parms = 0, parm_info = 0;
25036 int i;
25037 int optional_tbtab;
25038 rs6000_stack_t *info = rs6000_stack_info ();
25040 if (rs6000_traceback == traceback_full)
25041 optional_tbtab = 1;
25042 else if (rs6000_traceback == traceback_part)
25043 optional_tbtab = 0;
25044 else
25045 optional_tbtab = !optimize_size && !TARGET_ELF;
25047 if (optional_tbtab)
25049 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
25050 while (*fname == '.') /* V.4 encodes . in the name */
25051 fname++;
25053 /* Need label immediately before tbtab, so we can compute
25054 its offset from the function start. */
25055 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25056 ASM_OUTPUT_LABEL (file, fname);
25059 /* The .tbtab pseudo-op can only be used for the first eight
25060 expressions, since it can't handle the possibly variable
25061 length fields that follow. However, if you omit the optional
25062 fields, the assembler outputs zeros for all optional fields
25063 anyway, giving each variable length field its minimum length
25064 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
25065 pseudo-op at all. */
25067 /* An all-zero word flags the start of the tbtab, for debuggers
25068 that have to find it by searching forward from the entry
25069 point or from the current pc. */
25070 fputs ("\t.long 0\n", file);
25072 /* Tbtab format type. Use format type 0. */
25073 fputs ("\t.byte 0,", file);
25075 /* Language type. Unfortunately, there does not seem to be any
25076 official way to discover the language being compiled, so we
25077 use language_string.
25078 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
25079 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
25080 a number, so for now use 9. LTO and Go aren't assigned numbers
25081 either, so for now use 0. */
25082 if (! strcmp (language_string, "GNU C")
25083 || ! strcmp (language_string, "GNU GIMPLE")
25084 || ! strcmp (language_string, "GNU Go"))
25085 i = 0;
25086 else if (! strcmp (language_string, "GNU F77")
25087 || ! strcmp (language_string, "GNU Fortran"))
25088 i = 1;
25089 else if (! strcmp (language_string, "GNU Pascal"))
25090 i = 2;
25091 else if (! strcmp (language_string, "GNU Ada"))
25092 i = 3;
25093 else if (! strcmp (language_string, "GNU C++")
25094 || ! strcmp (language_string, "GNU Objective-C++"))
25095 i = 9;
25096 else if (! strcmp (language_string, "GNU Java"))
25097 i = 13;
25098 else if (! strcmp (language_string, "GNU Objective-C"))
25099 i = 14;
25100 else
25101 gcc_unreachable ();
25102 fprintf (file, "%d,", i);
25104 /* 8 single bit fields: global linkage (not set for C extern linkage,
25105 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
25106 from start of procedure stored in tbtab, internal function, function
25107 has controlled storage, function has no toc, function uses fp,
25108 function logs/aborts fp operations. */
25109 /* Assume that fp operations are used if any fp reg must be saved. */
25110 fprintf (file, "%d,",
25111 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
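/* Editor's worked example: with optional_tbtab == 1 and at least
   one FPR saved, this prints (1 << 5) | (1 << 1) == 34.  */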
25113 /* 6 bitfields: function is interrupt handler, name present in
25114 proc table, function calls alloca, on condition directives
25115 (controls stack walks, 3 bits), saves condition reg, saves
25116 link reg. */
25117 /* The `function calls alloca' bit seems to be set whenever reg 31 is
25118 set up as a frame pointer, even when there is no alloca call. */
25119 fprintf (file, "%d,",
25120 ((optional_tbtab << 6)
25121 | ((optional_tbtab & frame_pointer_needed) << 5)
25122 | (info->cr_save_p << 1)
25123 | (info->lr_save_p)));
25125 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
25126 (6 bits). */
25127 fprintf (file, "%d,",
25128 (info->push_p << 7) | (64 - info->first_fp_reg_save));
25130 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
25131 fprintf (file, "%d,", (32 - first_reg_to_save ()));
25133 if (optional_tbtab)
25135 /* Compute the parameter info from the function decl argument
25136 list. */
25137 tree decl;
25138 int next_parm_info_bit = 31;
25140 for (decl = DECL_ARGUMENTS (current_function_decl);
25141 decl; decl = DECL_CHAIN (decl))
25143 rtx parameter = DECL_INCOMING_RTL (decl);
25144 enum machine_mode mode = GET_MODE (parameter);
25146 if (GET_CODE (parameter) == REG)
25148 if (SCALAR_FLOAT_MODE_P (mode))
25150 int bits;
25152 float_parms++;
25154 switch (mode)
25156 case SFmode:
25157 case SDmode:
25158 bits = 0x2;
25159 break;
25161 case DFmode:
25162 case DDmode:
25163 case TFmode:
25164 case TDmode:
25165 bits = 0x3;
25166 break;
25168 default:
25169 gcc_unreachable ();
25172 /* If only one bit will fit, don't or in this entry. */
25173 if (next_parm_info_bit > 0)
25174 parm_info |= (bits << (next_parm_info_bit - 1));
25175 next_parm_info_bit -= 2;
25177 else
25179 fixed_parms += ((GET_MODE_SIZE (mode)
25180 + (UNITS_PER_WORD - 1))
25181 / UNITS_PER_WORD);
25182 next_parm_info_bit -= 1;
25188 /* Number of fixed point parameters. */
25189 /* This is actually the number of words of fixed point parameters; thus
25190 an 8-byte struct counts as 2, and thus the maximum value is 8.  */
25191 fprintf (file, "%d,", fixed_parms);
25193 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25194 all on stack. */
25195 /* This is actually the number of fp registers that hold parameters;
25196 and thus the maximum value is 13. */
25197 /* Set parameters on stack bit if parameters are not in their original
25198 registers, regardless of whether they are on the stack? Xlc
25199 seems to set the bit when not optimizing. */
25200 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
25202 if (! optional_tbtab)
25203 return;
25205 /* Optional fields follow. Some are variable length. */
25207 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25208 11 double float. */
25209 /* There is an entry for each parameter in a register, in the order that
25210 they occur in the parameter list. Any intervening arguments on the
25211 stack are ignored. If the list overflows a long (max possible length
25212 34 bits) then completely leave off all elements that don't fit. */
25213 /* Only emit this long if there was at least one parameter. */
25214 if (fixed_parms || float_parms)
25215 fprintf (file, "\t.long %d\n", parm_info);
25217 /* Offset from start of code to tb table. */
25218 fputs ("\t.long ", file);
25219 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25220 RS6000_OUTPUT_BASENAME (file, fname);
25221 putc ('-', file);
25222 rs6000_output_function_entry (file, fname);
25223 putc ('\n', file);
25225 /* Interrupt handler mask. */
25226 /* Omit this long, since we never set the interrupt handler bit
25227 above. */
25229 /* Number of CTL (controlled storage) anchors. */
25230 /* Omit this long, since the has_ctl bit is never set above. */
25232 /* Displacement into stack of each CTL anchor. */
25233 /* Omit this list of longs, because there are no CTL anchors. */
25235 /* Length of function name. */
25236 if (*fname == '*')
25237 ++fname;
25238 fprintf (file, "\t.short %d\n", (int) strlen (fname));
25240 /* Function name. */
25241 assemble_string (fname, strlen (fname));
25243 /* Register for alloca automatic storage; this is always reg 31.
25244 Only emit this if the alloca bit was set above. */
25245 if (frame_pointer_needed)
25246 fputs ("\t.byte 31\n", file);
25248 fputs ("\t.align 2\n", file);
25252 /* A C compound statement that outputs the assembler code for a thunk
25253 function, used to implement C++ virtual function calls with
25254 multiple inheritance. The thunk acts as a wrapper around a virtual
25255 function, adjusting the implicit object parameter before handing
25256 control off to the real function.
25258 First, emit code to add the integer DELTA to the location that
25259 contains the incoming first argument. Assume that this argument
25260 contains a pointer, and is the one used to pass the `this' pointer
25261 in C++. This is the incoming argument *before* the function
25262 prologue, e.g. `%o0' on a sparc. The addition must preserve the
25263 values of all other incoming arguments.
25265 After the addition, emit code to jump to FUNCTION, which is a
25266 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
25267 not touch the return address. Hence returning from FUNCTION will
25268 return to whoever called the current `thunk'.
25270 The effect must be as if FUNCTION had been called directly with the
25271 adjusted first argument. This macro is responsible for emitting
25272 all of the code for a thunk function; output_function_prologue()
25273 and output_function_epilogue() are not invoked.
25275 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
25276 been extracted from it.) It might possibly be useful on some
25277 targets, but probably not.
25279 If you do not define this macro, the target-independent code in the
25280 C++ frontend will generate a less efficient heavyweight thunk that
25281 calls FUNCTION instead of jumping to it. The generic approach does
25282 not support varargs. */
25284 static void
25285 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
25286 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
25287 tree function)
25289 rtx this_rtx, insn, funexp;
25291 reload_completed = 1;
25292 epilogue_completed = 1;
25294 /* Mark the end of the (empty) prologue. */
25295 emit_note (NOTE_INSN_PROLOGUE_END);
25297 /* Find the "this" pointer. If the function returns a structure,
25298 the structure return pointer is in r3. */
25299 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25300 this_rtx = gen_rtx_REG (Pmode, 4);
25301 else
25302 this_rtx = gen_rtx_REG (Pmode, 3);
25304 /* Apply the constant offset, if required. */
25305 if (delta)
25306 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25308 /* Apply the offset from the vtable, if required. */
25309 if (vcall_offset)
25311 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25312 rtx tmp = gen_rtx_REG (Pmode, 12);
25314 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
25315 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25317 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25318 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25320 else
25322 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25324 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25326 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25329 /* Generate a tail call to the target function. */
25330 if (!TREE_USED (function))
25332 assemble_external (function);
25333 TREE_USED (function) = 1;
25335 funexp = XEXP (DECL_RTL (function), 0);
25336 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25338 #if TARGET_MACHO
25339 if (MACHOPIC_INDIRECT)
25340 funexp = machopic_indirect_call_target (funexp);
25341 #endif
25343 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25344 generate sibcall RTL explicitly. */
25345 insn = emit_call_insn (
25346 gen_rtx_PARALLEL (VOIDmode,
25347 gen_rtvec (4,
25348 gen_rtx_CALL (VOIDmode,
25349 funexp, const0_rtx),
25350 gen_rtx_USE (VOIDmode, const0_rtx),
25351 gen_rtx_USE (VOIDmode,
25352 gen_rtx_REG (SImode,
25353 LR_REGNO)),
25354 simple_return_rtx)));
25355 SIBLING_CALL_P (insn) = 1;
25356 emit_barrier ();
25358 /* Ensure we have a global entry point for the thunk. ??? We could
25359 avoid that if the target routine doesn't need a global entry point,
25360 but we do not know whether this is the case at this point. */
25361 if (DEFAULT_ABI == ABI_ELFv2)
25362 cfun->machine->r2_setup_needed = true;
25364 /* Run just enough of rest_of_compilation to get the insns emitted.
25365 There's not really enough bulk here to make other passes such as
25366 instruction scheduling worthwhile. Note that use_thunk calls
25367 assemble_start_function and assemble_end_function. */
25368 insn = get_insns ();
25369 shorten_branches (insn);
25370 final_start_function (insn, file, 1);
25371 final (insn, file, 1);
25372 final_end_function ();
25374 reload_completed = 0;
25375 epilogue_completed = 0;
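/* For illustration: with DELTA == 8 and VCALL_OFFSET == 0, and with
   "this" in r3 (the usual, non-aggregate-return case), the RTL built
   above amounts to roughly
	addi 3,3,8
	b <function>
   i.e. one add plus a sibcall jump that never touches LR.  */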
25378 /* A quick summary of the various types of 'constant-pool tables'
25379 under PowerPC:
25381 Target Flags Name One table per
25382 AIX (none) AIX TOC object file
25383 AIX -mfull-toc AIX TOC object file
25384 AIX -mminimal-toc AIX minimal TOC translation unit
25385 SVR4/EABI (none) SVR4 SDATA object file
25386 SVR4/EABI -fpic SVR4 pic object file
25387 SVR4/EABI -fPIC SVR4 PIC translation unit
25388 SVR4/EABI -mrelocatable EABI TOC function
25389 SVR4/EABI -maix AIX TOC object file
25390 SVR4/EABI -maix -mminimal-toc
25391 AIX minimal TOC translation unit
25393 Name Reg. Set by entries contains:
25394 made by addrs? fp? sum?
25396 AIX TOC 2 crt0 as Y option option
25397 AIX minimal TOC 30 prolog gcc Y Y option
25398 SVR4 SDATA 13 crt0 gcc N Y N
25399 SVR4 pic 30 prolog ld Y not yet N
25400 SVR4 PIC 30 prolog gcc Y option option
25401 EABI TOC 30 prolog gcc Y option option
25405 /* Hash functions for the hash table. */
25407 static unsigned
25408 rs6000_hash_constant (rtx k)
25410 enum rtx_code code = GET_CODE (k);
25411 enum machine_mode mode = GET_MODE (k);
25412 unsigned result = (code << 3) ^ mode;
25413 const char *format;
25414 int flen, fidx;
25416 format = GET_RTX_FORMAT (code);
25417 flen = strlen (format);
25418 fidx = 0;
25420 switch (code)
25422 case LABEL_REF:
25423 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
25425 case CONST_WIDE_INT:
25427 int i;
25428 flen = CONST_WIDE_INT_NUNITS (k);
25429 for (i = 0; i < flen; i++)
25430 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
25431 return result;
25434 case CONST_DOUBLE:
25435 if (mode != VOIDmode)
25436 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
25437 flen = 2;
25438 break;
25440 case CODE_LABEL:
25441 fidx = 3;
25442 break;
25444 default:
25445 break;
25448 for (; fidx < flen; fidx++)
25449 switch (format[fidx])
25451 case 's':
25453 unsigned i, len;
25454 const char *str = XSTR (k, fidx);
25455 len = strlen (str);
25456 result = result * 613 + len;
25457 for (i = 0; i < len; i++)
25458 result = result * 613 + (unsigned) str[i];
25459 break;
25461 case 'u':
25462 case 'e':
25463 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
25464 break;
25465 case 'i':
25466 case 'n':
25467 result = result * 613 + (unsigned) XINT (k, fidx);
25468 break;
25469 case 'w':
25470 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
25471 result = result * 613 + (unsigned) XWINT (k, fidx);
25472 else
25474 size_t i;
25475 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
25476 result = result * 613 + (unsigned) (XWINT (k, fidx)
25477 >> CHAR_BIT * i);
25479 break;
25480 case '0':
25481 break;
25482 default:
25483 gcc_unreachable ();
25486 return result;
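/* For illustration: hashing (const (plus (symbol_ref "x") (const_int 4)))
   starts from (CONST << 3) ^ mode, recurses into the PLUS and its two
   operands through the 'e' cases (multiplier 1231), and mixes in the
   symbol's name via the 's' case and the integer via the 'w' case
   (multiplier 613), so structurally different constants land in
   different buckets with high probability.  */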
25489 static unsigned
25490 toc_hash_function (const void *hash_entry)
25492 const struct toc_hash_struct *thc =
25493 (const struct toc_hash_struct *) hash_entry;
25494 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
25497 /* Compare H1 and H2 for equivalence. */
25499 static int
25500 toc_hash_eq (const void *h1, const void *h2)
25502 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
25503 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
25505 if (((const struct toc_hash_struct *) h1)->key_mode
25506 != ((const struct toc_hash_struct *) h2)->key_mode)
25507 return 0;
25509 return rtx_equal_p (r1, r2);
25512 /* These are the names given by the C++ front-end to vtables, and
25513 vtable-like objects. Ideally, this logic should not be here;
25514 instead, there should be some programmatic way of inquiring as
25515 to whether or not an object is a vtable. */
25517 #define VTABLE_NAME_P(NAME) \
25518 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
25519 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
25520 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
25521 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
25522 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
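/* In Itanium C++ ABI mangling these prefixes mean: _ZTV vtable,
   _ZTT VTT, _ZTI typeinfo, _ZTC construction vtable; "_vt." is the
   old pre-ABI g++ vtable prefix.  E.g. "_ZTV3Foo" names the vtable
   for class Foo.  */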
25524 #ifdef NO_DOLLAR_IN_LABEL
25525 /* Return a GGC-allocated character string translating dollar signs in
25526 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
25528 const char *
25529 rs6000_xcoff_strip_dollar (const char *name)
25531 char *strip, *p;
25532 const char *q;
25533 size_t len;
25535 q = (const char *) strchr (name, '$');
25537 if (q == 0 || q == name)
25538 return name;
25540 len = strlen (name);
25541 strip = XALLOCAVEC (char, len + 1);
25542 strcpy (strip, name);
25543 p = strip + (q - name);
25544 while (p)
25546 *p = '_';
25547 p = strchr (p + 1, '$');
25550 return ggc_alloc_string (strip, len);
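/* For example (hypothetical input): "Lthunk$42" becomes "Lthunk_42".
   A NAME with no '$', or whose first character is '$', is returned
   unchanged.  */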
25552 #endif
25554 void
25555 rs6000_output_symbol_ref (FILE *file, rtx x)
25557 /* Currently C++ toc references to vtables can be emitted before it
25558 is decided whether the vtable is public or private. If this is
25559 the case, then the linker will eventually complain that there is
25560 a reference to an unknown section. Thus, for vtables only,
25561 we emit the TOC reference to reference the symbol and not the
25562 section. */
25563 const char *name = XSTR (x, 0);
25565 if (VTABLE_NAME_P (name))
25567 RS6000_OUTPUT_BASENAME (file, name);
25569 else
25570 assemble_name (file, name);
25573 /* Output a TOC entry. We derive the entry name from what is being
25574 written. */
25576 void
25577 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
25579 char buf[256];
25580 const char *name = buf;
25581 rtx base = x;
25582 HOST_WIDE_INT offset = 0;
25584 gcc_assert (!TARGET_NO_TOC);
25586 /* When the linker won't eliminate them, don't output duplicate
25587 TOC entries (this happens on AIX if there is any kind of TOC,
25588 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
25589 CODE_LABELs. */
25590 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
25592 struct toc_hash_struct *h;
25593 void * * found;
25595 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
25596 time because GGC is not initialized at that point. */
25597 if (toc_hash_table == NULL)
25598 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
25599 toc_hash_eq, NULL);
25601 h = ggc_alloc<toc_hash_struct> ();
25602 h->key = x;
25603 h->key_mode = mode;
25604 h->labelno = labelno;
25606 found = htab_find_slot (toc_hash_table, h, INSERT);
25607 if (*found == NULL)
25608 *found = h;
25609 else /* This is indeed a duplicate.
25610 Set this label equal to that label. */
25612 fputs ("\t.set ", file);
25613 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25614 fprintf (file, "%d,", labelno);
25615 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25616 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25617 found)->labelno));
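/* For illustration (XCOFF label spelling, made-up numbers), the .set
   just emitted looks like
	.set LC..42,LC..7
   so references to the duplicate's label resolve to the entry that
   was output first.  */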
25619 #ifdef HAVE_AS_TLS
25620 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
25621 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
25622 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
25624 fputs ("\t.set ", file);
25625 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25626 fprintf (file, "%d,", labelno);
25627 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25628 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25629 found)->labelno));
25631 #endif
25632 return;
25636 /* If we're going to put a double constant in the TOC, make sure it's
25637 aligned properly when strict alignment is on. */
25638 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
25639 && STRICT_ALIGNMENT
25640 && GET_MODE_BITSIZE (mode) >= 64
25641 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
25642 ASM_OUTPUT_ALIGN (file, 3);
25645 (*targetm.asm_out.internal_label) (file, "LC", labelno);
25647 /* Handle FP constants specially. Note that if we have a minimal
25648 TOC, things we put here aren't actually in the TOC, so we can allow
25649 FP constants. */
25650 if (GET_CODE (x) == CONST_DOUBLE &&
25651 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
25653 REAL_VALUE_TYPE rv;
25654 long k[4];
25656 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25657 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25658 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
25659 else
25660 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
25662 if (TARGET_64BIT)
25664 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25665 fputs (DOUBLE_INT_ASM_OP, file);
25666 else
25667 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25668 k[0] & 0xffffffff, k[1] & 0xffffffff,
25669 k[2] & 0xffffffff, k[3] & 0xffffffff);
25670 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
25671 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25672 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
25673 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
25674 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
25675 return;
25677 else
25679 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25680 fputs ("\t.long ", file);
25681 else
25682 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25683 k[0] & 0xffffffff, k[1] & 0xffffffff,
25684 k[2] & 0xffffffff, k[3] & 0xffffffff);
25685 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
25686 k[0] & 0xffffffff, k[1] & 0xffffffff,
25687 k[2] & 0xffffffff, k[3] & 0xffffffff);
25688 return;
25691 else if (GET_CODE (x) == CONST_DOUBLE &&
25692 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
25694 REAL_VALUE_TYPE rv;
25695 long k[2];
25697 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25699 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25700 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
25701 else
25702 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
25704 if (TARGET_64BIT)
25706 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25707 fputs (DOUBLE_INT_ASM_OP, file);
25708 else
25709 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25710 k[0] & 0xffffffff, k[1] & 0xffffffff);
25711 fprintf (file, "0x%lx%08lx\n",
25712 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25713 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
25714 return;
25716 else
25718 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25719 fputs ("\t.long ", file);
25720 else
25721 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25722 k[0] & 0xffffffff, k[1] & 0xffffffff);
25723 fprintf (file, "0x%lx,0x%lx\n",
25724 k[0] & 0xffffffff, k[1] & 0xffffffff);
25725 return;
25728 else if (GET_CODE (x) == CONST_DOUBLE &&
25729 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
25731 REAL_VALUE_TYPE rv;
25732 long l;
25734 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25735 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25736 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
25737 else
25738 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
25740 if (TARGET_64BIT)
25742 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25743 fputs (DOUBLE_INT_ASM_OP, file);
25744 else
25745 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25746 if (WORDS_BIG_ENDIAN)
25747 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
25748 else
25749 fprintf (file, "0x%lx\n", l & 0xffffffff);
25750 return;
25752 else
25754 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25755 fputs ("\t.long ", file);
25756 else
25757 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25758 fprintf (file, "0x%lx\n", l & 0xffffffff);
25759 return;
25762 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
25764 unsigned HOST_WIDE_INT low;
25765 HOST_WIDE_INT high;
25767 low = INTVAL (x) & 0xffffffff;
25768 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
25770 /* TOC entries are always Pmode-sized, so when big-endian
25771 smaller integer constants in the TOC need to be padded.
25772 (This is still a win over putting the constants in
25773 a separate constant pool, because then we'd have
25774 to have both a TOC entry _and_ the actual constant.)
25776 For a 32-bit target, CONST_INT values are loaded and shifted
25777 entirely within `low' and can be stored in one TOC entry. */
25779 /* It would be easy to make this work, but it doesn't now. */
25780 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
25782 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
25784 low |= high << 32;
25785 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
25786 high = (HOST_WIDE_INT) low >> 32;
25787 low &= 0xffffffff;
25790 if (TARGET_64BIT)
25792 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25793 fputs (DOUBLE_INT_ASM_OP, file);
25794 else
25795 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25796 (long) high & 0xffffffff, (long) low & 0xffffffff);
25797 fprintf (file, "0x%lx%08lx\n",
25798 (long) high & 0xffffffff, (long) low & 0xffffffff);
25799 return;
25801 else
25803 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
25805 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25806 fputs ("\t.long ", file);
25807 else
25808 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25809 (long) high & 0xffffffff, (long) low & 0xffffffff);
25810 fprintf (file, "0x%lx,0x%lx\n",
25811 (long) high & 0xffffffff, (long) low & 0xffffffff);
25813 else
25815 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25816 fputs ("\t.long ", file);
25817 else
25818 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
25819 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
25821 return;
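/* For illustration: on 32-bit AIX with a full TOC, an SImode
   CONST_INT of 42 reaches the final branch above and emits
	.tc IS_2a[TC],0x2a
   while on 64-bit targets the value goes out as a single
   DOUBLE_INT_ASM_OP word (".quad"/".llong" style, target-dependent).  */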
25825 if (GET_CODE (x) == CONST)
25827 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
25828 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
25830 base = XEXP (XEXP (x, 0), 0);
25831 offset = INTVAL (XEXP (XEXP (x, 0), 1));
25834 switch (GET_CODE (base))
25836 case SYMBOL_REF:
25837 name = XSTR (base, 0);
25838 break;
25840 case LABEL_REF:
25841 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
25842 CODE_LABEL_NUMBER (XEXP (base, 0)));
25843 break;
25845 case CODE_LABEL:
25846 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
25847 break;
25849 default:
25850 gcc_unreachable ();
25853 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25854 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
25855 else
25857 fputs ("\t.tc ", file);
25858 RS6000_OUTPUT_BASENAME (file, name);
25860 if (offset < 0)
25861 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
25862 else if (offset)
25863 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
25865 /* Mark large TOC symbols on AIX with [TE] so they are mapped
25866 after other TOC symbols, reducing overflow of small TOC access
25867 to [TC] symbols. */
25868 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
25869 ? "[TE]," : "[TC],", file);
25872 /* Currently C++ toc references to vtables can be emitted before it
25873 is decided whether the vtable is public or private. If this is
25874 the case, then the linker will eventually complain that there is
25875 a TOC reference to an unknown section. Thus, for vtables only,
25876 we emit the TOC reference to reference the symbol and not the
25877 section. */
25878 if (VTABLE_NAME_P (name))
25880 RS6000_OUTPUT_BASENAME (file, name);
25881 if (offset < 0)
25882 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
25883 else if (offset > 0)
25884 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
25886 else
25887 output_addr_const (file, x);
25889 #if HAVE_AS_TLS
25890 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
25891 && SYMBOL_REF_TLS_MODEL (base) != 0)
25893 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
25894 fputs ("@le", file);
25895 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
25896 fputs ("@ie", file);
25897 /* Use global-dynamic for local-dynamic. */
25898 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
25899 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
25901 putc ('\n', file);
25902 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
25903 fputs ("\t.tc .", file);
25904 RS6000_OUTPUT_BASENAME (file, name);
25905 fputs ("[TC],", file);
25906 output_addr_const (file, x);
25907 fputs ("@m", file);
25910 #endif
25912 putc ('\n', file);
25915 /* Output an assembler pseudo-op to write an ASCII string of N characters
25916 starting at P to FILE.
25918 On the RS/6000, we have to do this using the .byte operation and
25919 write out special characters outside the quoted string.
25920 Also, the assembler is broken; very long strings are truncated,
25921 so we must artificially break them up early. */
25923 void
25924 output_ascii (FILE *file, const char *p, int n)
25926 char c;
25927 int i, count_string;
25928 const char *for_string = "\t.byte \"";
25929 const char *for_decimal = "\t.byte ";
25930 const char *to_close = NULL;
25932 count_string = 0;
25933 for (i = 0; i < n; i++)
25935 c = *p++;
25936 if (c >= ' ' && c < 0177)
25938 if (for_string)
25939 fputs (for_string, file);
25940 putc (c, file);
25942 /* Write two quotes to get one. */
25943 if (c == '"')
25945 putc (c, file);
25946 ++count_string;
25949 for_string = NULL;
25950 for_decimal = "\"\n\t.byte ";
25951 to_close = "\"\n";
25952 ++count_string;
25954 if (count_string >= 512)
25956 fputs (to_close, file);
25958 for_string = "\t.byte \"";
25959 for_decimal = "\t.byte ";
25960 to_close = NULL;
25961 count_string = 0;
25964 else
25966 if (for_decimal)
25967 fputs (for_decimal, file);
25968 fprintf (file, "%d", c);
25970 for_string = "\n\t.byte \"";
25971 for_decimal = ", ";
25972 to_close = "\n";
25973 count_string = 0;
25977 /* Now close the string if we have written one. Then end the line. */
25978 if (to_close)
25979 fputs (to_close, file);
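/* For example, output_ascii (file, "Hi\n", 3) emits
	.byte "Hi"
	.byte 10
   the printable run goes inside one quoted .byte directive and the
   newline follows as a decimal byte.  */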
25982 /* Generate a unique section name for FILENAME for a section type
25983 represented by SECTION_DESC. Output goes into BUF.
25985 SECTION_DESC can be any string, as long as it is different for each
25986 possible section type.
25988 We name the section in the same manner as xlc. The name begins with an
25989 underscore followed by the filename (after stripping any leading directory
25990 names) with the last period replaced by the string SECTION_DESC. If
25991 FILENAME does not contain a period, SECTION_DESC is appended to the end of
25992 the name. */
25994 void
25995 rs6000_gen_section_name (char **buf, const char *filename,
25996 const char *section_desc)
25998 const char *q, *after_last_slash, *last_period = 0;
25999 char *p;
26000 int len;
26002 after_last_slash = filename;
26003 for (q = filename; *q; q++)
26005 if (*q == '/')
26006 after_last_slash = q + 1;
26007 else if (*q == '.')
26008 last_period = q;
26011 len = strlen (after_last_slash) + strlen (section_desc) + 2;
26012 *buf = (char *) xmalloc (len);
26014 p = *buf;
26015 *p++ = '_';
26017 for (q = after_last_slash; *q; q++)
26019 if (q == last_period)
26021 strcpy (p, section_desc);
26022 p += strlen (section_desc);
26023 break;
26026 else if (ISALNUM (*q))
26027 *p++ = *q;
26030 if (last_period == 0)
26031 strcpy (p, section_desc);
26032 else
26033 *p = '\0';
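/* For example (hypothetical arguments): FILENAME "src/foo.c" with
   SECTION_DESC ".ro_" yields "_foo.ro_"; the directory part is
   dropped, '_' is prepended, and everything from the last period on
   is replaced by SECTION_DESC.  */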
26036 /* Emit profile function. */
26038 void
26039 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
26041 /* Non-standard profiling for kernels, which just saves LR then calls
26042 _mcount without worrying about arg saves. The idea is to change
26043 the function prologue as little as possible as it isn't easy to
26044 account for arg save/restore code added just for _mcount. */
26045 if (TARGET_PROFILE_KERNEL)
26046 return;
26048 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26050 #ifndef NO_PROFILE_COUNTERS
26051 # define NO_PROFILE_COUNTERS 0
26052 #endif
26053 if (NO_PROFILE_COUNTERS)
26054 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26055 LCT_NORMAL, VOIDmode, 0);
26056 else
26058 char buf[30];
26059 const char *label_name;
26060 rtx fun;
26062 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26063 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
26064 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
26066 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26067 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
26070 else if (DEFAULT_ABI == ABI_DARWIN)
26072 const char *mcount_name = RS6000_MCOUNT;
26073 int caller_addr_regno = LR_REGNO;
26075 /* Be conservative and always set this, at least for now. */
26076 crtl->uses_pic_offset_table = 1;
26078 #if TARGET_MACHO
26079 /* For PIC code, set up a stub and collect the caller's address
26080 from r0, which is where the prologue puts it. */
26081 if (MACHOPIC_INDIRECT
26082 && crtl->uses_pic_offset_table)
26083 caller_addr_regno = 0;
26084 #endif
26085 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
26086 LCT_NORMAL, VOIDmode, 1,
26087 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
26091 /* Write function profiler code. */
26093 void
26094 output_function_profiler (FILE *file, int labelno)
26096 char buf[100];
26098 switch (DEFAULT_ABI)
26100 default:
26101 gcc_unreachable ();
26103 case ABI_V4:
26104 if (!TARGET_32BIT)
26106 warning (0, "no profiling of 64-bit code for this ABI");
26107 return;
26109 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26110 fprintf (file, "\tmflr %s\n", reg_names[0]);
26111 if (NO_PROFILE_COUNTERS)
26113 asm_fprintf (file, "\tstw %s,4(%s)\n",
26114 reg_names[0], reg_names[1]);
26116 else if (TARGET_SECURE_PLT && flag_pic)
26118 if (TARGET_LINK_STACK)
26120 char name[32];
26121 get_ppc476_thunk_name (name);
26122 asm_fprintf (file, "\tbl %s\n", name);
26124 else
26125 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
26126 asm_fprintf (file, "\tstw %s,4(%s)\n",
26127 reg_names[0], reg_names[1]);
26128 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26129 asm_fprintf (file, "\taddis %s,%s,",
26130 reg_names[12], reg_names[12]);
26131 assemble_name (file, buf);
26132 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
26133 assemble_name (file, buf);
26134 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
26136 else if (flag_pic == 1)
26138 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
26139 asm_fprintf (file, "\tstw %s,4(%s)\n",
26140 reg_names[0], reg_names[1]);
26141 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26142 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
26143 assemble_name (file, buf);
26144 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
26146 else if (flag_pic > 1)
26148 asm_fprintf (file, "\tstw %s,4(%s)\n",
26149 reg_names[0], reg_names[1]);
26150 /* Now, we need to get the address of the label. */
26151 if (TARGET_LINK_STACK)
26153 char name[32];
26154 get_ppc476_thunk_name (name);
26155 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
26156 assemble_name (file, buf);
26157 fputs ("-.\n1:", file);
26158 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26159 asm_fprintf (file, "\taddi %s,%s,4\n",
26160 reg_names[11], reg_names[11]);
26162 else
26164 fputs ("\tbcl 20,31,1f\n\t.long ", file);
26165 assemble_name (file, buf);
26166 fputs ("-.\n1:", file);
26167 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26169 asm_fprintf (file, "\tlwz %s,0(%s)\n",
26170 reg_names[0], reg_names[11]);
26171 asm_fprintf (file, "\tadd %s,%s,%s\n",
26172 reg_names[0], reg_names[0], reg_names[11]);
26174 else
26176 asm_fprintf (file, "\tlis %s,", reg_names[12]);
26177 assemble_name (file, buf);
26178 fputs ("@ha\n", file);
26179 asm_fprintf (file, "\tstw %s,4(%s)\n",
26180 reg_names[0], reg_names[1]);
26181 asm_fprintf (file, "\tla %s,", reg_names[0]);
26182 assemble_name (file, buf);
26183 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
26186 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
26187 fprintf (file, "\tbl %s%s\n",
26188 RS6000_MCOUNT, flag_pic ? "@plt" : "");
26189 break;
26191 case ABI_AIX:
26192 case ABI_ELFv2:
26193 case ABI_DARWIN:
26194 /* Don't do anything, done in output_profile_hook (). */
26195 break;
26201 /* The following variable holds the last issued insn. */
26203 static rtx last_scheduled_insn;
26205 /* The following variable helps to balance issuing of load and
26206 store instructions. */
26208 static int load_store_pendulum;
26210 /* Power4 load update and store update instructions are cracked into a
26211 load or store and an integer insn which are executed in the same cycle.
26212 Branches have their own dispatch slot which does not count against the
26213 GCC issue rate, but it changes the program flow so there are no other
26214 instructions to issue in this cycle. */
26216 static int
26217 rs6000_variable_issue_1 (rtx insn, int more)
26219 last_scheduled_insn = insn;
26220 if (GET_CODE (PATTERN (insn)) == USE
26221 || GET_CODE (PATTERN (insn)) == CLOBBER)
26223 cached_can_issue_more = more;
26224 return cached_can_issue_more;
26227 if (insn_terminates_group_p (insn, current_group))
26229 cached_can_issue_more = 0;
26230 return cached_can_issue_more;
26233 /* If the insn is not recognized, it has no reservation; don't charge an issue slot. */
26234 if (recog_memoized (insn) < 0)
26235 return more;
26237 if (rs6000_sched_groups)
26239 if (is_microcoded_insn (insn))
26240 cached_can_issue_more = 0;
26241 else if (is_cracked_insn (insn))
26242 cached_can_issue_more = more > 2 ? more - 2 : 0;
26243 else
26244 cached_can_issue_more = more - 1;
26246 return cached_can_issue_more;
26249 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
26250 return 0;
26252 cached_can_issue_more = more - 1;
26253 return cached_can_issue_more;
26256 static int
26257 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
26259 int r = rs6000_variable_issue_1 (insn, more);
26260 if (verbose)
26261 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
26262 return r;
26265 /* Adjust the cost of a scheduling dependency. Return the new cost of
26266 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
26268 static int
26269 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
26271 enum attr_type attr_type;
26273 if (! recog_memoized (insn))
26274 return 0;
26276 switch (REG_NOTE_KIND (link))
26278 case REG_DEP_TRUE:
26280 /* Data dependency; DEP_INSN writes a register that INSN reads
26281 some cycles later. */
26283 /* Separate a load from a narrower, dependent store. */
26284 if (rs6000_sched_groups
26285 && GET_CODE (PATTERN (insn)) == SET
26286 && GET_CODE (PATTERN (dep_insn)) == SET
26287 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
26288 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
26289 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
26290 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
26291 return cost + 14;
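/* For illustration (microarchitectural rationale, roughly): a byte
   store followed by a dependent wider load of the same location
   defeats store forwarding on these processors, so the +14 bias
   pushes such loads well away from the narrower store.  */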
26293 attr_type = get_attr_type (insn);
26295 switch (attr_type)
26297 case TYPE_JMPREG:
26298 /* Tell the first scheduling pass about the latency between
26299 a mtctr and bctr (and mtlr and br/blr). The first
26300 scheduling pass will not know about this latency since
26301 the mtctr instruction, which has the latency associated
26302 to it, will be generated by reload. */
26303 return 4;
26304 case TYPE_BRANCH:
26305 /* Leave some extra cycles between a compare and its
26306 dependent branch, to inhibit expensive mispredicts. */
26307 if ((rs6000_cpu_attr == CPU_PPC603
26308 || rs6000_cpu_attr == CPU_PPC604
26309 || rs6000_cpu_attr == CPU_PPC604E
26310 || rs6000_cpu_attr == CPU_PPC620
26311 || rs6000_cpu_attr == CPU_PPC630
26312 || rs6000_cpu_attr == CPU_PPC750
26313 || rs6000_cpu_attr == CPU_PPC7400
26314 || rs6000_cpu_attr == CPU_PPC7450
26315 || rs6000_cpu_attr == CPU_PPCE5500
26316 || rs6000_cpu_attr == CPU_PPCE6500
26317 || rs6000_cpu_attr == CPU_POWER4
26318 || rs6000_cpu_attr == CPU_POWER5
26319 || rs6000_cpu_attr == CPU_POWER7
26320 || rs6000_cpu_attr == CPU_POWER8
26321 || rs6000_cpu_attr == CPU_CELL)
26322 && recog_memoized (dep_insn)
26323 && (INSN_CODE (dep_insn) >= 0))
26325 switch (get_attr_type (dep_insn))
26327 case TYPE_CMP:
26328 case TYPE_COMPARE:
26329 case TYPE_FPCOMPARE:
26330 case TYPE_CR_LOGICAL:
26331 case TYPE_DELAYED_CR:
26332 return cost + 2;
26333 case TYPE_MUL:
26334 if (get_attr_dot (dep_insn) == DOT_YES)
26335 return cost + 2;
26336 else
26337 break;
26338 case TYPE_SHIFT:
26339 if (get_attr_dot (dep_insn) == DOT_YES
26340 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
26341 return cost + 2;
26342 else
26343 break;
26344 default:
26345 break;
26347 break;
26349 case TYPE_STORE:
26350 case TYPE_FPSTORE:
26351 if ((rs6000_cpu == PROCESSOR_POWER6)
26352 && recog_memoized (dep_insn)
26353 && (INSN_CODE (dep_insn) >= 0))
26356 if (GET_CODE (PATTERN (insn)) != SET)
26357 /* If this happens, we have to extend this to schedule
26358 optimally. Return default for now. */
26359 return cost;
26361 /* Adjust the cost for the case where the value written
26362 by a fixed point operation is used as the address
26363 gen value on a store. */
26364 switch (get_attr_type (dep_insn))
26366 case TYPE_LOAD:
26367 case TYPE_CNTLZ:
26369 if (! store_data_bypass_p (dep_insn, insn))
26370 return get_attr_sign_extend (dep_insn)
26371 == SIGN_EXTEND_YES ? 6 : 4;
26372 break;
26374 case TYPE_SHIFT:
26376 if (! store_data_bypass_p (dep_insn, insn))
26377 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26378 6 : 3;
26379 break;
26381 case TYPE_INTEGER:
26382 case TYPE_ADD:
26383 case TYPE_LOGICAL:
26384 case TYPE_COMPARE:
26385 case TYPE_EXTS:
26386 case TYPE_INSERT:
26388 if (! store_data_bypass_p (dep_insn, insn))
26389 return 3;
26390 break;
26392 case TYPE_STORE:
26393 case TYPE_FPLOAD:
26394 case TYPE_FPSTORE:
26396 if (get_attr_update (dep_insn) == UPDATE_YES
26397 && ! store_data_bypass_p (dep_insn, insn))
26398 return 3;
26399 break;
26401 case TYPE_MUL:
26403 if (! store_data_bypass_p (dep_insn, insn))
26404 return 17;
26405 break;
26407 case TYPE_DIV:
26409 if (! store_data_bypass_p (dep_insn, insn))
26410 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26411 break;
26413 default:
26414 break;
26417 break;
26419 case TYPE_LOAD:
26420 if ((rs6000_cpu == PROCESSOR_POWER6)
26421 && recog_memoized (dep_insn)
26422 && (INSN_CODE (dep_insn) >= 0))
26425 /* Adjust the cost for the case where the value written
26426 by a fixed point instruction is used within the address
26427 gen portion of a subsequent load(u)(x) */
26428 switch (get_attr_type (dep_insn))
26430 case TYPE_LOAD:
26431 case TYPE_CNTLZ:
26433 if (set_to_load_agen (dep_insn, insn))
26434 return get_attr_sign_extend (dep_insn)
26435 == SIGN_EXTEND_YES ? 6 : 4;
26436 break;
26438 case TYPE_SHIFT:
26440 if (set_to_load_agen (dep_insn, insn))
26441 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26442 6 : 3;
26443 break;
26445 case TYPE_INTEGER:
26446 case TYPE_ADD:
26447 case TYPE_LOGICAL:
26448 case TYPE_COMPARE:
26449 case TYPE_EXTS:
26450 case TYPE_INSERT:
26452 if (set_to_load_agen (dep_insn, insn))
26453 return 3;
26454 break;
26456 case TYPE_STORE:
26457 case TYPE_FPLOAD:
26458 case TYPE_FPSTORE:
26460 if (get_attr_update (dep_insn) == UPDATE_YES
26461 && set_to_load_agen (dep_insn, insn))
26462 return 3;
26463 break;
26465 case TYPE_MUL:
26467 if (set_to_load_agen (dep_insn, insn))
26468 return 17;
26469 break;
26471 case TYPE_DIV:
26473 if (set_to_load_agen (dep_insn, insn))
26474 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26475 break;
26477 default:
26478 break;
26481 break;
26483 case TYPE_FPLOAD:
26484 if ((rs6000_cpu == PROCESSOR_POWER6)
26485 && get_attr_update (insn) == UPDATE_NO
26486 && recog_memoized (dep_insn)
26487 && (INSN_CODE (dep_insn) >= 0)
26488 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
26489 return 2;
26491 default:
26492 break;
26495 /* Fall out to return default cost. */
26497 break;
26499 case REG_DEP_OUTPUT:
26500 /* Output dependency; DEP_INSN writes a register that INSN writes some
26501 cycles later. */
26502 if ((rs6000_cpu == PROCESSOR_POWER6)
26503 && recog_memoized (dep_insn)
26504 && (INSN_CODE (dep_insn) >= 0))
26506 attr_type = get_attr_type (insn);
26508 switch (attr_type)
26510 case TYPE_FP:
26511 if (get_attr_type (dep_insn) == TYPE_FP)
26512 return 1;
26513 break;
26514 case TYPE_FPLOAD:
26515 if (get_attr_update (insn) == UPDATE_NO
26516 && get_attr_type (dep_insn) == TYPE_MFFGPR)
26517 return 2;
26518 break;
26519 default:
26520 break;
26523 case REG_DEP_ANTI:
26524 /* Anti dependency; DEP_INSN reads a register that INSN writes some
26525 cycles later. */
26526 return 0;
26528 default:
26529 gcc_unreachable ();
26532 return cost;
26535 /* Debug version of rs6000_adjust_cost. */
26537 static int
26538 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
26540 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
26542 if (ret != cost)
26544 const char *dep;
26546 switch (REG_NOTE_KIND (link))
26548 default: dep = "unknown dependency"; break;
26549 case REG_DEP_TRUE: dep = "data dependency"; break;
26550 case REG_DEP_OUTPUT: dep = "output dependency"; break;
26551 case REG_DEP_ANTI: dep = "anti dependency"; break;
26554 fprintf (stderr,
26555 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
26556 "%s, insn:\n", ret, cost, dep);
26558 debug_rtx (insn);
26561 return ret;
26564 /* Return true if INSN is microcoded,
26565 false otherwise. */
26567 static bool
26568 is_microcoded_insn (rtx insn)
26570 if (!insn || !NONDEBUG_INSN_P (insn)
26571 || GET_CODE (PATTERN (insn)) == USE
26572 || GET_CODE (PATTERN (insn)) == CLOBBER)
26573 return false;
26575 if (rs6000_cpu_attr == CPU_CELL)
26576 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
26578 if (rs6000_sched_groups
26579 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26581 enum attr_type type = get_attr_type (insn);
26582 if ((type == TYPE_LOAD
26583 && get_attr_update (insn) == UPDATE_YES
26584 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
26585 || ((type == TYPE_LOAD || type == TYPE_STORE)
26586 && get_attr_update (insn) == UPDATE_YES
26587 && get_attr_indexed (insn) == INDEXED_YES)
26588 || type == TYPE_MFCR)
26589 return true;
26592 return false;
26595 /* The function returns true if INSN is cracked into 2 instructions
26596 by the processor (and therefore occupies 2 issue slots). */
26598 static bool
26599 is_cracked_insn (rtx insn)
26601 if (!insn || !NONDEBUG_INSN_P (insn)
26602 || GET_CODE (PATTERN (insn)) == USE
26603 || GET_CODE (PATTERN (insn)) == CLOBBER)
26604 return false;
26606 if (rs6000_sched_groups
26607 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26609 enum attr_type type = get_attr_type (insn);
26610 if ((type == TYPE_LOAD
26611 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26612 && get_attr_update (insn) == UPDATE_NO)
26613 || (type == TYPE_LOAD
26614 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
26615 && get_attr_update (insn) == UPDATE_YES
26616 && get_attr_indexed (insn) == INDEXED_NO)
26617 || (type == TYPE_STORE
26618 && get_attr_update (insn) == UPDATE_YES
26619 && get_attr_indexed (insn) == INDEXED_NO)
26620 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
26621 && get_attr_update (insn) == UPDATE_YES)
26622 || type == TYPE_DELAYED_CR
26623 || type == TYPE_COMPARE
26624 || (type == TYPE_SHIFT
26625 && get_attr_dot (insn) == DOT_YES
26626 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
26627 || (type == TYPE_MUL
26628 && get_attr_dot (insn) == DOT_YES)
26629 || type == TYPE_DIV
26630 || (type == TYPE_INSERT
26631 && get_attr_size (insn) == SIZE_32))
26632 return true;
26635 return false;
26638 /* The function returns true if INSN can be issued only from
26639 the branch slot. */
26641 static bool
26642 is_branch_slot_insn (rtx insn)
26644 if (!insn || !NONDEBUG_INSN_P (insn)
26645 || GET_CODE (PATTERN (insn)) == USE
26646 || GET_CODE (PATTERN (insn)) == CLOBBER)
26647 return false;
26649 if (rs6000_sched_groups)
26651 enum attr_type type = get_attr_type (insn);
26652 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
26653 return true;
26654 return false;
26657 return false;
26660 /* Return true if OUT_INSN sets a value that is
26661 used in the address generation computation of IN_INSN. */
26662 static bool
26663 set_to_load_agen (rtx out_insn, rtx in_insn)
26665 rtx out_set, in_set;
26667 /* For performance reasons, only handle the simple case where
26668 both loads are a single_set. */
26669 out_set = single_set (out_insn);
26670 if (out_set)
26672 in_set = single_set (in_insn);
26673 if (in_set)
26674 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
26677 return false;
26680 /* Try to determine base/offset/size parts of the given MEM.
26681 Return true if successful, false if any of the values couldn't
26682 be determined.
26684 This function only looks for REG or REG+CONST address forms.
26685 REG+REG address form will return false. */
26687 static bool
26688 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
26689 HOST_WIDE_INT *size)
26691 rtx addr_rtx;
26692 if (MEM_SIZE_KNOWN_P (mem))
26693 *size = MEM_SIZE (mem);
26694 else
26695 return false;
26697 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
26698 addr_rtx = XEXP (XEXP (mem, 0), 1);
26699 else
26700 addr_rtx = (XEXP (mem, 0));
26702 if (GET_CODE (addr_rtx) == REG)
26704 *base = addr_rtx;
26705 *offset = 0;
26707 else if (GET_CODE (addr_rtx) == PLUS
26708 && CONST_INT_P (XEXP (addr_rtx, 1)))
26710 *base = XEXP (addr_rtx, 0);
26711 *offset = INTVAL (XEXP (addr_rtx, 1));
26713 else
26714 return false;
26716 return true;
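/* For illustration: for (mem:DI (plus:DI (reg:DI 9) (const_int 16)))
   with MEM_SIZE known to be 8, this sets *BASE to r9, *OFFSET to 16
   and *SIZE to 8; a reg+reg (indexed) address makes it return false.  */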
26719 /* Return true if the target storage location of
26720 MEM1 is adjacent to the target storage location of MEM2. */
26723 static bool
26724 adjacent_mem_locations (rtx mem1, rtx mem2)
26726 rtx reg1, reg2;
26727 HOST_WIDE_INT off1, size1, off2, size2;
26729 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26730 && get_memref_parts (mem2, &reg2, &off2, &size2))
26731 return ((REGNO (reg1) == REGNO (reg2))
26732 && ((off1 + size1 == off2)
26733 || (off2 + size2 == off1)));
26735 return false;
26738 /* This function returns true if it can be determined that the two MEM
26739 locations overlap by at least 1 byte based on base reg/offset/size. */
26741 static bool
26742 mem_locations_overlap (rtx mem1, rtx mem2)
26744 rtx reg1, reg2;
26745 HOST_WIDE_INT off1, size1, off2, size2;
26747 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26748 && get_memref_parts (mem2, &reg2, &off2, &size2))
26749 return ((REGNO (reg1) == REGNO (reg2))
26750 && (((off1 <= off2) && (off1 + size1 > off2))
26751 || ((off2 <= off1) && (off2 + size2 > off1))));
26753 return false;
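/* For illustration: two 4-byte accesses at r9+0 and r9+4 are adjacent
   but do not overlap; an 8-byte access at r9+0 overlaps a 4-byte
   access at r9+4.  Accesses off different base regs are never
   reported adjacent or overlapping here, even if the regs alias at
   runtime.  */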
26756 /* A C statement (sans semicolon) to update the integer scheduling
26757 priority INSN_PRIORITY (INSN). Increase the priority to execute
26758 INSN earlier; reduce it to execute INSN later. Do not
26759 define this macro if you do not need to adjust the scheduling
26760 priorities of insns. */
26762 static int
26763 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
26765 rtx load_mem, str_mem;
26766 /* On machines (like the 750) which have asymmetric integer units,
26767 where one integer unit can do multiply and divides and the other
26768 can't, reduce the priority of multiply/divide so it is scheduled
26769 before other integer operations. */
26771 #if 0
26772 if (! INSN_P (insn))
26773 return priority;
26775 if (GET_CODE (PATTERN (insn)) == USE)
26776 return priority;
26778 switch (rs6000_cpu_attr) {
26779 case CPU_PPC750:
26780 switch (get_attr_type (insn))
26782 default:
26783 break;
26785 case TYPE_MUL:
26786 case TYPE_DIV:
26787 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
26788 priority, priority);
26789 if (priority >= 0 && priority < 0x01000000)
26790 priority >>= 3;
26791 break;
26794 #endif
26796 if (insn_must_be_first_in_group (insn)
26797 && reload_completed
26798 && current_sched_info->sched_max_insns_priority
26799 && rs6000_sched_restricted_insns_priority)
26802 /* Prioritize insns that can be dispatched only in the first
26803 dispatch slot. */
26804 if (rs6000_sched_restricted_insns_priority == 1)
26805 /* Attach highest priority to insn. This means that in
26806 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
26807 precede 'priority' (critical path) considerations. */
26808 return current_sched_info->sched_max_insns_priority;
26809 else if (rs6000_sched_restricted_insns_priority == 2)
26810 /* Increase priority of insn by a minimal amount. This means that in
26811 haifa-sched.c:ready_sort(), only 'priority' (critical path)
26812 considerations precede dispatch-slot restriction considerations. */
26813 return (priority + 1);
26816 if (rs6000_cpu == PROCESSOR_POWER6
26817 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
26818 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
26819 /* Attach highest priority to insn if the scheduler has just issued two
26820 stores and this instruction is a load, or two loads and this instruction
26821 is a store. Power6 wants loads and stores scheduled alternately
26822 when possible. */
26823 return current_sched_info->sched_max_insns_priority;
26825 return priority;
26828 /* Return true if the instruction is nonpipelined on the Cell. */
26829 static bool
26830 is_nonpipeline_insn (rtx insn)
26832 enum attr_type type;
26833 if (!insn || !NONDEBUG_INSN_P (insn)
26834 || GET_CODE (PATTERN (insn)) == USE
26835 || GET_CODE (PATTERN (insn)) == CLOBBER)
26836 return false;
26838 type = get_attr_type (insn);
26839 if (type == TYPE_MUL
26840 || type == TYPE_DIV
26841 || type == TYPE_SDIV
26842 || type == TYPE_DDIV
26843 || type == TYPE_SSQRT
26844 || type == TYPE_DSQRT
26845 || type == TYPE_MFCR
26846 || type == TYPE_MFCRF
26847 || type == TYPE_MFJMPR)
26849 return true;
26851 return false;
26855 /* Return how many instructions the machine can issue per cycle. */
26857 static int
26858 rs6000_issue_rate (void)
26860 /* Unless scheduling for register pressure, use an issue rate of 1 for
26861 the first scheduling pass to decrease degradation. */
26862 if (!reload_completed && !flag_sched_pressure)
26863 return 1;
26865 switch (rs6000_cpu_attr) {
26866 case CPU_RS64A:
26867 case CPU_PPC601: /* ? */
26868 case CPU_PPC7450:
26869 return 3;
26870 case CPU_PPC440:
26871 case CPU_PPC603:
26872 case CPU_PPC750:
26873 case CPU_PPC7400:
26874 case CPU_PPC8540:
26875 case CPU_PPC8548:
26876 case CPU_CELL:
26877 case CPU_PPCE300C2:
26878 case CPU_PPCE300C3:
26879 case CPU_PPCE500MC:
26880 case CPU_PPCE500MC64:
26881 case CPU_PPCE5500:
26882 case CPU_PPCE6500:
26883 case CPU_TITAN:
26884 return 2;
26885 case CPU_PPC476:
26886 case CPU_PPC604:
26887 case CPU_PPC604E:
26888 case CPU_PPC620:
26889 case CPU_PPC630:
26890 return 4;
26891 case CPU_POWER4:
26892 case CPU_POWER5:
26893 case CPU_POWER6:
26894 case CPU_POWER7:
26895 return 5;
26896 case CPU_POWER8:
26897 return 7;
26898 default:
26899 return 1;
26903 /* Return how many instructions to look ahead for better insn
26904 scheduling. */
26906 static int
26907 rs6000_use_sched_lookahead (void)
26909 switch (rs6000_cpu_attr)
26911 case CPU_PPC8540:
26912 case CPU_PPC8548:
26913 return 4;
26915 case CPU_CELL:
26916 return (reload_completed ? 8 : 0);
26918 default:
26919 return 0;
26923 /* We are choosing an insn from the ready queue. Return zero if INSN can be
26924 chosen. */
26925 static int
26926 rs6000_use_sched_lookahead_guard (rtx insn, int ready_index)
26928 if (ready_index == 0)
26929 return 0;
26931 if (rs6000_cpu_attr != CPU_CELL)
26932 return 0;
26934 gcc_assert (insn != NULL_RTX && INSN_P (insn));
26936 if (!reload_completed
26937 || is_nonpipeline_insn (insn)
26938 || is_microcoded_insn (insn))
26939 return 1;
26941 return 0;
26944 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
26945 and return true. */
26947 static bool
26948 find_mem_ref (rtx pat, rtx *mem_ref)
26950 const char * fmt;
26951 int i, j;
26953 /* stack_tie does not produce any real memory traffic. */
26954 if (tie_operand (pat, VOIDmode))
26955 return false;
26957 if (GET_CODE (pat) == MEM)
26959 *mem_ref = pat;
26960 return true;
26963 /* Recursively process the pattern. */
26964 fmt = GET_RTX_FORMAT (GET_CODE (pat));
26966 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
26968 if (fmt[i] == 'e')
26970 if (find_mem_ref (XEXP (pat, i), mem_ref))
26971 return true;
26973 else if (fmt[i] == 'E')
26974 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
26976 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
26977 return true;
26981 return false;
26984 /* Determine if PAT is a PATTERN of a load insn. */
26986 static bool
26987 is_load_insn1 (rtx pat, rtx *load_mem)
26989 if (pat == NULL_RTX)
26990 return false;
26992 if (GET_CODE (pat) == SET)
26993 return find_mem_ref (SET_SRC (pat), load_mem);
26995 if (GET_CODE (pat) == PARALLEL)
26997 int i;
26999 for (i = 0; i < XVECLEN (pat, 0); i++)
27000 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
27001 return true;
27004 return false;
27007 /* Determine if INSN loads from memory. */
27009 static bool
27010 is_load_insn (rtx insn, rtx *load_mem)
27012 if (!insn || !INSN_P (insn))
27013 return false;
27015 if (CALL_P (insn))
27016 return false;
27018 return is_load_insn1 (PATTERN (insn), load_mem);
27021 /* Determine if PAT is a PATTERN of a store insn. */
27023 static bool
27024 is_store_insn1 (rtx pat, rtx *str_mem)
27026 if (pat == NULL_RTX)
27027 return false;
27029 if (GET_CODE (pat) == SET)
27030 return find_mem_ref (SET_DEST (pat), str_mem);
27032 if (GET_CODE (pat) == PARALLEL)
27034 int i;
27036 for (i = 0; i < XVECLEN (pat, 0); i++)
27037 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
27038 return true;
27041 return false;
27044 /* Determine if INSN stores to memory. */
27046 static bool
27047 is_store_insn (rtx insn, rtx *str_mem)
27049 if (!insn || !INSN_P (insn))
27050 return false;
27052 return is_store_insn1 (PATTERN (insn), str_mem);
27055 /* Returns whether the dependence between INSN and NEXT is considered
27056 costly by the given target. */
27058 static bool
27059 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
27061 rtx insn;
27062 rtx next;
27063 rtx load_mem, str_mem;
27065 /* If the flag is not enabled - no dependence is considered costly;
27066 allow all dependent insns in the same group.
27067 This is the most aggressive option. */
27068 if (rs6000_sched_costly_dep == no_dep_costly)
27069 return false;
27071 /* If the flag is set to 1 - a dependence is always considered costly;
27072 do not allow dependent instructions in the same group.
27073 This is the most conservative option. */
27074 if (rs6000_sched_costly_dep == all_deps_costly)
27075 return true;
27077 insn = DEP_PRO (dep);
27078 next = DEP_CON (dep);
27080 if (rs6000_sched_costly_dep == store_to_load_dep_costly
27081 && is_load_insn (next, &load_mem)
27082 && is_store_insn (insn, &str_mem))
27083 /* Prevent load after store in the same group. */
27084 return true;
27086 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
27087 && is_load_insn (next, &load_mem)
27088 && is_store_insn (insn, &str_mem)
27089 && DEP_TYPE (dep) == REG_DEP_TRUE
27090 && mem_locations_overlap(str_mem, load_mem))
27091 /* Prevent load after store in the same group if it is a true
27092 dependence. */
27093 return true;
27095 /* The flag is set to X; dependences with latency >= X are considered costly,
27096 and will not be scheduled in the same group. */
27097 if (rs6000_sched_costly_dep <= max_dep_latency
27098 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
27099 return true;
27101 return false;
27104 /* Return the next insn after INSN that is found before TAIL is reached,
27105 skipping any "non-active" insns - insns that will not actually occupy
27106 an issue slot. Return NULL_RTX if such an insn is not found. */
27108 static rtx
27109 get_next_active_insn (rtx insn, rtx tail)
27111 if (insn == NULL_RTX || insn == tail)
27112 return NULL_RTX;
27114 while (1)
27116 insn = NEXT_INSN (insn);
27117 if (insn == NULL_RTX || insn == tail)
27118 return NULL_RTX;
27120 if (CALL_P (insn)
27121 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
27122 || (NONJUMP_INSN_P (insn)
27123 && GET_CODE (PATTERN (insn)) != USE
27124 && GET_CODE (PATTERN (insn)) != CLOBBER
27125 && INSN_CODE (insn) != CODE_FOR_stack_tie))
27126 break;
27128 return insn;
27131 /* We are about to begin issuing insns for this clock cycle. */
27133 static int
27134 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
27135 rtx *ready ATTRIBUTE_UNUSED,
27136 int *pn_ready ATTRIBUTE_UNUSED,
27137 int clock_var ATTRIBUTE_UNUSED)
27139 int n_ready = *pn_ready;
27141 if (sched_verbose)
27142 fprintf (dump, "// rs6000_sched_reorder :\n");
27144 /* Reorder the ready list if the next insn to issue
27145 is a non-pipelined insn. */
27146 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
27148 if (is_nonpipeline_insn (ready[n_ready - 1])
27149 && (recog_memoized (ready[n_ready - 2]) > 0))
27150 /* Simply swap first two insns. */
27152 rtx tmp = ready[n_ready - 1];
27153 ready[n_ready - 1] = ready[n_ready - 2];
27154 ready[n_ready - 2] = tmp;
27158 if (rs6000_cpu == PROCESSOR_POWER6)
27159 load_store_pendulum = 0;
27161 return rs6000_issue_rate ();
27164 /* Like rs6000_sched_reorder, but called after issuing each insn. */
27166 static int
27167 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
27168 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
27170 if (sched_verbose)
27171 fprintf (dump, "// rs6000_sched_reorder2 :\n");
27173 /* For Power6, we need to handle some special cases to try and keep the
27174 store queue from overflowing and triggering expensive flushes.
27176 This code monitors how load and store instructions are being issued
27177 and skews the ready list one way or the other to increase the likelihood
27178 that a desired instruction is issued at the proper time.
27180 A couple of things are done. First, we maintain a "load_store_pendulum"
27181 to track the current state of load/store issue.
27183 - If the pendulum is at zero, then no loads or stores have been
27184 issued in the current cycle so we do nothing.
27186 - If the pendulum is 1, then a single load has been issued in this
27187 cycle and we attempt to locate another load in the ready list to
27188 issue with it.
27190 - If the pendulum is -2, then two stores have already been
27191 issued in this cycle, so we increase the priority of the first load
27192 in the ready list to increase its likelihood of being chosen first
27193 in the next cycle.
27195 - If the pendulum is -1, then a single store has been issued in this
27196 cycle and we attempt to locate another store in the ready list to
27197 issue with it, preferring a store to an adjacent memory location to
27198 facilitate store pairing in the store queue.
27200 - If the pendulum is 2, then two loads have already been
27201 issued in this cycle, so we increase the priority of the first store
27202 in the ready list to increase its likelihood of being chosen first
27203 in the next cycle.
27205 - If the pendulum < -2 or > 2, then do nothing.
27207 Note: This code covers the most common scenarios. There exist non
27208 load/store instructions which make use of the LSU and which
27209 would need to be accounted for to strictly model the behavior
27210 of the machine. Those instructions are currently unaccounted
27211 for to help minimize compile time overhead of this code. */
27213 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
27215 int pos;
27216 int i;
27217 rtx tmp, load_mem, str_mem;
27219 if (is_store_insn (last_scheduled_insn, &str_mem))
27220 /* Issuing a store, swing the load_store_pendulum to the left */
27221 load_store_pendulum--;
27222 else if (is_load_insn (last_scheduled_insn, &load_mem))
27223 /* Issuing a load, swing the load_store_pendulum to the right */
27224 load_store_pendulum++;
27225 else
27226 return cached_can_issue_more;
27228 /* If the pendulum is balanced, or there is only one instruction on
27229 the ready list, then all is well, so return. */
27230 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
27231 return cached_can_issue_more;
27233 if (load_store_pendulum == 1)
27235 /* A load has been issued in this cycle. Scan the ready list
27236 for another load to issue with it. */
27237 pos = *pn_ready-1;
27239 while (pos >= 0)
27241 if (is_load_insn (ready[pos], &load_mem))
27243 /* Found a load. Move it to the head of the ready list,
27244 and adjust its priority so that it is more likely to
27245 stay there. */
27246 tmp = ready[pos];
27247 for (i=pos; i<*pn_ready-1; i++)
27248 ready[i] = ready[i + 1];
27249 ready[*pn_ready-1] = tmp;
27251 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27252 INSN_PRIORITY (tmp)++;
27253 break;
27255 pos--;
27258 else if (load_store_pendulum == -2)
27260 /* Two stores have been issued in this cycle. Increase the
27261 priority of the first load in the ready list to favor it for
27262 issuing in the next cycle. */
27263 pos = *pn_ready-1;
27265 while (pos >= 0)
27267 if (is_load_insn (ready[pos], &load_mem)
27268 && !sel_sched_p ()
27269 && INSN_PRIORITY_KNOWN (ready[pos]))
27271 INSN_PRIORITY (ready[pos])++;
27273 /* Adjust the pendulum to account for the fact that a load
27274 was found and increased in priority. This is to prevent
27275 increasing the priority of multiple loads */
27276 load_store_pendulum--;
27278 break;
27280 pos--;
27283 else if (load_store_pendulum == -1)
27285 /* A store has been issued in this cycle. Scan the ready list for
27286 another store to issue with it, preferring a store to an adjacent
27287 memory location */
27288 int first_store_pos = -1;
27290 pos = *pn_ready-1;
27292 while (pos >= 0)
27294 if (is_store_insn (ready[pos], &str_mem))
27296 rtx str_mem2;
27297 /* Maintain the index of the first store found on the
27298 list */
27299 if (first_store_pos == -1)
27300 first_store_pos = pos;
27302 if (is_store_insn (last_scheduled_insn, &str_mem2)
27303 && adjacent_mem_locations (str_mem, str_mem2))
27305 /* Found an adjacent store. Move it to the head of the
27306 ready list, and adjust its priority so that it is
27307 more likely to stay there */
27308 tmp = ready[pos];
27309 for (i=pos; i<*pn_ready-1; i++)
27310 ready[i] = ready[i + 1];
27311 ready[*pn_ready-1] = tmp;
27313 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27314 INSN_PRIORITY (tmp)++;
27316 first_store_pos = -1;
27318 break;
27321 pos--;
27324 if (first_store_pos >= 0)
27326 /* An adjacent store wasn't found, but a non-adjacent store was,
27327 so move the non-adjacent store to the front of the ready
27328 list, and adjust its priority so that it is more likely to
27329 stay there. */
27330 tmp = ready[first_store_pos];
27331 for (i=first_store_pos; i<*pn_ready-1; i++)
27332 ready[i] = ready[i + 1];
27333 ready[*pn_ready-1] = tmp;
27334 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27335 INSN_PRIORITY (tmp)++;
27338 else if (load_store_pendulum == 2)
27340 /* Two loads have been issued in this cycle. Increase the priority
27341 of the first store in the ready list to favor it for issuing in
27342 the next cycle. */
27343 pos = *pn_ready-1;
27345 while (pos >= 0)
27347 if (is_store_insn (ready[pos], &str_mem)
27348 && !sel_sched_p ()
27349 && INSN_PRIORITY_KNOWN (ready[pos]))
27351 INSN_PRIORITY (ready[pos])++;
27353 /* Adjust the pendulum to account for the fact that a store
27354 was found and increased in priority. This is to prevent
27355 increasing the priority of multiple stores */
27356 load_store_pendulum++;
27358 break;
27360 pos--;
27365 return cached_can_issue_more;
27368 /* Return whether the presence of INSN causes a dispatch group termination
27369 of group WHICH_GROUP.
27371 If WHICH_GROUP == current_group, this function will return true if INSN
27372 causes the termination of the current group (i.e., the dispatch group to
27373 which INSN belongs). This means that INSN will be the last insn in the
27374 group it belongs to.
27376 If WHICH_GROUP == previous_group, this function will return true if INSN
27377 causes the termination of the previous group (i.e., the dispatch group that
27378 precedes the group to which INSN belongs). This means that INSN will be
27379 the first insn in the group it belongs to. */
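/* For example, an isync on Power6 must be both first and last in its
   group (see the type tables below), so it forms a dispatch group of
   its own and this predicate returns true for either WHICH_GROUP
   value.  */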
27381 static bool
27382 insn_terminates_group_p (rtx insn, enum group_termination which_group)
27384 bool first, last;
27386 if (! insn)
27387 return false;
27389 first = insn_must_be_first_in_group (insn);
27390 last = insn_must_be_last_in_group (insn);
27392 if (first && last)
27393 return true;
27395 if (which_group == current_group)
27396 return last;
27397 else if (which_group == previous_group)
27398 return first;
27400 return false;
27404 static bool
27405 insn_must_be_first_in_group (rtx insn)
27407 enum attr_type type;
27409 if (!insn
27410 || NOTE_P (insn)
27411 || DEBUG_INSN_P (insn)
27412 || GET_CODE (PATTERN (insn)) == USE
27413 || GET_CODE (PATTERN (insn)) == CLOBBER)
27414 return false;
27416 switch (rs6000_cpu)
27418 case PROCESSOR_POWER5:
27419 if (is_cracked_insn (insn))
27420 return true;
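/* Fall through: the microcoded-insn check below is shared between
   POWER5 and POWER4.  */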
27421 case PROCESSOR_POWER4:
27422 if (is_microcoded_insn (insn))
27423 return true;
27425 if (!rs6000_sched_groups)
27426 return false;
27428 type = get_attr_type (insn);
27430 switch (type)
27432 case TYPE_MFCR:
27433 case TYPE_MFCRF:
27434 case TYPE_MTCR:
27435 case TYPE_DELAYED_CR:
27436 case TYPE_CR_LOGICAL:
27437 case TYPE_MTJMPR:
27438 case TYPE_MFJMPR:
27439 case TYPE_DIV:
27440 case TYPE_LOAD_L:
27441 case TYPE_STORE_C:
27442 case TYPE_ISYNC:
27443 case TYPE_SYNC:
27444 return true;
27445 default:
27446 break;
27448 break;
27449 case PROCESSOR_POWER6:
27450 type = get_attr_type (insn);
27452 switch (type)
27454 case TYPE_EXTS:
27455 case TYPE_CNTLZ:
27456 case TYPE_TRAP:
27457 case TYPE_MUL:
27458 case TYPE_INSERT:
27459 case TYPE_FPCOMPARE:
27460 case TYPE_MFCR:
27461 case TYPE_MTCR:
27462 case TYPE_MFJMPR:
27463 case TYPE_MTJMPR:
27464 case TYPE_ISYNC:
27465 case TYPE_SYNC:
27466 case TYPE_LOAD_L:
27467 case TYPE_STORE_C:
27468 return true;
27469 case TYPE_SHIFT:
27470 if (get_attr_dot (insn) == DOT_NO
27471 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27472 return true;
27473 else
27474 break;
27475 case TYPE_DIV:
27476 if (get_attr_size (insn) == SIZE_32)
27477 return true;
27478 else
27479 break;
27480 case TYPE_LOAD:
27481 case TYPE_STORE:
27482 case TYPE_FPLOAD:
27483 case TYPE_FPSTORE:
27484 if (get_attr_update (insn) == UPDATE_YES)
27485 return true;
27486 else
27487 break;
27488 default:
27489 break;
27491 break;
27492 case PROCESSOR_POWER7:
27493 type = get_attr_type (insn);
27495 switch (type)
27497 case TYPE_CR_LOGICAL:
27498 case TYPE_MFCR:
27499 case TYPE_MFCRF:
27500 case TYPE_MTCR:
27501 case TYPE_DIV:
27502 case TYPE_COMPARE:
27503 case TYPE_ISYNC:
27504 case TYPE_LOAD_L:
27505 case TYPE_STORE_C:
27506 case TYPE_MFJMPR:
27507 case TYPE_MTJMPR:
27508 return true;
27509 case TYPE_MUL:
27510 case TYPE_SHIFT:
27511 if (get_attr_dot (insn) == DOT_YES)
27512 return true;
27513 else
27514 break;
27515 case TYPE_LOAD:
27516 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27517 || get_attr_update (insn) == UPDATE_YES)
27518 return true;
27519 else
27520 break;
27521 case TYPE_STORE:
27522 case TYPE_FPLOAD:
27523 case TYPE_FPSTORE:
27524 if (get_attr_update (insn) == UPDATE_YES)
27525 return true;
27526 else
27527 break;
27528 default:
27529 break;
27531 break;
27532 case PROCESSOR_POWER8:
27533 type = get_attr_type (insn);
27535 switch (type)
27537 case TYPE_CR_LOGICAL:
27538 case TYPE_DELAYED_CR:
27539 case TYPE_MFCR:
27540 case TYPE_MFCRF:
27541 case TYPE_MTCR:
27542 case TYPE_COMPARE:
27543 case TYPE_SYNC:
27544 case TYPE_ISYNC:
27545 case TYPE_LOAD_L:
27546 case TYPE_STORE_C:
27547 case TYPE_VECSTORE:
27548 case TYPE_MFJMPR:
27549 case TYPE_MTJMPR:
27550 return true;
27551 case TYPE_SHIFT:
27552 case TYPE_MUL:
27553 if (get_attr_dot (insn) == DOT_YES)
27554 return true;
27555 else
27556 break;
27557 case TYPE_LOAD:
27558 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27559 || get_attr_update (insn) == UPDATE_YES)
27560 return true;
27561 else
27562 break;
27563 case TYPE_STORE:
27564 if (get_attr_update (insn) == UPDATE_YES
27565 && get_attr_indexed (insn) == INDEXED_YES)
27566 return true;
27567 else
27568 break;
27569 default:
27570 break;
27572 break;
27573 default:
27574 break;
27577 return false;
27580 static bool
27581 insn_must_be_last_in_group (rtx insn)
27583 enum attr_type type;
27585 if (!insn
27586 || NOTE_P (insn)
27587 || DEBUG_INSN_P (insn)
27588 || GET_CODE (PATTERN (insn)) == USE
27589 || GET_CODE (PATTERN (insn)) == CLOBBER)
27590 return false;
27592 switch (rs6000_cpu) {
27593 case PROCESSOR_POWER4:
27594 case PROCESSOR_POWER5:
27595 if (is_microcoded_insn (insn))
27596 return true;
27598 if (is_branch_slot_insn (insn))
27599 return true;
27601 break;
27602 case PROCESSOR_POWER6:
27603 type = get_attr_type (insn);
27605 switch (type)
27607 case TYPE_EXTS:
27608 case TYPE_CNTLZ:
27609 case TYPE_TRAP:
27610 case TYPE_MUL:
27611 case TYPE_FPCOMPARE:
27612 case TYPE_MFCR:
27613 case TYPE_MTCR:
27614 case TYPE_MFJMPR:
27615 case TYPE_MTJMPR:
27616 case TYPE_ISYNC:
27617 case TYPE_SYNC:
27618 case TYPE_LOAD_L:
27619 case TYPE_STORE_C:
27620 return true;
27621 case TYPE_SHIFT:
27622 if (get_attr_dot (insn) == DOT_NO
27623 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27624 return true;
27625 else
27626 break;
27627 case TYPE_DIV:
27628 if (get_attr_size (insn) == SIZE_32)
27629 return true;
27630 else
27631 break;
27632 default:
27633 break;
27635 break;
27636 case PROCESSOR_POWER7:
27637 type = get_attr_type (insn);
27639 switch (type)
27641 case TYPE_ISYNC:
27642 case TYPE_SYNC:
27643 case TYPE_LOAD_L:
27644 case TYPE_STORE_C:
27645 return true;
27646 case TYPE_LOAD:
27647 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27648 && get_attr_update (insn) == UPDATE_YES)
27649 return true;
27650 else
27651 break;
27652 case TYPE_STORE:
27653 if (get_attr_update (insn) == UPDATE_YES
27654 && get_attr_indexed (insn) == INDEXED_YES)
27655 return true;
27656 else
27657 break;
27658 default:
27659 break;
27661 break;
27662 case PROCESSOR_POWER8:
27663 type = get_attr_type (insn);
27665 switch (type)
27667 case TYPE_MFCR:
27668 case TYPE_MTCR:
27669 case TYPE_ISYNC:
27670 case TYPE_SYNC:
27671 case TYPE_LOAD_L:
27672 case TYPE_STORE_C:
27673 return true;
27674 case TYPE_LOAD:
27675 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27676 && get_attr_update (insn) == UPDATE_YES)
27677 return true;
27678 else
27679 break;
27680 case TYPE_STORE:
27681 if (get_attr_update (insn) == UPDATE_YES
27682 && get_attr_indexed (insn) == INDEXED_YES)
27683 return true;
27684 else
27685 break;
27686 default:
27687 break;
27689 break;
27690 default:
27691 break;
27694 return false;
27697 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
27698 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
27700 static bool
27701 is_costly_group (rtx *group_insns, rtx next_insn)
27703 int i;
27704 int issue_rate = rs6000_issue_rate ();
27706 for (i = 0; i < issue_rate; i++)
27708 sd_iterator_def sd_it;
27709 dep_t dep;
27710 rtx insn = group_insns[i];
27712 if (!insn)
27713 continue;
27715 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
27717 rtx next = DEP_CON (dep);
27719 if (next == next_insn
27720 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
27721 return true;
27725 return false;
27728 /* Utility function used by redefine_groups.
27729 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
27730 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
27731 to keep it "far" (in a separate group) from GROUP_INSNS, following
27732 one of the following schemes, depending on the value of the flag
27733 -minsert-sched-nops = X:
27734 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
27735 in order to force NEXT_INSN into a separate group.
27736 (2) X < sched_finish_regroup_exact: insert exactly X nops.
27737 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
27738 insertion (has a group just ended, how many vacant issue slots remain in the
27739 last group, and how many dispatch groups were encountered so far). */
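/* Illustrative numbers (assuming issue_rate == 4): with two slots of
   the current group already filled (can_issue_more == 2) and a
   non-branch NEXT_INSN, scheme (1) emits a single nop, since the
   vacant branch slot needs none, while scheme (2) with
   -minsert-sched-nops=2 always emits exactly two nops.  */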
27741 static int
27742 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
27743 rtx next_insn, bool *group_end, int can_issue_more,
27744 int *group_count)
27746 rtx nop;
27747 bool force;
27748 int issue_rate = rs6000_issue_rate ();
27749 bool end = *group_end;
27750 int i;
27752 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
27753 return can_issue_more;
27755 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
27756 return can_issue_more;
27758 force = is_costly_group (group_insns, next_insn);
27759 if (!force)
27760 return can_issue_more;
27762 if (sched_verbose > 6)
27763 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
27764 *group_count ,can_issue_more);
27766 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
27768 if (*group_end)
27769 can_issue_more = 0;
27771 /* Since only a branch can be issued in the last issue_slot, it is
27772 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
27773 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
27774 in this case the last nop will start a new group and the branch
27775 will be forced to the new group. */
27776 if (can_issue_more && !is_branch_slot_insn (next_insn))
27777 can_issue_more--;
27779 /* Do we have a special group ending nop? */
27780 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
27781 || rs6000_cpu_attr == CPU_POWER8)
27783 nop = gen_group_ending_nop ();
27784 emit_insn_before (nop, next_insn);
27785 can_issue_more = 0;
27787 else
27788 while (can_issue_more > 0)
27790 nop = gen_nop ();
27791 emit_insn_before (nop, next_insn);
27792 can_issue_more--;
27795 *group_end = true;
27796 return 0;
27799 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
27801 int n_nops = rs6000_sched_insert_nops;
27803 /* Nops can't be issued from the branch slot, so the effective
27804 issue_rate for nops is 'issue_rate - 1'. */
27805 if (can_issue_more == 0)
27806 can_issue_more = issue_rate;
27807 can_issue_more--;
27808 if (can_issue_more == 0)
27810 can_issue_more = issue_rate - 1;
27811 (*group_count)++;
27812 end = true;
27813 for (i = 0; i < issue_rate; i++)
27815 group_insns[i] = 0;
27819 while (n_nops > 0)
27821 nop = gen_nop ();
27822 emit_insn_before (nop, next_insn);
27823 if (can_issue_more == issue_rate - 1) /* new group begins */
27824 end = false;
27825 can_issue_more--;
27826 if (can_issue_more == 0)
27828 can_issue_more = issue_rate - 1;
27829 (*group_count)++;
27830 end = true;
27831 for (i = 0; i < issue_rate; i++)
27833 group_insns[i] = 0;
27836 n_nops--;
27839 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
27840 can_issue_more++;
27842 /* Is next_insn going to start a new group? */
27843 *group_end
27844 = (end
27845 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27846 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27847 || (can_issue_more < issue_rate &&
27848 insn_terminates_group_p (next_insn, previous_group)));
27849 if (*group_end && end)
27850 (*group_count)--;
27852 if (sched_verbose > 6)
27853 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
27854 *group_count, can_issue_more);
27855 return can_issue_more;
27858 return can_issue_more;
27861 /* This function tries to synchronize the dispatch groups that the compiler "sees"
27862 with the dispatch groups that the processor dispatcher is expected to
27863 form in practice. It tries to achieve this synchronization by forcing the
27864 estimated processor grouping on the compiler (as opposed to the function
27865 'pad_groups' which tries to force the scheduler's grouping on the processor).
27867 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
27868 examines the (estimated) dispatch groups that will be formed by the processor
27869 dispatcher. It marks these group boundaries to reflect the estimated
27870 processor grouping, overriding the grouping that the scheduler had marked.
27871 Depending on the value of the flag '-minsert-sched-nops' this function can
27872 force certain insns into separate groups or force a certain distance between
27873 them by inserting nops, for example, if there exists a "costly dependence"
27874 between the insns.
27876 The function estimates the group boundaries that the processor will form as
27877 follows: It keeps track of how many vacant issue slots are available after
27878 each insn. A subsequent insn will start a new group if one of the following
27879 4 cases applies:
27880 - no more vacant issue slots remain in the current dispatch group.
27881 - only the last issue slot, which is the branch slot, is vacant, but the next
27882 insn is not a branch.
27883 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
27884 which means that a cracked insn (which occupies two issue slots) can't be
27885 issued in this group.
27886 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
27887 start a new group. */
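/* Worked example (hypothetical, issue_rate == 4): after two ordinary
   insns only two slots remain, one of them the branch slot.  A
   cracked insn needs two issue slots and cannot use the branch slot,
   so by the third rule above it is estimated to start a new group.  */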
27889 static int
27890 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27892 rtx insn, next_insn;
27893 int issue_rate;
27894 int can_issue_more;
27895 int slot, i;
27896 bool group_end;
27897 int group_count = 0;
27898 rtx *group_insns;
27900 /* Initialize. */
27901 issue_rate = rs6000_issue_rate ();
27902 group_insns = XALLOCAVEC (rtx, issue_rate);
27903 for (i = 0; i < issue_rate; i++)
27905 group_insns[i] = 0;
27907 can_issue_more = issue_rate;
27908 slot = 0;
27909 insn = get_next_active_insn (prev_head_insn, tail);
27910 group_end = false;
27912 while (insn != NULL_RTX)
27914 slot = (issue_rate - can_issue_more);
27915 group_insns[slot] = insn;
27916 can_issue_more =
27917 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27918 if (insn_terminates_group_p (insn, current_group))
27919 can_issue_more = 0;
27921 next_insn = get_next_active_insn (insn, tail);
27922 if (next_insn == NULL_RTX)
27923 return group_count + 1;
27925 /* Is next_insn going to start a new group? */
27926 group_end
27927 = (can_issue_more == 0
27928 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27929 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27930 || (can_issue_more < issue_rate &&
27931 insn_terminates_group_p (next_insn, previous_group)));
27933 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
27934 next_insn, &group_end, can_issue_more,
27935 &group_count);
27937 if (group_end)
27939 group_count++;
27940 can_issue_more = 0;
27941 for (i = 0; i < issue_rate; i++)
27943 group_insns[i] = 0;
27947 if (GET_MODE (next_insn) == TImode && can_issue_more)
27948 PUT_MODE (next_insn, VOIDmode);
27949 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
27950 PUT_MODE (next_insn, TImode);
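/* Note: the scheduler reuses an insn's machine mode as a group-start
   marker; TImode on an insn means "this insn begins a new dispatch
   group".  The two statements above rewrite that marking so it
   matches our estimated grouping.  */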
27952 insn = next_insn;
27953 if (can_issue_more == 0)
27954 can_issue_more = issue_rate;
27955 } /* while */
27957 return group_count;
27960 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
27961 dispatch group boundaries that the scheduler had marked. Pad with nops
27962 any dispatch groups which have vacant issue slots, in order to force the
27963 scheduler's grouping on the processor dispatcher. The function
27964 returns the number of dispatch groups found. */
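/* For instance (hypothetical): on a 4-issue machine with a scheduler
   boundary marked after two insns, can_issue_more is 2; if the next
   insn is not a branch, the branch slot is discounted and a single
   nop is emitted to fill the remaining slot.  */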
27966 static int
27967 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27969 rtx insn, next_insn;
27970 rtx nop;
27971 int issue_rate;
27972 int can_issue_more;
27973 int group_end;
27974 int group_count = 0;
27976 /* Initialize issue_rate. */
27977 issue_rate = rs6000_issue_rate ();
27978 can_issue_more = issue_rate;
27980 insn = get_next_active_insn (prev_head_insn, tail);
27981 next_insn = get_next_active_insn (insn, tail);
27983 while (insn != NULL_RTX)
27985 can_issue_more =
27986 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27988 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
27990 if (next_insn == NULL_RTX)
27991 break;
27993 if (group_end)
27995 /* If the scheduler had marked group termination at this location
27996 (between insn and next_insn), and neither insn nor next_insn will
27997 force group termination, pad the group with nops to force group
27998 termination. */
27999 if (can_issue_more
28000 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
28001 && !insn_terminates_group_p (insn, current_group)
28002 && !insn_terminates_group_p (next_insn, previous_group))
28004 if (!is_branch_slot_insn (next_insn))
28005 can_issue_more--;
28007 while (can_issue_more)
28009 nop = gen_nop ();
28010 emit_insn_before (nop, next_insn);
28011 can_issue_more--;
28015 can_issue_more = issue_rate;
28016 group_count++;
28019 insn = next_insn;
28020 next_insn = get_next_active_insn (insn, tail);
28023 return group_count;
28026 /* We're beginning a new block. Initialize data structures as necessary. */
28028 static void
28029 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
28030 int sched_verbose ATTRIBUTE_UNUSED,
28031 int max_ready ATTRIBUTE_UNUSED)
28033 last_scheduled_insn = NULL_RTX;
28034 load_store_pendulum = 0;
28037 /* The following function is called at the end of scheduling BB.
28038 After reload, it inserts nops to enforce insn group boundaries. */
28040 static void
28041 rs6000_sched_finish (FILE *dump, int sched_verbose)
28043 int n_groups;
28045 if (sched_verbose)
28046 fprintf (dump, "=== Finishing schedule.\n");
28048 if (reload_completed && rs6000_sched_groups)
28050 /* Do not run sched_finish hook when selective scheduling enabled. */
28051 if (sel_sched_p ())
28052 return;
28054 if (rs6000_sched_insert_nops == sched_finish_none)
28055 return;
28057 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
28058 n_groups = pad_groups (dump, sched_verbose,
28059 current_sched_info->prev_head,
28060 current_sched_info->next_tail);
28061 else
28062 n_groups = redefine_groups (dump, sched_verbose,
28063 current_sched_info->prev_head,
28064 current_sched_info->next_tail);
28066 if (sched_verbose >= 6)
28068 fprintf (dump, "ngroups = %d\n", n_groups);
28069 print_rtl (dump, current_sched_info->prev_head);
28070 fprintf (dump, "Done finish_sched\n");
28075 struct _rs6000_sched_context
28077 short cached_can_issue_more;
28078 rtx last_scheduled_insn;
28079 int load_store_pendulum;
28082 typedef struct _rs6000_sched_context rs6000_sched_context_def;
28083 typedef rs6000_sched_context_def *rs6000_sched_context_t;
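/* A usage sketch of how the selective scheduler drives the four hooks
   below (simplified):
     void *sc = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (sc, false);  (snapshot the globals)
     ... scheduling of another region ...
     rs6000_set_sched_context (sc);          (restore them)
     rs6000_free_sched_context (sc);  */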
28085 /* Allocate storage for a new scheduling context. */
28086 static void *
28087 rs6000_alloc_sched_context (void)
28089 return xmalloc (sizeof (rs6000_sched_context_def));
28092 /* If CLEAN_P is true, initialize _SC with clean data;
28093 otherwise, initialize it from the global context. */
28094 static void
28095 rs6000_init_sched_context (void *_sc, bool clean_p)
28097 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28099 if (clean_p)
28101 sc->cached_can_issue_more = 0;
28102 sc->last_scheduled_insn = NULL_RTX;
28103 sc->load_store_pendulum = 0;
28105 else
28107 sc->cached_can_issue_more = cached_can_issue_more;
28108 sc->last_scheduled_insn = last_scheduled_insn;
28109 sc->load_store_pendulum = load_store_pendulum;
28113 /* Sets the global scheduling context to the one pointed to by _SC. */
28114 static void
28115 rs6000_set_sched_context (void *_sc)
28117 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28119 gcc_assert (sc != NULL);
28121 cached_can_issue_more = sc->cached_can_issue_more;
28122 last_scheduled_insn = sc->last_scheduled_insn;
28123 load_store_pendulum = sc->load_store_pendulum;
28126 /* Free _SC. */
28127 static void
28128 rs6000_free_sched_context (void *_sc)
28130 gcc_assert (_sc != NULL);
28132 free (_sc);
28136 /* Length in units of the trampoline for entering a nested function. */
28139 rs6000_trampoline_size (void)
28141 int ret = 0;
28143 switch (DEFAULT_ABI)
28145 default:
28146 gcc_unreachable ();
28148 case ABI_AIX:
28149 ret = (TARGET_32BIT) ? 12 : 24;
28150 break;
28152 case ABI_ELFv2:
28153 gcc_assert (!TARGET_32BIT);
28154 ret = 32;
28155 break;
28157 case ABI_DARWIN:
28158 case ABI_V4:
28159 ret = (TARGET_32BIT) ? 40 : 48;
28160 break;
28163 return ret;
28166 /* Emit RTL insns to initialize the variable parts of a trampoline.
28167 FNADDR is an RTX for the address of the function's pure code.
28168 CXT is an RTX for the static chain value for the function. */
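/* Illustrative AIX layout (64-bit, so regsize == 8): the initialized
   trampoline is itself a 3-word function descriptor,
     word 0: entry address, copied from FNADDR's descriptor
     word 1: TOC pointer, copied from FNADDR's descriptor
     word 2: the static chain value CXT
   so calling the trampoline behaves like calling FNADDR with the
   static chain register loaded.  */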
28170 static void
28171 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
28173 int regsize = (TARGET_32BIT) ? 4 : 8;
28174 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
28175 rtx ctx_reg = force_reg (Pmode, cxt);
28176 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
28178 switch (DEFAULT_ABI)
28180 default:
28181 gcc_unreachable ();
28183 /* Under AIX, just build the 3-word function descriptor. */
28184 case ABI_AIX:
28186 rtx fnmem, fn_reg, toc_reg;
28188 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28189 error ("You cannot take the address of a nested function if you use "
28190 "the -mno-pointers-to-nested-functions option.");
28192 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
28193 fn_reg = gen_reg_rtx (Pmode);
28194 toc_reg = gen_reg_rtx (Pmode);
28196 /* Macro to shorten the code expansions below. */
28197 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
28199 m_tramp = replace_equiv_address (m_tramp, addr);
28201 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
28202 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
28203 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
28204 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
28205 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
28207 # undef MEM_PLUS
28209 break;
28211 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
28212 case ABI_ELFv2:
28213 case ABI_DARWIN:
28214 case ABI_V4:
28215 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
28216 LCT_NORMAL, VOIDmode, 4,
28217 addr, Pmode,
28218 GEN_INT (rs6000_trampoline_size ()), SImode,
28219 fnaddr, Pmode,
28220 ctx_reg, Pmode);
28221 break;
28226 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
28227 identifier as an argument, so the front end shouldn't look it up. */
28229 static bool
28230 rs6000_attribute_takes_identifier_p (const_tree attr_id)
28232 return is_attribute_p ("altivec", attr_id);
28235 /* Handle the "altivec" attribute. The attribute may have
28236 arguments as follows:
28238 __attribute__((altivec(vector__)))
28239 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
28240 __attribute__((altivec(bool__))) (always followed by 'unsigned')
28242 and may appear more than once (e.g., 'vector bool char') in a
28243 given declaration. */
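/* For illustration (simplified; the real expansion happens in the
   front end): "vector unsigned int" behaves as if written
     __attribute__((altivec(vector__))) unsigned int
   and "vector bool short" additionally applies the bool__ form.  */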
28245 static tree
28246 rs6000_handle_altivec_attribute (tree *node,
28247 tree name ATTRIBUTE_UNUSED,
28248 tree args,
28249 int flags ATTRIBUTE_UNUSED,
28250 bool *no_add_attrs)
28252 tree type = *node, result = NULL_TREE;
28253 enum machine_mode mode;
28254 int unsigned_p;
28255 char altivec_type
28256 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
28257 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
28258 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
28259 : '?');
28261 while (POINTER_TYPE_P (type)
28262 || TREE_CODE (type) == FUNCTION_TYPE
28263 || TREE_CODE (type) == METHOD_TYPE
28264 || TREE_CODE (type) == ARRAY_TYPE)
28265 type = TREE_TYPE (type);
28267 mode = TYPE_MODE (type);
28269 /* Check for invalid AltiVec type qualifiers. */
28270 if (type == long_double_type_node)
28271 error ("use of %<long double%> in AltiVec types is invalid");
28272 else if (type == boolean_type_node)
28273 error ("use of boolean types in AltiVec types is invalid");
28274 else if (TREE_CODE (type) == COMPLEX_TYPE)
28275 error ("use of %<complex%> in AltiVec types is invalid");
28276 else if (DECIMAL_FLOAT_MODE_P (mode))
28277 error ("use of decimal floating point types in AltiVec types is invalid");
28278 else if (!TARGET_VSX)
28280 if (type == long_unsigned_type_node || type == long_integer_type_node)
28282 if (TARGET_64BIT)
28283 error ("use of %<long%> in AltiVec types is invalid for "
28284 "64-bit code without -mvsx");
28285 else if (rs6000_warn_altivec_long)
28286 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28287 "use %<int%>");
28289 else if (type == long_long_unsigned_type_node
28290 || type == long_long_integer_type_node)
28291 error ("use of %<long long%> in AltiVec types is invalid without "
28292 "-mvsx");
28293 else if (type == double_type_node)
28294 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28297 switch (altivec_type)
28299 case 'v':
28300 unsigned_p = TYPE_UNSIGNED (type);
28301 switch (mode)
28303 case TImode:
28304 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
28305 break;
28306 case DImode:
28307 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
28308 break;
28309 case SImode:
28310 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
28311 break;
28312 case HImode:
28313 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
28314 break;
28315 case QImode:
28316 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
28317 break;
28318 case SFmode: result = V4SF_type_node; break;
28319 case DFmode: result = V2DF_type_node; break;
28320 /* If the user says 'vector int bool', we may be handed the 'bool'
28321 attribute _before_ the 'vector' attribute, and so select the
28322 proper type in the 'b' case below. */
28323 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
28324 case V2DImode: case V2DFmode:
28325 result = type;
28326 default: break;
28328 break;
28329 case 'b':
28330 switch (mode)
28332 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28333 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28334 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28335 case QImode: case V16QImode: result = bool_V16QI_type_node;
28336 default: break;
28338 break;
28339 case 'p':
28340 switch (mode)
28342 case V8HImode: result = pixel_V8HI_type_node;
28343 default: break;
28345 default: break;
28348 /* Propagate qualifiers attached to the element type
28349 onto the vector type. */
28350 if (result && result != type && TYPE_QUALS (type))
28351 result = build_qualified_type (result, TYPE_QUALS (type));
28353 *no_add_attrs = true; /* No need to hang on to the attribute. */
28355 if (result)
28356 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28358 return NULL_TREE;
28361 /* AltiVec defines four built-in scalar types that serve as vector
28362 elements; we must teach the compiler how to mangle them. */
28364 static const char *
28365 rs6000_mangle_type (const_tree type)
28367 type = TYPE_MAIN_VARIANT (type);
28369 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28370 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28371 return NULL;
28373 if (type == bool_char_type_node) return "U6__boolc";
28374 if (type == bool_short_type_node) return "U6__bools";
28375 if (type == pixel_type_node) return "u7__pixel";
28376 if (type == bool_int_type_node) return "U6__booli";
28377 if (type == bool_long_type_node) return "U6__booll";
28379 /* Mangle IBM extended float long double as `g' (__float128) on
28380 powerpc*-linux where long-double-64 previously was the default. */
28381 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28382 && TARGET_ELF
28383 && TARGET_LONG_DOUBLE_128
28384 && !TARGET_IEEEQUAD)
28385 return "g";
28387 /* For all other types, use normal C++ mangling. */
28388 return NULL;
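/* Mangling example (illustrative): a C++ declaration
     void f (__pixel);
   mangles as _Z1fu7__pixel, using the vendor-extended-type encoding
   returned above.  */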
28391 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28392 struct attribute_spec.handler. */
28394 static tree
28395 rs6000_handle_longcall_attribute (tree *node, tree name,
28396 tree args ATTRIBUTE_UNUSED,
28397 int flags ATTRIBUTE_UNUSED,
28398 bool *no_add_attrs)
28400 if (TREE_CODE (*node) != FUNCTION_TYPE
28401 && TREE_CODE (*node) != FIELD_DECL
28402 && TREE_CODE (*node) != TYPE_DECL)
28404 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28405 name);
28406 *no_add_attrs = true;
28409 return NULL_TREE;
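/* Typical use (illustration):
     extern void far_away (void) __attribute__ ((longcall));
   Calls to far_away are then emitted through rs6000_longcall_ref
   below rather than as a direct "bl".  */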
28412 /* Set longcall attributes on all functions declared when
28413 rs6000_default_long_calls is true. */
28414 static void
28415 rs6000_set_default_type_attributes (tree type)
28417 if (rs6000_default_long_calls
28418 && (TREE_CODE (type) == FUNCTION_TYPE
28419 || TREE_CODE (type) == METHOD_TYPE))
28420 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
28421 NULL_TREE,
28422 TYPE_ATTRIBUTES (type));
28424 #if TARGET_MACHO
28425 darwin_set_default_type_attributes (type);
28426 #endif
28429 /* Return a reference suitable for calling a function with the
28430 longcall attribute. */
28433 rs6000_longcall_ref (rtx call_ref)
28435 const char *call_name;
28436 tree node;
28438 if (GET_CODE (call_ref) != SYMBOL_REF)
28439 return call_ref;
28441 /* System V adds '.' to the internal name, so skip any leading dots. */
28442 call_name = XSTR (call_ref, 0);
28443 if (*call_name == '.')
28445 while (*call_name == '.')
28446 call_name++;
28448 node = get_identifier (call_name);
28449 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
28452 return force_reg (Pmode, call_ref);
28455 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
28456 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
28457 #endif
28459 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
28460 struct attribute_spec.handler. */
28461 static tree
28462 rs6000_handle_struct_attribute (tree *node, tree name,
28463 tree args ATTRIBUTE_UNUSED,
28464 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
28466 tree *type = NULL;
28467 if (DECL_P (*node))
28469 if (TREE_CODE (*node) == TYPE_DECL)
28470 type = &TREE_TYPE (*node);
28472 else
28473 type = node;
28475 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
28476 || TREE_CODE (*type) == UNION_TYPE)))
28478 warning (OPT_Wattributes, "%qE attribute ignored", name);
28479 *no_add_attrs = true;
28482 else if ((is_attribute_p ("ms_struct", name)
28483 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
28484 || ((is_attribute_p ("gcc_struct", name)
28485 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
28487 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
28488 name);
28489 *no_add_attrs = true;
28492 return NULL_TREE;
28495 static bool
28496 rs6000_ms_bitfield_layout_p (const_tree record_type)
28498 return ((TARGET_USE_MS_BITFIELD_LAYOUT
28499 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
28500 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
28503 #ifdef USING_ELFOS_H
28505 /* A get_unnamed_section callback, used for switching to toc_section. */
28507 static void
28508 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28510 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28511 && TARGET_MINIMAL_TOC
28512 && !TARGET_RELOCATABLE)
28514 if (!toc_initialized)
28516 toc_initialized = 1;
28517 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28518 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
28519 fprintf (asm_out_file, "\t.tc ");
28520 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
28521 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28522 fprintf (asm_out_file, "\n");
28524 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28525 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28526 fprintf (asm_out_file, " = .+32768\n");
28528 else
28529 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28531 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28532 && !TARGET_RELOCATABLE)
28533 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28534 else
28536 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28537 if (!toc_initialized)
28539 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28540 fprintf (asm_out_file, " = .+32768\n");
28541 toc_initialized = 1;
28546 /* Implement TARGET_ASM_INIT_SECTIONS. */
28548 static void
28549 rs6000_elf_asm_init_sections (void)
28551 toc_section
28552 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
28554 sdata2_section
28555 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
28556 SDATA2_SECTION_ASM_OP);
28559 /* Implement TARGET_SELECT_RTX_SECTION. */
28561 static section *
28562 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
28563 unsigned HOST_WIDE_INT align)
28565 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28566 return toc_section;
28567 else
28568 return default_elf_select_rtx_section (mode, x, align);
28571 /* For a SYMBOL_REF, set generic flags and then perform some
28572 target-specific processing.
28574 When the AIX ABI is requested on a non-AIX system, replace the
28575 function name with the real name (with a leading .) rather than the
28576 function descriptor name. This saves a lot of overriding code to
28577 read the prefixes. */
28579 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
28580 static void
28581 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
28583 default_encode_section_info (decl, rtl, first);
28585 if (first
28586 && TREE_CODE (decl) == FUNCTION_DECL
28587 && !TARGET_AIX
28588 && DEFAULT_ABI == ABI_AIX)
28590 rtx sym_ref = XEXP (rtl, 0);
28591 size_t len = strlen (XSTR (sym_ref, 0));
28592 char *str = XALLOCAVEC (char, len + 2);
28593 str[0] = '.';
28594 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
28595 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
28599 static inline bool
28600 compare_section_name (const char *section, const char *templ)
28602 int len;
28604 len = strlen (templ);
28605 return (strncmp (section, templ, len) == 0
28606 && (section[len] == 0 || section[len] == '.'));
28609 bool
28610 rs6000_elf_in_small_data_p (const_tree decl)
28612 if (rs6000_sdata == SDATA_NONE)
28613 return false;
28615 /* We want to merge strings, so we never consider them small data. */
28616 if (TREE_CODE (decl) == STRING_CST)
28617 return false;
28619 /* Functions are never in the small data area. */
28620 if (TREE_CODE (decl) == FUNCTION_DECL)
28621 return false;
28623 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
28625 const char *section = DECL_SECTION_NAME (decl);
28626 if (compare_section_name (section, ".sdata")
28627 || compare_section_name (section, ".sdata2")
28628 || compare_section_name (section, ".gnu.linkonce.s")
28629 || compare_section_name (section, ".sbss")
28630 || compare_section_name (section, ".sbss2")
28631 || compare_section_name (section, ".gnu.linkonce.sb")
28632 || strcmp (section, ".PPC.EMB.sdata0") == 0
28633 || strcmp (section, ".PPC.EMB.sbss0") == 0)
28634 return true;
28636 else
28638 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
28640 if (size > 0
28641 && size <= g_switch_value
28642 /* If it's not public, and we're not going to reference it there,
28643 there's no need to put it in the small data section. */
28644 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
28645 return true;
28648 return false;
28651 #endif /* USING_ELFOS_H */
28653 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
28655 static bool
28656 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
28658 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
28661 /* Do not place thread-local symbols refs in the object blocks. */
28663 static bool
28664 rs6000_use_blocks_for_decl_p (const_tree decl)
28666 return !DECL_THREAD_LOCAL_P (decl);
28669 /* Return a REG that occurs in ADDR with coefficient 1.
28670 ADDR can be effectively incremented by incrementing REG.
28672 r0 is special and we must not select it as an address
28673 register by this routine since our caller will try to
28674 increment the returned register via an "la" instruction. */
28677 find_addr_reg (rtx addr)
28679 while (GET_CODE (addr) == PLUS)
28681 if (GET_CODE (XEXP (addr, 0)) == REG
28682 && REGNO (XEXP (addr, 0)) != 0)
28683 addr = XEXP (addr, 0);
28684 else if (GET_CODE (XEXP (addr, 1)) == REG
28685 && REGNO (XEXP (addr, 1)) != 0)
28686 addr = XEXP (addr, 1);
28687 else if (CONSTANT_P (XEXP (addr, 0)))
28688 addr = XEXP (addr, 1);
28689 else if (CONSTANT_P (XEXP (addr, 1)))
28690 addr = XEXP (addr, 0);
28691 else
28692 gcc_unreachable ();
28694 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
28695 return addr;
28698 void
28699 rs6000_fatal_bad_address (rtx op)
28701 fatal_insn ("bad address", op);
28704 #if TARGET_MACHO
28706 typedef struct branch_island_d {
28707 tree function_name;
28708 tree label_name;
28709 int line_number;
28710 } branch_island;
28713 static vec<branch_island, va_gc> *branch_islands;
28715 /* Remember to generate a branch island for far calls to the given
28716 function. */
28718 static void
28719 add_compiler_branch_island (tree label_name, tree function_name,
28720 int line_number)
28722 branch_island bi = {function_name, label_name, line_number};
28723 vec_safe_push (branch_islands, bi);
28726 /* Generate far-jump branch islands for everything recorded in
28727 branch_islands. Invoked immediately after the last instruction of
28728 the epilogue has been emitted; the branch islands must be appended
28729 to, and contiguous with, the function body. Mach-O stubs are
28730 generated in machopic_output_stub(). */
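/* Shape of a non-PIC island (illustrative; matches the template built
   below):
     L42: lis   r12,hi16(_foo)
          ori   r12,r12,lo16(_foo)
          mtctr r12
          bctr
   The PIC variant instead materializes the address relative to an
   "L42_pic" label via mflr/bcl/addis/addi.  */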
28732 static void
28733 macho_branch_islands (void)
28735 char tmp_buf[512];
28737 while (!vec_safe_is_empty (branch_islands))
28739 branch_island *bi = &branch_islands->last ();
28740 const char *label = IDENTIFIER_POINTER (bi->label_name);
28741 const char *name = IDENTIFIER_POINTER (bi->function_name);
28742 char name_buf[512];
28743 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28744 if (name[0] == '*' || name[0] == '&')
28745 strcpy (name_buf, name+1);
28746 else
28748 name_buf[0] = '_';
28749 strcpy (name_buf+1, name);
28751 strcpy (tmp_buf, "\n");
28752 strcat (tmp_buf, label);
28753 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28754 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28755 dbxout_stabd (N_SLINE, bi->line_number);
28756 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28757 if (flag_pic)
28759 if (TARGET_LINK_STACK)
28761 char name[32];
28762 get_ppc476_thunk_name (name);
28763 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
28764 strcat (tmp_buf, name);
28765 strcat (tmp_buf, "\n");
28766 strcat (tmp_buf, label);
28767 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28769 else
28771 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
28772 strcat (tmp_buf, label);
28773 strcat (tmp_buf, "_pic\n");
28774 strcat (tmp_buf, label);
28775 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28778 strcat (tmp_buf, "\taddis r11,r11,ha16(");
28779 strcat (tmp_buf, name_buf);
28780 strcat (tmp_buf, " - ");
28781 strcat (tmp_buf, label);
28782 strcat (tmp_buf, "_pic)\n");
28784 strcat (tmp_buf, "\tmtlr r0\n");
28786 strcat (tmp_buf, "\taddi r12,r11,lo16(");
28787 strcat (tmp_buf, name_buf);
28788 strcat (tmp_buf, " - ");
28789 strcat (tmp_buf, label);
28790 strcat (tmp_buf, "_pic)\n");
28792 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
28794 else
28796 strcat (tmp_buf, ":\nlis r12,hi16(");
28797 strcat (tmp_buf, name_buf);
28798 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
28799 strcat (tmp_buf, name_buf);
28800 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
28802 output_asm_insn (tmp_buf, 0);
28803 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28804 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28805 dbxout_stabd (N_SLINE, bi->line_number);
28806 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28807 branch_islands->pop ();
28811 /* NO_PREVIOUS_DEF checks whether the function name is already in the
28812 branch island list. */
28814 static int
28815 no_previous_def (tree function_name)
28817 branch_island *bi;
28818 unsigned ix;
28820 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28821 if (function_name == bi->function_name)
28822 return 0;
28823 return 1;
28826 /* GET_PREV_LABEL gets the label name from the previous definition of
28827 the function. */
28829 static tree
28830 get_prev_label (tree function_name)
28832 branch_island *bi;
28833 unsigned ix;
28835 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28836 if (function_name == bi->function_name)
28837 return bi->label_name;
28838 return NULL_TREE;
28841 /* INSN is either a function call or a millicode call. It may have an
28842 unconditional jump in its delay slot.
28844 CALL_DEST is the routine we are calling. */
28846 char *
28847 output_call (rtx insn, rtx *operands, int dest_operand_number,
28848 int cookie_operand_number)
28850 static char buf[256];
28851 if (darwin_emit_branch_islands
28852 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
28853 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
28855 tree labelname;
28856 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
28858 if (no_previous_def (funname))
28860 rtx label_rtx = gen_label_rtx ();
28861 char *label_buf, temp_buf[256];
28862 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
28863 CODE_LABEL_NUMBER (label_rtx));
28864 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
28865 labelname = get_identifier (label_buf);
28866 add_compiler_branch_island (labelname, funname, insn_line (insn));
28868 else
28869 labelname = get_prev_label (funname);
28871 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
28872 instruction will reach 'foo', otherwise link as 'bl L42'".
28873 "L42" should be a 'branch island', that will do a far jump to
28874 'foo'. Branch islands are generated in
28875 macho_branch_islands(). */
28876 sprintf (buf, "jbsr %%z%d,%.246s",
28877 dest_operand_number, IDENTIFIER_POINTER (labelname));
28879 else
28880 sprintf (buf, "bl %%z%d", dest_operand_number);
28881 return buf;
28884 /* Generate PIC and indirect symbol stubs. */
28886 void
28887 machopic_output_stub (FILE *file, const char *symb, const char *stub)
28889 unsigned int length;
28890 char *symbol_name, *lazy_ptr_name;
28891 char *local_label_0;
28892 static int label = 0;
28894 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
28895 symb = (*targetm.strip_name_encoding) (symb);
28898 length = strlen (symb);
28899 symbol_name = XALLOCAVEC (char, length + 32);
28900 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
28902 lazy_ptr_name = XALLOCAVEC (char, length + 32);
28903 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
28905 if (flag_pic == 2)
28906 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
28907 else
28908 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
28910 if (flag_pic == 2)
28912 fprintf (file, "\t.align 5\n");
28914 fprintf (file, "%s:\n", stub);
28915 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28917 label++;
28918 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
28919 sprintf (local_label_0, "\"L%011d$spb\"", label);
28921 fprintf (file, "\tmflr r0\n");
28922 if (TARGET_LINK_STACK)
28924 char name[32];
28925 get_ppc476_thunk_name (name);
28926 fprintf (file, "\tbl %s\n", name);
28927 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28929 else
28931 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
28932 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28934 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
28935 lazy_ptr_name, local_label_0);
28936 fprintf (file, "\tmtlr r0\n");
28937 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
28938 (TARGET_64BIT ? "ldu" : "lwzu"),
28939 lazy_ptr_name, local_label_0);
28940 fprintf (file, "\tmtctr r12\n");
28941 fprintf (file, "\tbctr\n");
28943 else
28945 fprintf (file, "\t.align 4\n");
28947 fprintf (file, "%s:\n", stub);
28948 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28950 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
28951 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
28952 (TARGET_64BIT ? "ldu" : "lwzu"),
28953 lazy_ptr_name);
28954 fprintf (file, "\tmtctr r12\n");
28955 fprintf (file, "\tbctr\n");
28958 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
28959 fprintf (file, "%s:\n", lazy_ptr_name);
28960 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28961 fprintf (file, "%sdyld_stub_binding_helper\n",
28962 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
28965 /* Legitimize PIC addresses. If the address is already
28966 position-independent, we return ORIG. Newly generated
28967 position-independent addresses go into a reg. This is REG if
28968 nonzero, otherwise we allocate register(s) as necessary. */
28970 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
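/* SMALL_INT tests whether X fits a signed 16-bit displacement: for
   example 0x7fff and -0x8000 pass but 0x8000 does not, so below the
   offset can be folded into the address instead of needing its own
   register.  */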
28973 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
28974 rtx reg)
28976 rtx base, offset;
28978 if (reg == NULL && ! reload_in_progress && ! reload_completed)
28979 reg = gen_reg_rtx (Pmode);
28981 if (GET_CODE (orig) == CONST)
28983 rtx reg_temp;
28985 if (GET_CODE (XEXP (orig, 0)) == PLUS
28986 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
28987 return orig;
28989 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
28991 /* Use a different reg for the intermediate value, as
28992 it will be marked UNCHANGING. */
28993 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
28994 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
28995 Pmode, reg_temp);
28996 offset =
28997 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
28998 Pmode, reg);
29000 if (GET_CODE (offset) == CONST_INT)
29002 if (SMALL_INT (offset))
29003 return plus_constant (Pmode, base, INTVAL (offset));
29004 else if (! reload_in_progress && ! reload_completed)
29005 offset = force_reg (Pmode, offset);
29006 else
29008 rtx mem = force_const_mem (Pmode, orig);
29009 return machopic_legitimize_pic_address (mem, Pmode, reg);
29012 return gen_rtx_PLUS (Pmode, base, offset);
29015 /* Fall back on generic machopic code. */
29016 return machopic_legitimize_pic_address (orig, mode, reg);
29019 /* Output a .machine directive for the Darwin assembler, and call
29020 the generic start_file routine. */
29022 static void
29023 rs6000_darwin_file_start (void)
29025 static const struct
29027 const char *arg;
29028 const char *name;
29029 HOST_WIDE_INT if_set;
29030 } mapping[] = {
29031 { "ppc64", "ppc64", MASK_64BIT },
29032 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
29033 { "power4", "ppc970", 0 },
29034 { "G5", "ppc970", 0 },
29035 { "7450", "ppc7450", 0 },
29036 { "7400", "ppc7400", MASK_ALTIVEC },
29037 { "G4", "ppc7400", 0 },
29038 { "750", "ppc750", 0 },
29039 { "740", "ppc750", 0 },
29040 { "G3", "ppc750", 0 },
29041 { "604e", "ppc604e", 0 },
29042 { "604", "ppc604", 0 },
29043 { "603e", "ppc603", 0 },
29044 { "603", "ppc603", 0 },
29045 { "601", "ppc601", 0 },
29046 { NULL, "ppc", 0 } };
29047 const char *cpu_id = "";
29048 size_t i;
29050 rs6000_file_start ();
29051 darwin_file_start ();
29053 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
29055 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
29056 cpu_id = rs6000_default_cpu;
29058 if (global_options_set.x_rs6000_cpu_index)
29059 cpu_id = processor_target_table[rs6000_cpu_index].name;
29061 /* Look through the mapping array. Pick the first name that either
29062 matches the argument, has a bit set in IF_SET that is also set
29063 in the target flags, or has a NULL name. */
29065 i = 0;
29066 while (mapping[i].arg != NULL
29067 && strcmp (mapping[i].arg, cpu_id) != 0
29068 && (mapping[i].if_set & rs6000_isa_flags) == 0)
29069 i++;
29071 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
29074 #endif /* TARGET_MACHO */
29076 #if TARGET_ELF
29077 static int
29078 rs6000_elf_reloc_rw_mask (void)
29080 if (flag_pic)
29081 return 3;
29082 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29083 return 2;
29084 else
29085 return 0;
29088 /* Record an element in the table of global constructors. SYMBOL is
29089 a SYMBOL_REF of the function to be called; PRIORITY is a number
29090 between 0 and MAX_INIT_PRIORITY.
29092 This differs from default_named_section_asm_out_constructor in
29093 that we have special handling for -mrelocatable. */
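/* Example (hypothetical priority): a constructor registered with
   priority 200 lands in section ".ctors.65335", i.e. MAX_INIT_PRIORITY
   (65535) minus 200, so the linker's section sort yields the intended
   run order.  */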
29095 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
29096 static void
29097 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
29099 const char *section = ".ctors";
29100 char buf[16];
29102 if (priority != DEFAULT_INIT_PRIORITY)
29104 sprintf (buf, ".ctors.%.5u",
29105 /* Invert the numbering so the linker puts us in the proper
29106 order; constructors are run from right to left, and the
29107 linker sorts in increasing order. */
29108 MAX_INIT_PRIORITY - priority);
29109 section = buf;
29112 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29113 assemble_align (POINTER_SIZE);
29115 if (TARGET_RELOCATABLE)
29117 fputs ("\t.long (", asm_out_file);
29118 output_addr_const (asm_out_file, symbol);
29119 fputs (")@fixup\n", asm_out_file);
29121 else
29122 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29125 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
29126 static void
29127 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
29129 const char *section = ".dtors";
29130 char buf[16];
29132 if (priority != DEFAULT_INIT_PRIORITY)
29134 sprintf (buf, ".dtors.%.5u",
29135 /* Invert the numbering so the linker puts us in the proper
29136 order; destructors are run from right to left, and the
29137 linker sorts in increasing order. */
29138 MAX_INIT_PRIORITY - priority);
29139 section = buf;
29142 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29143 assemble_align (POINTER_SIZE);
29145 if (TARGET_RELOCATABLE)
29147 fputs ("\t.long (", asm_out_file);
29148 output_addr_const (asm_out_file, symbol);
29149 fputs (")@fixup\n", asm_out_file);
29151 else
29152 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29155 void
29156 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
29158 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
29160 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
29161 ASM_OUTPUT_LABEL (file, name);
29162 fputs (DOUBLE_INT_ASM_OP, file);
29163 rs6000_output_function_entry (file, name);
29164 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
29165 if (DOT_SYMBOLS)
29167 fputs ("\t.size\t", file);
29168 assemble_name (file, name);
29169 fputs (",24\n\t.type\t.", file);
29170 assemble_name (file, name);
29171 fputs (",@function\n", file);
29172 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
29174 fputs ("\t.globl\t.", file);
29175 assemble_name (file, name);
29176 putc ('\n', file);
29179 else
29180 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29181 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29182 rs6000_output_function_entry (file, name);
29183 fputs (":\n", file);
29184 return;
29187 if (TARGET_RELOCATABLE
29188 && !TARGET_SECURE_PLT
29189 && (get_pool_size () != 0 || crtl->profile)
29190 && uses_TOC ())
29192 char buf[256];
29194 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
29196 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
29197 fprintf (file, "\t.long ");
29198 assemble_name (file, buf);
29199 putc ('-', file);
29200 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29201 assemble_name (file, buf);
29202 putc ('\n', file);
29205 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29206 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29208 if (DEFAULT_ABI == ABI_AIX)
29210 const char *desc_name, *orig_name;
29212 orig_name = (*targetm.strip_name_encoding) (name);
29213 desc_name = orig_name;
29214 while (*desc_name == '.')
29215 desc_name++;
29217 if (TREE_PUBLIC (decl))
29218 fprintf (file, "\t.globl %s\n", desc_name);
29220 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29221 fprintf (file, "%s:\n", desc_name);
29222 fprintf (file, "\t.long %s\n", orig_name);
29223 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
29224 fputs ("\t.long 0\n", file);
29225 fprintf (file, "\t.previous\n");
29227 ASM_OUTPUT_LABEL (file, name);
29230 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
29231 static void
29232 rs6000_elf_file_end (void)
29234 #ifdef HAVE_AS_GNU_ATTRIBUTE
29235 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
29237 if (rs6000_passes_float)
29238 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
29239 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
29240 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
29241 : 2));
29242 if (rs6000_passes_vector)
29243 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
29244 (TARGET_ALTIVEC_ABI ? 2
29245 : TARGET_SPE_ABI ? 3
29246 : 1));
29247 if (rs6000_returns_struct)
29248 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
29249 aix_struct_return ? 2 : 1);
29251 #endif
29252 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29253 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
29254 file_end_indicate_exec_stack ();
29255 #endif
29257 #endif
29259 #if TARGET_XCOFF
29260 static void
29261 rs6000_xcoff_asm_output_anchor (rtx symbol)
29263 char buffer[100];
29265 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
29266 SYMBOL_REF_BLOCK_OFFSET (symbol));
29267 fprintf (asm_out_file, "%s", SET_ASM_OP);
29268 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
29269 fprintf (asm_out_file, ",");
29270 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
29271 fprintf (asm_out_file, "\n");
29274 static void
29275 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
29277 fputs (GLOBAL_ASM_OP, stream);
29278 RS6000_OUTPUT_BASENAME (stream, name);
29279 putc ('\n', stream);
29282 /* A get_unnamed_section callback, used for read-only sections.  DIRECTIVE
29283 points to the section string variable. */
29285 static void
29286 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
29288 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
29289 *(const char *const *) directive,
29290 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29293 /* Likewise for read-write sections. */
29295 static void
29296 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
29298 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
29299 *(const char *const *) directive,
29300 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29303 static void
29304 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
29306 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
29307 *(const char *const *) directive,
29308 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29311 /* A get_unnamed_section callback, used for switching to toc_section. */
29313 static void
29314 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
29316 if (TARGET_MINIMAL_TOC)
29318 /* toc_section is always selected at least once from
29319 rs6000_xcoff_file_start, so this is guaranteed to be
29320 defined exactly once in each file. */
29321 if (!toc_initialized)
29323 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
29324 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
29325 toc_initialized = 1;
29327 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
29328 (TARGET_32BIT ? "" : ",3"));
29330 else
29331 fputs ("\t.toc\n", asm_out_file);
29334 /* Implement TARGET_ASM_INIT_SECTIONS. */
29336 static void
29337 rs6000_xcoff_asm_init_sections (void)
29339 read_only_data_section
29340 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29341 &xcoff_read_only_section_name);
29343 private_data_section
29344 = get_unnamed_section (SECTION_WRITE,
29345 rs6000_xcoff_output_readwrite_section_asm_op,
29346 &xcoff_private_data_section_name);
29348 tls_data_section
29349 = get_unnamed_section (SECTION_TLS,
29350 rs6000_xcoff_output_tls_section_asm_op,
29351 &xcoff_tls_data_section_name);
29353 tls_private_data_section
29354 = get_unnamed_section (SECTION_TLS,
29355 rs6000_xcoff_output_tls_section_asm_op,
29356 &xcoff_private_data_section_name);
29358 read_only_private_data_section
29359 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29360 &xcoff_private_data_section_name);
29362 toc_section
29363 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
29365 readonly_data_section = read_only_data_section;
29366 exception_section = data_section;
29369 static int
29370 rs6000_xcoff_reloc_rw_mask (void)
29372 return 3;
29375 static void
29376 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
29377 tree decl ATTRIBUTE_UNUSED)
29379 int smclass;
29380 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
29382 if (flags & SECTION_CODE)
29383 smclass = 0;
29384 else if (flags & SECTION_TLS)
29385 smclass = 3;
29386 else if (flags & SECTION_WRITE)
29387 smclass = 2;
29388 else
29389 smclass = 1;
29391 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
29392 (flags & SECTION_CODE) ? "." : "",
29393 name, suffix[smclass], flags & SECTION_ENTSIZE);
29396 #define IN_NAMED_SECTION(DECL) \
29397 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29398 && DECL_SECTION_NAME (DECL) != NULL)
29400 static section *
29401 rs6000_xcoff_select_section (tree decl, int reloc,
29402 unsigned HOST_WIDE_INT align)
29404 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29405 named section. */
29406 if (align > BIGGEST_ALIGNMENT)
29408 resolve_unique_section (decl, reloc, true);
29409 if (IN_NAMED_SECTION (decl))
29410 return get_named_section (decl, NULL, reloc);
29413 if (decl_readonly_section (decl, reloc))
29415 if (TREE_PUBLIC (decl))
29416 return read_only_data_section;
29417 else
29418 return read_only_private_data_section;
29420 else
29422 #if HAVE_AS_TLS
29423 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29425 if (TREE_PUBLIC (decl))
29426 return tls_data_section;
29427 else if (bss_initializer_p (decl))
29429 /* Convert to COMMON to emit in BSS. */
29430 DECL_COMMON (decl) = 1;
29431 return tls_comm_section;
29433 else
29434 return tls_private_data_section;
29436 else
29437 #endif
29438 if (TREE_PUBLIC (decl))
29439 return data_section;
29440 else
29441 return private_data_section;
29445 static void
29446 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
29448 const char *name;
29450 /* Use select_section for private data and uninitialized data with
29451 alignment <= BIGGEST_ALIGNMENT. */
29452 if (!TREE_PUBLIC (decl)
29453 || DECL_COMMON (decl)
29454 || (DECL_INITIAL (decl) == NULL_TREE
29455 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
29456 || DECL_INITIAL (decl) == error_mark_node
29457 || (flag_zero_initialized_in_bss
29458 && initializer_zerop (DECL_INITIAL (decl))))
29459 return;
29461 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
29462 name = (*targetm.strip_name_encoding) (name);
29463 set_decl_section_name (decl, name);
29466 /* Select section for constant in constant pool.
29468 On RS/6000, all constants are in the private read-only data area.
29469 However, if this is being placed in the TOC it must be output as a
29470 toc entry. */
29472 static section *
29473 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
29474 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
29476 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
29477 return toc_section;
29478 else
29479 return read_only_private_data_section;
29482 /* Remove any trailing [DS] or the like from the symbol name. */
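/* For instance, "foo[DS]" comes back as just "foo".  The code below
   assumes the bracketed storage-mapping class suffix is always four
   characters, e.g. "[DS]" or "[RW]". */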
29484 static const char *
29485 rs6000_xcoff_strip_name_encoding (const char *name)
29487 size_t len;
29488 if (*name == '*')
29489 name++;
29490 len = strlen (name);
29491 if (name[len - 1] == ']')
29492 return ggc_alloc_string (name, len - 4);
29493 else
29494 return name;
29497 /* Section attributes. AIX is always PIC. */
29499 static unsigned int
29500 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
29502 unsigned int align;
29503 unsigned int flags = default_section_type_flags (decl, name, reloc);
29505 /* Align to at least UNIT size. */
29506 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
29507 align = MIN_UNITS_PER_WORD;
29508 else
29509 /* Increase alignment of large objects if not already stricter. */
29510 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
29511 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
29512 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
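/* The alignment is encoded into the SECTION_ENTSIZE bits as its
   base-two logarithm; e.g. an 8-byte alignment is returned as 3. */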
29514 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
29517 /* Output at beginning of assembler file.
29519 Initialize the section names for the RS/6000 at this point.
29521 Specify filename, including full path, to assembler.
29523 We want to go into the TOC section so at least one .toc will be emitted.
29524 Also, in order to output proper .bs/.es pairs, we need at least one static
29525 [RW] section emitted.
29527 Finally, declare mcount when profiling to make the assembler happy. */
29529 static void
29530 rs6000_xcoff_file_start (void)
29532 rs6000_gen_section_name (&xcoff_bss_section_name,
29533 main_input_filename, ".bss_");
29534 rs6000_gen_section_name (&xcoff_private_data_section_name,
29535 main_input_filename, ".rw_");
29536 rs6000_gen_section_name (&xcoff_read_only_section_name,
29537 main_input_filename, ".ro_");
29538 rs6000_gen_section_name (&xcoff_tls_data_section_name,
29539 main_input_filename, ".tls_");
29540 rs6000_gen_section_name (&xcoff_tbss_section_name,
29541 main_input_filename, ".tbss_[UL]");
29543 fputs ("\t.file\t", asm_out_file);
29544 output_quoted_string (asm_out_file, main_input_filename);
29545 fputc ('\n', asm_out_file);
29546 if (write_symbols != NO_DEBUG)
29547 switch_to_section (private_data_section);
29548 switch_to_section (text_section);
29549 if (profile_flag)
29550 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
29551 rs6000_file_start ();
29554 /* Output at end of assembler file.
29555 On the RS/6000, referencing data should automatically pull in text. */
29557 static void
29558 rs6000_xcoff_file_end (void)
29560 switch_to_section (text_section);
29561 fputs ("_section_.text:\n", asm_out_file);
29562 switch_to_section (data_section);
29563 fputs (TARGET_32BIT
29564 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
29565 asm_out_file);
29568 struct declare_alias_data
29570 FILE *file;
29571 bool function_descriptor;
29574 /* Declare alias N.  A helper function for call_for_symbol_and_aliases. */
29576 static bool
29577 rs6000_declare_alias (struct symtab_node *n, void *d)
29579 struct declare_alias_data *data = (struct declare_alias_data *)d;
29580 /* Main symbol is output specially, because varasm machinery does part of
29581 the job for us - we do not need to declare .globl/lglobs and such. */
29582 if (!n->alias || n->weakref)
29583 return false;
29585 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
29586 return false;
29588 /* Prevent assemble_alias from trying to use the .set pseudo operation,
29589 which does not behave as expected by the middle-end. */
29590 TREE_ASM_WRITTEN (n->decl) = true;
29592 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
29593 char *buffer = (char *) alloca (strlen (name) + 2);
29594 char *p;
29595 int dollar_inside = 0;
29597 strcpy (buffer, name);
29598 p = strchr (buffer, '$');
29599 while (p) {
29600 *p = '_';
29601 dollar_inside++;
29602 p = strchr (p + 1, '$');
29604 if (TREE_PUBLIC (n->decl))
29606 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
29608 if (dollar_inside) {
29609 if (data->function_descriptor)
29610 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
29611 else
29612 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
29614 if (data->function_descriptor)
29615 fputs ("\t.globl .", data->file);
29616 else
29617 fputs ("\t.globl ", data->file);
29618 RS6000_OUTPUT_BASENAME (data->file, buffer);
29619 putc ('\n', data->file);
29621 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
29622 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
29624 else
29626 if (dollar_inside)
29628 if (data->function_descriptor)
29629 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
29630 else
29631 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
29633 if (data->function_descriptor)
29634 fputs ("\t.lglobl .", data->file);
29635 else
29636 fputs ("\t.lglobl ", data->file);
29637 RS6000_OUTPUT_BASENAME (data->file, buffer);
29638 putc ('\n', data->file);
29640 if (data->function_descriptor)
29641 fputs (".", data->file);
29642 RS6000_OUTPUT_BASENAME (data->file, buffer);
29643 fputs (":\n", data->file);
29644 return false;
29647 /* This macro produces the initial definition of a function name.
29648 On the RS/6000, we need to place an extra '.' in the function name and
29649 output the function descriptor.
29650 Dollar signs are converted to underscores.
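(For example, a hypothetical function named "get$ver" would be emitted
under the label "get_ver", together with .rename directives mapping
"get_ver" and ".get_ver" back to the original "$" spellings, so the
assembler still sees the source names.)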
29652 The csect for the function will have already been created when
29653 text_section was selected. We do have to go back to that csect, however.
29655 The third and fourth parameters to the .function pseudo-op (16 and 044)
29656 are placeholders which no longer have any use.
29658 Because the AIX assembler's .set command has unexpected semantics, we output
29659 all aliases as alternative labels in front of the definition. */
29661 void
29662 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
29664 char *buffer = (char *) alloca (strlen (name) + 1);
29665 char *p;
29666 int dollar_inside = 0;
29667 struct declare_alias_data data = {file, false};
29669 strcpy (buffer, name);
29670 p = strchr (buffer, '$');
29671 while (p) {
29672 *p = '_';
29673 dollar_inside++;
29674 p = strchr (p + 1, '$');
29676 if (TREE_PUBLIC (decl))
29678 if (!RS6000_WEAK || !DECL_WEAK (decl))
29680 if (dollar_inside) {
29681 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
29682 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
29684 fputs ("\t.globl .", file);
29685 RS6000_OUTPUT_BASENAME (file, buffer);
29686 putc ('\n', file);
29689 else
29691 if (dollar_inside) {
29692 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
29693 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
29695 fputs ("\t.lglobl .", file);
29696 RS6000_OUTPUT_BASENAME (file, buffer);
29697 putc ('\n', file);
29699 fputs ("\t.csect ", file);
29700 RS6000_OUTPUT_BASENAME (file, buffer);
29701 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
29702 RS6000_OUTPUT_BASENAME (file, buffer);
29703 fputs (":\n", file);
29704 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29705 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
29706 RS6000_OUTPUT_BASENAME (file, buffer);
29707 fputs (", TOC[tc0], 0\n", file);
29708 in_section = NULL;
29709 switch_to_section (function_section (decl));
29710 putc ('.', file);
29711 RS6000_OUTPUT_BASENAME (file, buffer);
29712 fputs (":\n", file);
29713 data.function_descriptor = true;
29714 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29715 if (write_symbols != NO_DEBUG && !DECL_IGNORED_P (decl))
29716 xcoffout_declare_function (file, decl, buffer);
29717 return;
29720 /* This macro produces the initial definition of an object (variable) name.
29721 Because the AIX assembler's .set command has unexpected semantics, we output
29722 all aliases as alternative labels in front of the definition. */
29724 void
29725 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
29727 struct declare_alias_data data = {file, false};
29728 RS6000_OUTPUT_BASENAME (file, name);
29729 fputs (":\n", file);
29730 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
29733 #ifdef HAVE_AS_TLS
29734 static void
29735 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
29737 rtx symbol;
29738 int flags;
29740 default_encode_section_info (decl, rtl, first);
29742 /* Careful not to prod global register variables. */
29743 if (!MEM_P (rtl))
29744 return;
29745 symbol = XEXP (rtl, 0);
29746 if (GET_CODE (symbol) != SYMBOL_REF)
29747 return;
29749 flags = SYMBOL_REF_FLAGS (symbol);
29751 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29752 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
29754 SYMBOL_REF_FLAGS (symbol) = flags;
29756 #endif /* HAVE_AS_TLS */
29757 #endif /* TARGET_XCOFF */
29759 /* Compute a (partial) cost for rtx X. Return true if the complete
29760 cost has been computed, and false if subexpressions should be
29761 scanned. In either case, *TOTAL contains the cost result. */
29763 static bool
29764 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
29765 int *total, bool speed)
29767 enum machine_mode mode = GET_MODE (x);
29769 switch (code)
29771 /* On the RS/6000, if it is valid in the insn, it is free. */
29772 case CONST_INT:
29773 if (((outer_code == SET
29774 || outer_code == PLUS
29775 || outer_code == MINUS)
29776 && (satisfies_constraint_I (x)
29777 || satisfies_constraint_L (x)))
29778 || (outer_code == AND
29779 && (satisfies_constraint_K (x)
29780 || (mode == SImode
29781 ? satisfies_constraint_L (x)
29782 : satisfies_constraint_J (x))
29783 || mask_operand (x, mode)
29784 || (mode == DImode
29785 && mask64_operand (x, DImode))))
29786 || ((outer_code == IOR || outer_code == XOR)
29787 && (satisfies_constraint_K (x)
29788 || (mode == SImode
29789 ? satisfies_constraint_L (x)
29790 : satisfies_constraint_J (x))))
29791 || outer_code == ASHIFT
29792 || outer_code == ASHIFTRT
29793 || outer_code == LSHIFTRT
29794 || outer_code == ROTATE
29795 || outer_code == ROTATERT
29796 || outer_code == ZERO_EXTRACT
29797 || (outer_code == MULT
29798 && satisfies_constraint_I (x))
29799 || ((outer_code == DIV || outer_code == UDIV
29800 || outer_code == MOD || outer_code == UMOD)
29801 && exact_log2 (INTVAL (x)) >= 0)
29802 || (outer_code == COMPARE
29803 && (satisfies_constraint_I (x)
29804 || satisfies_constraint_K (x)))
29805 || ((outer_code == EQ || outer_code == NE)
29806 && (satisfies_constraint_I (x)
29807 || satisfies_constraint_K (x)
29808 || (mode == SImode
29809 ? satisfies_constraint_L (x)
29810 : satisfies_constraint_J (x))))
29811 || (outer_code == GTU
29812 && satisfies_constraint_I (x))
29813 || (outer_code == LTU
29814 && satisfies_constraint_P (x)))
29816 *total = 0;
29817 return true;
29819 else if ((outer_code == PLUS
29820 && reg_or_add_cint_operand (x, VOIDmode))
29821 || (outer_code == MINUS
29822 && reg_or_sub_cint_operand (x, VOIDmode))
29823 || ((outer_code == SET
29824 || outer_code == IOR
29825 || outer_code == XOR)
29826 && (INTVAL (x)
29827 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
29829 *total = COSTS_N_INSNS (1);
29830 return true;
29832 /* FALLTHRU */
29834 case CONST_DOUBLE:
29835 case CONST_WIDE_INT:
29836 case CONST:
29837 case HIGH:
29838 case SYMBOL_REF:
29839 case MEM:
29840 /* When optimizing for size, MEM should be slightly more expensive
29841 than generating the address, e.g., (plus (reg) (const)).
29842 L1 cache latency is about two instructions. */
29843 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
29844 return true;
29846 case LABEL_REF:
29847 *total = 0;
29848 return true;
29850 case PLUS:
29851 case MINUS:
29852 if (FLOAT_MODE_P (mode))
29853 *total = rs6000_cost->fp;
29854 else
29855 *total = COSTS_N_INSNS (1);
29856 return false;
29858 case MULT:
29859 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29860 && satisfies_constraint_I (XEXP (x, 1)))
29862 if (INTVAL (XEXP (x, 1)) >= -256
29863 && INTVAL (XEXP (x, 1)) <= 255)
29864 *total = rs6000_cost->mulsi_const9;
29865 else
29866 *total = rs6000_cost->mulsi_const;
29868 else if (mode == SFmode)
29869 *total = rs6000_cost->fp;
29870 else if (FLOAT_MODE_P (mode))
29871 *total = rs6000_cost->dmul;
29872 else if (mode == DImode)
29873 *total = rs6000_cost->muldi;
29874 else
29875 *total = rs6000_cost->mulsi;
29876 return false;
29878 case FMA:
29879 if (mode == SFmode)
29880 *total = rs6000_cost->fp;
29881 else
29882 *total = rs6000_cost->dmul;
29883 break;
29885 case DIV:
29886 case MOD:
29887 if (FLOAT_MODE_P (mode))
29889 *total = mode == DFmode ? rs6000_cost->ddiv
29890 : rs6000_cost->sdiv;
29891 return false;
29893 /* FALLTHRU */
29895 case UDIV:
29896 case UMOD:
29897 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29898 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
29900 if (code == DIV || code == MOD)
29901 /* Shift, addze */
29902 *total = COSTS_N_INSNS (2);
29903 else
29904 /* Shift */
29905 *total = COSTS_N_INSNS (1);
29907 else
29909 if (GET_MODE (XEXP (x, 1)) == DImode)
29910 *total = rs6000_cost->divdi;
29911 else
29912 *total = rs6000_cost->divsi;
29914 /* Add in shift and subtract for MOD. */
29915 if (code == MOD || code == UMOD)
29916 *total += COSTS_N_INSNS (2);
29917 return false;
29919 case CTZ:
29920 case FFS:
29921 *total = COSTS_N_INSNS (4);
29922 return false;
29924 case POPCOUNT:
29925 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
29926 return false;
29928 case PARITY:
29929 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
29930 return false;
29932 case NOT:
29933 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
29935 *total = 0;
29936 return false;
29938 /* FALLTHRU */
29940 case AND:
29941 case CLZ:
29942 case IOR:
29943 case XOR:
29944 case ZERO_EXTRACT:
29945 *total = COSTS_N_INSNS (1);
29946 return false;
29948 case ASHIFT:
29949 case ASHIFTRT:
29950 case LSHIFTRT:
29951 case ROTATE:
29952 case ROTATERT:
29953 /* Handle mul_highpart. */
29954 if (outer_code == TRUNCATE
29955 && GET_CODE (XEXP (x, 0)) == MULT)
29957 if (mode == DImode)
29958 *total = rs6000_cost->muldi;
29959 else
29960 *total = rs6000_cost->mulsi;
29961 return true;
29963 else if (outer_code == AND)
29964 *total = 0;
29965 else
29966 *total = COSTS_N_INSNS (1);
29967 return false;
29969 case SIGN_EXTEND:
29970 case ZERO_EXTEND:
29971 if (GET_CODE (XEXP (x, 0)) == MEM)
29972 *total = 0;
29973 else
29974 *total = COSTS_N_INSNS (1);
29975 return false;
29977 case COMPARE:
29978 case NEG:
29979 case ABS:
29980 if (!FLOAT_MODE_P (mode))
29982 *total = COSTS_N_INSNS (1);
29983 return false;
29985 /* FALLTHRU */
29987 case FLOAT:
29988 case UNSIGNED_FLOAT:
29989 case FIX:
29990 case UNSIGNED_FIX:
29991 case FLOAT_TRUNCATE:
29992 *total = rs6000_cost->fp;
29993 return false;
29995 case FLOAT_EXTEND:
29996 if (mode == DFmode)
29997 *total = 0;
29998 else
29999 *total = rs6000_cost->fp;
30000 return false;
30002 case UNSPEC:
30003 switch (XINT (x, 1))
30005 case UNSPEC_FRSP:
30006 *total = rs6000_cost->fp;
30007 return true;
30009 default:
30010 break;
30012 break;
30014 case CALL:
30015 case IF_THEN_ELSE:
30016 if (!speed)
30018 *total = COSTS_N_INSNS (1);
30019 return true;
30021 else if (FLOAT_MODE_P (mode)
30022 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
30024 *total = rs6000_cost->fp;
30025 return false;
30027 break;
30029 case EQ:
30030 case GTU:
30031 case LTU:
30032 /* Carry bit requires mode == Pmode.
30033 NEG or PLUS already counted so only add one. */
30034 if (mode == Pmode
30035 && (outer_code == NEG || outer_code == PLUS))
30037 *total = COSTS_N_INSNS (1);
30038 return true;
30040 if (outer_code == SET)
30042 if (XEXP (x, 1) == const0_rtx)
30044 if (TARGET_ISEL && !TARGET_MFCRF)
30045 *total = COSTS_N_INSNS (8);
30046 else
30047 *total = COSTS_N_INSNS (2);
30048 return true;
30050 else if (mode == Pmode)
30052 *total = COSTS_N_INSNS (3);
30053 return false;
30056 /* FALLTHRU */
30058 case GT:
30059 case LT:
30060 case UNORDERED:
30061 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
30063 if (TARGET_ISEL && !TARGET_MFCRF)
30064 *total = COSTS_N_INSNS (8);
30065 else
30066 *total = COSTS_N_INSNS (2);
30067 return true;
30069 /* CC COMPARE. */
30070 if (outer_code == COMPARE)
30072 *total = 0;
30073 return true;
30075 break;
30077 default:
30078 break;
30081 return false;
30084 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
30086 static bool
30087 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
30088 bool speed)
30090 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
30092 fprintf (stderr,
30093 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
30094 "opno = %d, total = %d, speed = %s, x:\n",
30095 ret ? "complete" : "scan inner",
30096 GET_RTX_NAME (code),
30097 GET_RTX_NAME (outer_code),
30098 opno,
30099 *total,
30100 speed ? "true" : "false");
30102 debug_rtx (x);
30104 return ret;
30107 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
30109 static int
30110 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
30111 addr_space_t as, bool speed)
30113 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
30115 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
30116 ret, speed ? "true" : "false");
30117 debug_rtx (x);
30119 return ret;
30123 /* A C expression returning the cost of moving data from a register of class
30124 FROM to one of class TO. */
30126 static int
30127 rs6000_register_move_cost (enum machine_mode mode,
30128 reg_class_t from, reg_class_t to)
30130 int ret;
30132 if (TARGET_DEBUG_COST)
30133 dbg_cost_ctrl++;
30135 /* Moves from/to GENERAL_REGS. */
30136 if (reg_classes_intersect_p (to, GENERAL_REGS)
30137 || reg_classes_intersect_p (from, GENERAL_REGS))
30139 reg_class_t rclass = from;
30141 if (! reg_classes_intersect_p (to, GENERAL_REGS))
30142 rclass = to;
30144 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
30145 ret = (rs6000_memory_move_cost (mode, rclass, false)
30146 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
30148 /* It's more expensive to move CR_REGS than CR0_REGS because of the
30149 shift. */
30150 else if (rclass == CR_REGS)
30151 ret = 4;
30153 /* For those processors that have slow LR/CTR moves, make them more
30154 expensive than memory in order to bias spills to memory. */
30155 else if ((rs6000_cpu == PROCESSOR_POWER6
30156 || rs6000_cpu == PROCESSOR_POWER7
30157 || rs6000_cpu == PROCESSOR_POWER8)
30158 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
30159 ret = 6 * hard_regno_nregs[0][mode];
30161 else
30162 /* A move will cost one instruction per GPR moved. */
30163 ret = 2 * hard_regno_nregs[0][mode];
30166 /* If we have VSX, we can easily move between FPR or Altivec registers. */
30167 else if (VECTOR_MEM_VSX_P (mode)
30168 && reg_classes_intersect_p (to, VSX_REGS)
30169 && reg_classes_intersect_p (from, VSX_REGS))
30170 ret = 2 * hard_regno_nregs[32][mode];
30172 /* Moving between two similar registers is just one instruction. */
30173 else if (reg_classes_intersect_p (to, from))
30174 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
30176 /* Everything else has to go through GENERAL_REGS. */
30177 else
30178 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
30179 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
30181 if (TARGET_DEBUG_COST)
30183 if (dbg_cost_ctrl == 1)
30184 fprintf (stderr,
30185 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
30186 ret, GET_MODE_NAME (mode), reg_class_names[from],
30187 reg_class_names[to]);
30188 dbg_cost_ctrl--;
30191 return ret;
30194 /* A C expression returning the cost of moving data of MODE from a register to
30195 or from memory. */
30197 static int
30198 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
30199 bool in ATTRIBUTE_UNUSED)
30201 int ret;
30203 if (TARGET_DEBUG_COST)
30204 dbg_cost_ctrl++;
30206 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
30207 ret = 4 * hard_regno_nregs[0][mode];
30208 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
30209 || reg_classes_intersect_p (rclass, VSX_REGS)))
30210 ret = 4 * hard_regno_nregs[32][mode];
30211 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
30212 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
30213 else
30214 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
30216 if (TARGET_DEBUG_COST)
30218 if (dbg_cost_ctrl == 1)
30219 fprintf (stderr,
30220 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
30221 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
30222 dbg_cost_ctrl--;
30225 return ret;
30228 /* Returns a code for a target-specific builtin that implements
30229 the reciprocal of the function, or NULL_TREE if not available. */
30231 static tree
30232 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
30233 bool sqrt ATTRIBUTE_UNUSED)
30235 if (optimize_insn_for_size_p ())
30236 return NULL_TREE;
30238 if (md_fn)
30239 switch (fn)
30241 case VSX_BUILTIN_XVSQRTDP:
30242 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
30243 return NULL_TREE;
30245 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
30247 case VSX_BUILTIN_XVSQRTSP:
30248 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
30249 return NULL_TREE;
30251 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
30253 default:
30254 return NULL_TREE;
30257 else
30258 switch (fn)
30260 case BUILT_IN_SQRT:
30261 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
30262 return NULL_TREE;
30264 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
30266 case BUILT_IN_SQRTF:
30267 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
30268 return NULL_TREE;
30270 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
30272 default:
30273 return NULL_TREE;
30277 /* Load up a constant. If the mode is a vector mode, splat the value across
30278 all of the vector elements. */
30280 static rtx
30281 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
30283 rtx reg;
30285 if (mode == SFmode || mode == DFmode)
30287 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
30288 reg = force_reg (mode, d);
30290 else if (mode == V4SFmode)
30292 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
30293 rtvec v = gen_rtvec (4, d, d, d, d);
30294 reg = gen_reg_rtx (mode);
30295 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30297 else if (mode == V2DFmode)
30299 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
30300 rtvec v = gen_rtvec (2, d, d);
30301 reg = gen_reg_rtx (mode);
30302 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
30304 else
30305 gcc_unreachable ();
30307 return reg;
30310 /* Generate an FMA instruction. */
30312 static void
30313 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
30315 enum machine_mode mode = GET_MODE (target);
30316 rtx dst;
30318 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30319 gcc_assert (dst != NULL);
30321 if (dst != target)
30322 emit_move_insn (target, dst);
30325 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
30327 static void
30328 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
30330 enum machine_mode mode = GET_MODE (target);
30331 rtx dst;
30333 /* Altivec does not support fms directly;
30334 generate in terms of fma in that case. */
30335 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
30336 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
30337 else
30339 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
30340 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30342 gcc_assert (dst != NULL);
30344 if (dst != target)
30345 emit_move_insn (target, dst);
30348 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
30350 static void
30351 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
30353 enum machine_mode mode = GET_MODE (dst);
30354 rtx r;
30356 /* This is a tad more complicated, since the fnma_optab is for
30357 a different expression: fma(-m1, m2, a), which is the same
30358 thing except in the case of signed zeros.
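(Concretely: when m1 * m2 == a exactly, -fma (m1, m2, -a) rounds to
-0.0 while fma (-m1, m2, a) rounds to +0.0 under round-to-nearest.)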
30360 Fortunately we know that if FMA is supported that FNMSUB is
30361 also supported in the ISA. Just expand it directly. */
30363 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
30365 r = gen_rtx_NEG (mode, a);
30366 r = gen_rtx_FMA (mode, m1, m2, r);
30367 r = gen_rtx_NEG (mode, r);
30368 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
30371 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
30372 add a reg_note saying that this was a division. Support both scalar and
30373 vector divide. Assumes no trapping math and finite arguments. */
30375 void
30376 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
30378 enum machine_mode mode = GET_MODE (dst);
30379 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
30380 int i;
30382 /* Low precision estimates guarantee 5 bits of accuracy. High
30383 precision estimates guarantee 14 bits of accuracy. SFmode
30384 requires 23 bits of accuracy. DFmode requires 52 bits of
30385 accuracy. Each pass at least doubles the accuracy, leading
30386 to the following. */
30387 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30388 if (mode == DFmode || mode == V2DFmode)
30389 passes++;
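/* Spelling out the arithmetic: a 14-bit estimate doubles to 28 bits
   after one pass (>= 23, enough for SFmode) and to 56 bits after two
   (>= 52, enough for DFmode); a 5-bit estimate needs three passes to
   reach 40 bits and four to reach 80. */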
30391 enum insn_code code = optab_handler (smul_optab, mode);
30392 insn_gen_fn gen_mul = GEN_FCN (code);
30394 gcc_assert (code != CODE_FOR_nothing);
30396 one = rs6000_load_constant_and_splat (mode, dconst1);
30398 /* x0 = 1./d estimate */
30399 x0 = gen_reg_rtx (mode);
30400 emit_insn (gen_rtx_SET (VOIDmode, x0,
30401 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
30402 UNSPEC_FRES)));
30404 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
30405 if (passes > 1) {
30407 /* e0 = 1. - d * x0 */
30408 e0 = gen_reg_rtx (mode);
30409 rs6000_emit_nmsub (e0, d, x0, one);
30411 /* x1 = x0 + e0 * x0 */
30412 x1 = gen_reg_rtx (mode);
30413 rs6000_emit_madd (x1, e0, x0, x0);
30415 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
30416 ++i, xprev = xnext, eprev = enext) {
30418 /* enext = eprev * eprev */
30419 enext = gen_reg_rtx (mode);
30420 emit_insn (gen_mul (enext, eprev, eprev));
30422 /* xnext = xprev + enext * xprev */
30423 xnext = gen_reg_rtx (mode);
30424 rs6000_emit_madd (xnext, enext, xprev, xprev);
30427 } else
30428 xprev = x0;
30430 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
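/* Algebraically, the three steps below compute
   u + v * xprev = n*xprev + (n - d * (n*xprev)) * xprev
                 = n * xprev * (2 - d * xprev),
   folding the final refinement into the multiply by N. */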
30432 /* u = n * xprev */
30433 u = gen_reg_rtx (mode);
30434 emit_insn (gen_mul (u, n, xprev));
30436 /* v = n - (d * u) */
30437 v = gen_reg_rtx (mode);
30438 rs6000_emit_nmsub (v, d, u, n);
30440 /* dst = (v * xprev) + u */
30441 rs6000_emit_madd (dst, v, xprev, u);
30443 if (note_p)
30444 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
30447 /* Newton-Raphson approximation of single/double-precision floating point
30448 rsqrt. Assumes no trapping math and finite arguments. */
30450 void
30451 rs6000_emit_swrsqrt (rtx dst, rtx src)
30453 enum machine_mode mode = GET_MODE (src);
30454 rtx x0 = gen_reg_rtx (mode);
30455 rtx y = gen_reg_rtx (mode);
30457 /* Low precision estimates guarantee 5 bits of accuracy. High
30458 precision estimates guarantee 14 bits of accuracy. SFmode
30459 requires 23 bits of accuracy. DFmode requires 52 bits of
30460 accuracy. Each pass at least doubles the accuracy, leading
30461 to the following. */
30462 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30463 if (mode == DFmode || mode == V2DFmode)
30464 passes++;
30466 REAL_VALUE_TYPE dconst3_2;
30467 int i;
30468 rtx halfthree;
30469 enum insn_code code = optab_handler (smul_optab, mode);
30470 insn_gen_fn gen_mul = GEN_FCN (code);
30472 gcc_assert (code != CODE_FOR_nothing);
30474 /* Load up the constant 1.5 either as a scalar, or as a vector. */
30475 real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
30476 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
30478 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
30480 /* x0 = rsqrt estimate */
30481 emit_insn (gen_rtx_SET (VOIDmode, x0,
30482 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
30483 UNSPEC_RSQRT)));
30485 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
30486 rs6000_emit_msub (y, src, halfthree, src);
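/* Each pass below is one Newton step on f(x) = 1/(x*x) - src, which
   simplifies to x1 = x0 * (1.5 - (src/2) * x0 * x0); y holds src/2
   thanks to the msub above. */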
30488 for (i = 0; i < passes; i++)
30490 rtx x1 = gen_reg_rtx (mode);
30491 rtx u = gen_reg_rtx (mode);
30492 rtx v = gen_reg_rtx (mode);
30494 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
30495 emit_insn (gen_mul (u, x0, x0));
30496 rs6000_emit_nmsub (v, y, u, halfthree);
30497 emit_insn (gen_mul (x1, x0, v));
30498 x0 = x1;
30501 emit_move_insn (dst, x0);
30502 return;
30505 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
30506 (Power7) targets. DST is the target, and SRC is the argument operand. */
30508 void
30509 rs6000_emit_popcount (rtx dst, rtx src)
30511 enum machine_mode mode = GET_MODE (dst);
30512 rtx tmp1, tmp2;
30514 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
30515 if (TARGET_POPCNTD)
30517 if (mode == SImode)
30518 emit_insn (gen_popcntdsi2 (dst, src));
30519 else
30520 emit_insn (gen_popcntddi2 (dst, src));
30521 return;
30524 tmp1 = gen_reg_rtx (mode);
30526 if (mode == SImode)
30528 emit_insn (gen_popcntbsi2 (tmp1, src));
30529 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
30530 NULL_RTX, 0);
30531 tmp2 = force_reg (SImode, tmp2);
30532 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
30534 else
30536 emit_insn (gen_popcntbdi2 (tmp1, src));
30537 tmp2 = expand_mult (DImode, tmp1,
30538 GEN_INT ((HOST_WIDE_INT)
30539 0x01010101 << 32 | 0x01010101),
30540 NULL_RTX, 0);
30541 tmp2 = force_reg (DImode, tmp2);
30542 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
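/* In both branches, popcntb leaves each byte's population count in
   that byte; multiplying by the replicated-0x01 constant accumulates
   the byte counts into the most significant byte, which the final
   shift extracts.  E.g. for SImode src = 0x000000ff, popcntb gives
   0x00000008, the multiply gives 0x08080808, and the shift by 24
   yields 8. */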
30547 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
30548 target, and SRC is the argument operand. */
30550 void
30551 rs6000_emit_parity (rtx dst, rtx src)
30553 enum machine_mode mode = GET_MODE (dst);
30554 rtx tmp;
30556 tmp = gen_reg_rtx (mode);
30558 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
30559 if (TARGET_CMPB)
30561 if (mode == SImode)
30563 emit_insn (gen_popcntbsi2 (tmp, src));
30564 emit_insn (gen_paritysi2_cmpb (dst, tmp));
30566 else
30568 emit_insn (gen_popcntbdi2 (tmp, src));
30569 emit_insn (gen_paritydi2_cmpb (dst, tmp));
30571 return;
30574 if (mode == SImode)
30576 /* Is mult+shift >= shift+xor+shift+xor? */
30577 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
30579 rtx tmp1, tmp2, tmp3, tmp4;
30581 tmp1 = gen_reg_rtx (SImode);
30582 emit_insn (gen_popcntbsi2 (tmp1, src));
30584 tmp2 = gen_reg_rtx (SImode);
30585 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
30586 tmp3 = gen_reg_rtx (SImode);
30587 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
30589 tmp4 = gen_reg_rtx (SImode);
30590 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
30591 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
30593 else
30594 rs6000_emit_popcount (tmp, src);
30595 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
30597 else
30599 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
30600 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
30602 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
30604 tmp1 = gen_reg_rtx (DImode);
30605 emit_insn (gen_popcntbdi2 (tmp1, src));
30607 tmp2 = gen_reg_rtx (DImode);
30608 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
30609 tmp3 = gen_reg_rtx (DImode);
30610 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
30612 tmp4 = gen_reg_rtx (DImode);
30613 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
30614 tmp5 = gen_reg_rtx (DImode);
30615 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
30617 tmp6 = gen_reg_rtx (DImode);
30618 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
30619 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
30621 else
30622 rs6000_emit_popcount (tmp, src);
30623 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
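/* The shift/xor ladders above fold the per-byte counts from popcntb
   pairwise; XOR adds the counts modulo 2 in the low bit, so the low
   byte ends up holding the parity of the whole word, which the final
   AND with 1 extracts. */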
30627 /* Expand an Altivec constant permutation for little endian mode.
30628 There are two issues: First, the two input operands must be
30629 swapped so that together they form a double-wide array in LE
30630 order. Second, the vperm instruction has surprising behavior
30631 in LE mode: it interprets the elements of the source vectors
30632 in BE mode ("left to right") and interprets the elements of
30633 the destination vector in LE mode ("right to left"). To
30634 correct for this, we must subtract each element of the permute
30635 control vector from 31.
30637 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
30638 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
30639 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
30640 serve as the permute control vector. Then, in BE mode,
30642 vperm 9,10,11,12
30644 places the desired result in vr9. However, in LE mode the
30645 vector contents will be
30647 vr10 = 00000003 00000002 00000001 00000000
30648 vr11 = 00000007 00000006 00000005 00000004
30650 The result of the vperm using the same permute control vector is
30652 vr9 = 05000000 07000000 01000000 03000000
30654 That is, the leftmost 4 bytes of vr10 are interpreted as the
30655 source for the rightmost 4 bytes of vr9, and so on.
30657 If we change the permute control vector to
30659 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
30661 and issue
30663 vperm 9,11,10,12
30665 we get the desired
30667 vr9 = 00000006 00000004 00000002 00000000. */
30669 void
30670 altivec_expand_vec_perm_const_le (rtx operands[4])
30672 unsigned int i;
30673 rtx perm[16];
30674 rtx constv, unspec;
30675 rtx target = operands[0];
30676 rtx op0 = operands[1];
30677 rtx op1 = operands[2];
30678 rtx sel = operands[3];
30680 /* Unpack and adjust the constant selector. */
30681 for (i = 0; i < 16; ++i)
30683 rtx e = XVECEXP (sel, 0, i);
30684 unsigned int elt = 31 - (INTVAL (e) & 31);
30685 perm[i] = GEN_INT (elt);
30688 /* Expand to a permute, swapping the inputs and using the
30689 adjusted selector. */
30690 if (!REG_P (op0))
30691 op0 = force_reg (V16QImode, op0);
30692 if (!REG_P (op1))
30693 op1 = force_reg (V16QImode, op1);
30695 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
30696 constv = force_reg (V16QImode, constv);
30697 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
30698 UNSPEC_VPERM);
30699 if (!REG_P (target))
30701 rtx tmp = gen_reg_rtx (V16QImode);
30702 emit_move_insn (tmp, unspec);
30703 unspec = tmp;
30706 emit_move_insn (target, unspec);
30709 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
30710 permute control vector. But here it's not a constant, so we must
30711 generate a vector NAND or NOR to do the adjustment. */
30713 void
30714 altivec_expand_vec_perm_le (rtx operands[4])
30716 rtx notx, iorx, unspec;
30717 rtx target = operands[0];
30718 rtx op0 = operands[1];
30719 rtx op1 = operands[2];
30720 rtx sel = operands[3];
30721 rtx tmp = target;
30722 rtx norreg = gen_reg_rtx (V16QImode);
30723 enum machine_mode mode = GET_MODE (target);
30725 /* Get everything in regs so the pattern matches. */
30726 if (!REG_P (op0))
30727 op0 = force_reg (mode, op0);
30728 if (!REG_P (op1))
30729 op1 = force_reg (mode, op1);
30730 if (!REG_P (sel))
30731 sel = force_reg (V16QImode, sel);
30732 if (!REG_P (target))
30733 tmp = gen_reg_rtx (mode);
30735 /* Invert the selector with a VNAND if available, else a VNOR.
30736 The VNAND is preferred for future fusion opportunities. */
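/* vperm only examines the low five bits of each selector byte, so
   ~b is equivalent to the 31 - b adjustment used in the constant
   case above. */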
30737 notx = gen_rtx_NOT (V16QImode, sel);
30738 iorx = (TARGET_P8_VECTOR
30739 ? gen_rtx_IOR (V16QImode, notx, notx)
30740 : gen_rtx_AND (V16QImode, notx, notx));
30741 emit_insn (gen_rtx_SET (VOIDmode, norreg, iorx));
30743 /* Permute with operands reversed and adjusted selector. */
30744 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
30745 UNSPEC_VPERM);
30747 /* Copy into target, possibly by way of a register. */
30748 if (!REG_P (target))
30750 emit_move_insn (tmp, unspec);
30751 unspec = tmp;
30754 emit_move_insn (target, unspec);
30757 /* Expand an Altivec constant permutation. Return true if we match
30758 an efficient implementation; false to fall back to VPERM. */
30760 bool
30761 altivec_expand_vec_perm_const (rtx operands[4])
30763 struct altivec_perm_insn {
30764 HOST_WIDE_INT mask;
30765 enum insn_code impl;
30766 unsigned char perm[16];
30768 static const struct altivec_perm_insn patterns[] = {
30769 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
30770 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
30771 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
30772 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
30773 { OPTION_MASK_ALTIVEC,
30774 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
30775 : CODE_FOR_altivec_vmrglb_direct),
30776 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
30777 { OPTION_MASK_ALTIVEC,
30778 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
30779 : CODE_FOR_altivec_vmrglh_direct),
30780 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
30781 { OPTION_MASK_ALTIVEC,
30782 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
30783 : CODE_FOR_altivec_vmrglw_direct),
30784 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
30785 { OPTION_MASK_ALTIVEC,
30786 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
30787 : CODE_FOR_altivec_vmrghb_direct),
30788 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
30789 { OPTION_MASK_ALTIVEC,
30790 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
30791 : CODE_FOR_altivec_vmrghh_direct),
30792 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
30793 { OPTION_MASK_ALTIVEC,
30794 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
30795 : CODE_FOR_altivec_vmrghw_direct),
30796 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
30797 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
30798 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
30799 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
30800 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
30803 unsigned int i, j, elt, which;
30804 unsigned char perm[16];
30805 rtx target, op0, op1, sel, x;
30806 bool one_vec;
30808 target = operands[0];
30809 op0 = operands[1];
30810 op1 = operands[2];
30811 sel = operands[3];
30813 /* Unpack the constant selector. */
30814 for (i = which = 0; i < 16; ++i)
30816 rtx e = XVECEXP (sel, 0, i);
30817 elt = INTVAL (e) & 31;
30818 which |= (elt < 16 ? 1 : 2);
30819 perm[i] = elt;
30822 /* Simplify the constant selector based on operands. */
30823 switch (which)
30825 default:
30826 gcc_unreachable ();
30828 case 3:
30829 one_vec = false;
30830 if (!rtx_equal_p (op0, op1))
30831 break;
30832 /* FALLTHRU */
30834 case 2:
30835 for (i = 0; i < 16; ++i)
30836 perm[i] &= 15;
30837 op0 = op1;
30838 one_vec = true;
30839 break;
30841 case 1:
30842 op1 = op0;
30843 one_vec = true;
30844 break;
30847 /* Look for splat patterns. */
30848 if (one_vec)
30850 elt = perm[0];
30852 for (i = 0; i < 16; ++i)
30853 if (perm[i] != elt)
30854 break;
30855 if (i == 16)
30857 if (!BYTES_BIG_ENDIAN)
30858 elt = 15 - elt;
30859 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
30860 return true;
30863 if (elt % 2 == 0)
30865 for (i = 0; i < 16; i += 2)
30866 if (perm[i] != elt || perm[i + 1] != elt + 1)
30867 break;
30868 if (i == 16)
30870 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
30871 x = gen_reg_rtx (V8HImode);
30872 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
30873 GEN_INT (field)));
30874 emit_move_insn (target, gen_lowpart (V16QImode, x));
30875 return true;
30879 if (elt % 4 == 0)
30881 for (i = 0; i < 16; i += 4)
30882 if (perm[i] != elt
30883 || perm[i + 1] != elt + 1
30884 || perm[i + 2] != elt + 2
30885 || perm[i + 3] != elt + 3)
30886 break;
30887 if (i == 16)
30889 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30890 x = gen_reg_rtx (V4SImode);
30891 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30892 GEN_INT (field)));
30893 emit_move_insn (target, gen_lowpart (V16QImode, x));
30894 return true;
30899 /* Look for merge and pack patterns. */
30900 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30902 bool swapped;
30904 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30905 continue;
30907 elt = patterns[j].perm[0];
30908 if (perm[0] == elt)
30909 swapped = false;
30910 else if (perm[0] == elt + 16)
30911 swapped = true;
30912 else
30913 continue;
30914 for (i = 1; i < 16; ++i)
30916 elt = patterns[j].perm[i];
30917 if (swapped)
30918 elt = (elt >= 16 ? elt - 16 : elt + 16);
30919 else if (one_vec && elt >= 16)
30920 elt -= 16;
30921 if (perm[i] != elt)
30922 break;
30924 if (i == 16)
30926 enum insn_code icode = patterns[j].impl;
30927 enum machine_mode omode = insn_data[icode].operand[0].mode;
30928 enum machine_mode imode = insn_data[icode].operand[1].mode;
30930 /* For little-endian, don't use vpkuwum and vpkuhum if the
30931 underlying vector type is not V4SI and V8HI, respectively.
30932 For example, using vpkuwum with a V8HI picks up the even
30933 halfwords (BE numbering) when the even halfwords (LE
30934 numbering) are what we need. */
30935 if (!BYTES_BIG_ENDIAN
30936 && icode == CODE_FOR_altivec_vpkuwum_direct
30937 && ((GET_CODE (op0) == REG
30938 && GET_MODE (op0) != V4SImode)
30939 || (GET_CODE (op0) == SUBREG
30940 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30941 continue;
30942 if (!BYTES_BIG_ENDIAN
30943 && icode == CODE_FOR_altivec_vpkuhum_direct
30944 && ((GET_CODE (op0) == REG
30945 && GET_MODE (op0) != V8HImode)
30946 || (GET_CODE (op0) == SUBREG
30947 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30948 continue;
30950 /* For little-endian, the two input operands must be swapped
30951 (or swapped back) to ensure proper right-to-left numbering
30952 from 0 to 2N-1. */
30953 if (swapped ^ !BYTES_BIG_ENDIAN)
30954 x = op0, op0 = op1, op1 = x;
30955 if (imode != V16QImode)
30957 op0 = gen_lowpart (imode, op0);
30958 op1 = gen_lowpart (imode, op1);
30960 if (omode == V16QImode)
30961 x = target;
30962 else
30963 x = gen_reg_rtx (omode);
30964 emit_insn (GEN_FCN (icode) (x, op0, op1));
30965 if (omode != V16QImode)
30966 emit_move_insn (target, gen_lowpart (V16QImode, x));
30967 return true;
30971 if (!BYTES_BIG_ENDIAN)
30973 altivec_expand_vec_perm_const_le (operands);
30974 return true;
30977 return false;
30980 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
30981 Return true if we match an efficient implementation. */
30983 static bool
30984 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30985 unsigned char perm0, unsigned char perm1)
30987 rtx x;
30989 /* If both selectors come from the same operand, fold to single op. */
30990 if ((perm0 & 2) == (perm1 & 2))
30992 if (perm0 & 2)
30993 op0 = op1;
30994 else
30995 op1 = op0;
30997 /* If both operands are equal, fold to simpler permutation. */
30998 if (rtx_equal_p (op0, op1))
31000 perm0 = perm0 & 1;
31001 perm1 = (perm1 & 1) + 2;
31003 /* If the first selector comes from the second operand, swap. */
31004 else if (perm0 & 2)
31006 if (perm1 & 2)
31007 return false;
31008 perm0 -= 2;
31009 perm1 += 2;
31010 x = op0, op0 = op1, op1 = x;
31012 /* If the second selector does not come from the second operand, fail. */
31013 else if ((perm1 & 2) == 0)
31014 return false;
31016 /* Success! */
31017 if (target != NULL)
31019 enum machine_mode vmode, dmode;
31020 rtvec v;
31022 vmode = GET_MODE (target);
31023 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
31024 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
31025 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
31026 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
31027 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
31028 emit_insn (gen_rtx_SET (VOIDmode, target, x));
31030 return true;
31033 bool
31034 rs6000_expand_vec_perm_const (rtx operands[4])
31036 rtx target, op0, op1, sel;
31037 unsigned char perm0, perm1;
31039 target = operands[0];
31040 op0 = operands[1];
31041 op1 = operands[2];
31042 sel = operands[3];
31044 /* Unpack the constant selector. */
31045 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
31046 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
31048 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
31051 /* Test whether a constant permutation is supported. */
31053 static bool
31054 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
31055 const unsigned char *sel)
31057 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
31058 if (TARGET_ALTIVEC)
31059 return true;
31061 /* Check for ps_merge* or evmerge* insns. */
31062 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
31063 || (TARGET_SPE && vmode == V2SImode))
31065 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
31066 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
31067 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
31070 return false;
31073 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
31075 static void
31076 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
31077 enum machine_mode vmode, unsigned nelt, rtx perm[])
31079 enum machine_mode imode;
31080 rtx x;
31082 imode = vmode;
31083 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
31085 imode = GET_MODE_INNER (vmode);
31086 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
31087 imode = mode_for_vector (imode, nelt);
31090 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
31091 x = expand_vec_perm (vmode, op0, op1, x, target);
31092 if (x != target)
31093 emit_move_insn (target, x);
31096 /* Expand an extract even operation. */
31098 void
31099 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
31101 enum machine_mode vmode = GET_MODE (target);
31102 unsigned i, nelt = GET_MODE_NUNITS (vmode);
31103 rtx perm[16];
31105 for (i = 0; i < nelt; i++)
31106 perm[i] = GEN_INT (i * 2);
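/* E.g. for a four-element vector this builds the selector
   {0, 2, 4, 6}, picking the even elements of the op0/op1
   concatenation. */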
31108 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
31111 /* Expand a vector interleave operation. */
31113 void
31114 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
31116 enum machine_mode vmode = GET_MODE (target);
31117 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
31118 rtx perm[16];
31120 high = (highp ? 0 : nelt / 2);
31121 for (i = 0; i < nelt / 2; i++)
31123 perm[i * 2] = GEN_INT (i + high);
31124 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
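/* E.g. for a four-element vector, HIGHP yields the selector
   {0, 4, 1, 5} and !HIGHP yields {2, 6, 3, 7}, interleaving the
   corresponding halves of the two inputs. */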
31127 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
31130 /* Return an RTX representing where to find the function value of a
31131 function returning MODE. */
31132 static rtx
31133 rs6000_complex_function_value (enum machine_mode mode)
31135 unsigned int regno;
31136 rtx r1, r2;
31137 enum machine_mode inner = GET_MODE_INNER (mode);
31138 unsigned int inner_bytes = GET_MODE_SIZE (inner);
31140 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31141 regno = FP_ARG_RETURN;
31142 else
31144 regno = GP_ARG_RETURN;
31146 /* 32-bit is OK since it'll go in r3/r4. */
31147 if (TARGET_32BIT && inner_bytes >= 4)
31148 return gen_rtx_REG (mode, regno);
31151 if (inner_bytes >= 8)
31152 return gen_rtx_REG (mode, regno);
31154 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
31155 const0_rtx);
31156 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
31157 GEN_INT (inner_bytes));
31158 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
31161 /* Target hook for TARGET_FUNCTION_VALUE.
31163 On the SPE, both FPs and vectors are returned in r3.
31165 On RS/6000 an integer value is in r3 and a floating-point value is in
31166 fp1, unless -msoft-float. */
31168 static rtx
31169 rs6000_function_value (const_tree valtype,
31170 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
31171 bool outgoing ATTRIBUTE_UNUSED)
31173 enum machine_mode mode;
31174 unsigned int regno;
31175 enum machine_mode elt_mode;
31176 int n_elts;
31178 /* Special handling for structs in darwin64. */
31179 if (TARGET_MACHO
31180 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
31182 CUMULATIVE_ARGS valcum;
31183 rtx valret;
31185 valcum.words = 0;
31186 valcum.fregno = FP_ARG_MIN_REG;
31187 valcum.vregno = ALTIVEC_ARG_MIN_REG;
31188 /* Do a trial code generation as if this were going to be passed as
31189 an argument; if any part goes in memory, we return NULL. */
31190 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
31191 if (valret)
31192 return valret;
31193 /* Otherwise fall through to standard ABI rules. */
31196 /* The ELFv2 ABI returns homogeneous floating-point and vector aggregates in registers. */
31197 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
31198 &elt_mode, &n_elts))
31200 int first_reg, n_regs, i;
31201 rtx par;
31203 if (SCALAR_FLOAT_MODE_P (elt_mode))
31205 /* _Decimal128 must use even/odd register pairs. */
31206 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31207 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
31209 else
31211 first_reg = ALTIVEC_ARG_RETURN;
31212 n_regs = 1;
31215 par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
31216 for (i = 0; i < n_elts; i++)
31218 rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
31219 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
31220 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
31223 return par;
31226 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
31228 /* Long long return values need to be split in the 32-bit -mpowerpc64 ABI. */
31229 return gen_rtx_PARALLEL (DImode,
31230 gen_rtvec (2,
31231 gen_rtx_EXPR_LIST (VOIDmode,
31232 gen_rtx_REG (SImode, GP_ARG_RETURN),
31233 const0_rtx),
31234 gen_rtx_EXPR_LIST (VOIDmode,
31235 gen_rtx_REG (SImode,
31236 GP_ARG_RETURN + 1),
31237 GEN_INT (4))));
31239 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
31241 return gen_rtx_PARALLEL (DCmode,
31242 gen_rtvec (4,
31243 gen_rtx_EXPR_LIST (VOIDmode,
31244 gen_rtx_REG (SImode, GP_ARG_RETURN),
31245 const0_rtx),
31246 gen_rtx_EXPR_LIST (VOIDmode,
31247 gen_rtx_REG (SImode,
31248 GP_ARG_RETURN + 1),
31249 GEN_INT (4)),
31250 gen_rtx_EXPR_LIST (VOIDmode,
31251 gen_rtx_REG (SImode,
31252 GP_ARG_RETURN + 2),
31253 GEN_INT (8)),
31254 gen_rtx_EXPR_LIST (VOIDmode,
31255 gen_rtx_REG (SImode,
31256 GP_ARG_RETURN + 3),
31257 GEN_INT (12))));
31260 mode = TYPE_MODE (valtype);
31261 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
31262 || POINTER_TYPE_P (valtype))
31263 mode = TARGET_32BIT ? SImode : DImode;
31265 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31266 /* _Decimal128 must use an even/odd register pair. */
31267 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31268 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
31269 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
31270 regno = FP_ARG_RETURN;
31271 else if (TREE_CODE (valtype) == COMPLEX_TYPE
31272 && targetm.calls.split_complex_arg)
31273 return rs6000_complex_function_value (mode);
31274 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31275 return register is used in both cases, and we won't see V2DImode/V2DFmode
31276 for pure altivec, combine the two cases. */
31277 else if (TREE_CODE (valtype) == VECTOR_TYPE
31278 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
31279 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
31280 regno = ALTIVEC_ARG_RETURN;
31281 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31282 && (mode == DFmode || mode == DCmode
31283 || mode == TFmode || mode == TCmode))
31284 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31285 else
31286 regno = GP_ARG_RETURN;
31288 return gen_rtx_REG (mode, regno);
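/* Illustrative sketch, not part of the original source: under the ELFv2
   homogeneous-aggregate rules above, a structure of four floats is
   returned entirely in FPRs, e.g.

       struct quad { float a, b, c, d; };
       struct quad f (void);   -- returned in fp1..fp4 as a PARALLEL

   while a long long returned under the 32-bit -mpowerpc64 ABI comes back
   split across GP_ARG_RETURN and GP_ARG_RETURN + 1 as in the DImode case
   above.  */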
31291 /* Define how to find the value returned by a library function
31292 assuming the value has mode MODE. */
31293 rtx
31294 rs6000_libcall_value (enum machine_mode mode)
31296 unsigned int regno;
31298 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
31300 /* A long long return value needs to be split for -mpowerpc64 under the 32-bit ABI. */
31301 return gen_rtx_PARALLEL (DImode,
31302 gen_rtvec (2,
31303 gen_rtx_EXPR_LIST (VOIDmode,
31304 gen_rtx_REG (SImode, GP_ARG_RETURN),
31305 const0_rtx),
31306 gen_rtx_EXPR_LIST (VOIDmode,
31307 gen_rtx_REG (SImode,
31308 GP_ARG_RETURN + 1),
31309 GEN_INT (4))));
31312 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31313 /* _Decimal128 must use an even/odd register pair. */
31314 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31315 else if (SCALAR_FLOAT_MODE_P (mode)
31316 && TARGET_HARD_FLOAT && TARGET_FPRS
31317 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
31318 regno = FP_ARG_RETURN;
31319 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31320 return register is used in both cases, and we won't see V2DImode/V2DFmode
31321 for pure altivec, combine the two cases. */
31322 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
31323 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
31324 regno = ALTIVEC_ARG_RETURN;
31325 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
31326 return rs6000_complex_function_value (mode);
31327 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31328 && (mode == DFmode || mode == DCmode
31329 || mode == TFmode || mode == TCmode))
31330 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31331 else
31332 regno = GP_ARG_RETURN;
31334 return gen_rtx_REG (mode, regno);
31338 /* Return true if we use LRA instead of the reload pass. */
31339 static bool
31340 rs6000_lra_p (void)
31342 return rs6000_lra_flag;
31345 /* Given FROM and TO register numbers, say whether this elimination is allowed.
31346 Frame pointer elimination is automatically handled.
31348 For the RS/6000, if frame pointer elimination is being done, we would like
31349 to convert ap into fp, not sp.
31351 We need r30 if -mminimal-toc was specified, and there are constant pool
31352 references. */
31354 static bool
31355 rs6000_can_eliminate (const int from, const int to)
31357 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
31358 ? ! frame_pointer_needed
31359 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
31360 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
31361 : true);
31364 /* Define the offset between two registers, FROM to be eliminated and its
31365 replacement TO, at the start of a routine. */
31366 HOST_WIDE_INT
31367 rs6000_initial_elimination_offset (int from, int to)
31369 rs6000_stack_t *info = rs6000_stack_info ();
31370 HOST_WIDE_INT offset;
31372 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31373 offset = info->push_p ? 0 : -info->total_size;
31374 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31376 offset = info->push_p ? 0 : -info->total_size;
31377 if (FRAME_GROWS_DOWNWARD)
31378 offset += info->fixed_size + info->vars_size + info->parm_size;
31380 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31381 offset = FRAME_GROWS_DOWNWARD
31382 ? info->fixed_size + info->vars_size + info->parm_size
31383 : 0;
31384 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31385 offset = info->total_size;
31386 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31387 offset = info->push_p ? info->total_size : 0;
31388 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
31389 offset = 0;
31390 else
31391 gcc_unreachable ();
31393 return offset;
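/* Worked example with made-up numbers, not from the original source: if
   the current frame has total_size == 128 and push_p is set, eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM yields an offset of 128,
   while eliminating HARD_FRAME_POINTER_REGNUM to STACK_POINTER_REGNUM
   yields 0, because once the frame has been pushed the hard frame
   pointer already coincides with the stack pointer.  */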
31396 static rtx
31397 rs6000_dwarf_register_span (rtx reg)
31399 rtx parts[8];
31400 int i, words;
31401 unsigned regno = REGNO (reg);
31402 enum machine_mode mode = GET_MODE (reg);
31404 if (TARGET_SPE
31405 && regno < 32
31406 && (SPE_VECTOR_MODE (GET_MODE (reg))
31407 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
31408 && mode != SFmode && mode != SDmode && mode != SCmode)))
31410 else
31411 return NULL_RTX;
31413 regno = REGNO (reg);
31415 /* The duality of the SPE register size wreaks all kinds of havoc.
31416 This is a way of distinguishing r0 in 32-bits from r0 in
31417 64-bits. */
31418 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
31419 gcc_assert (words <= 4);
31420 for (i = 0; i < words; i++, regno++)
31422 if (BYTES_BIG_ENDIAN)
31424 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31425 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
31427 else
31429 parts[2 * i] = gen_rtx_REG (SImode, regno);
31430 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31434 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
31437 /* Fill in sizes for SPE register high parts in table used by unwinder. */
31439 static void
31440 rs6000_init_dwarf_reg_sizes_extra (tree address)
31442 if (TARGET_SPE)
31444 int i;
31445 enum machine_mode mode = TYPE_MODE (char_type_node);
31446 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31447 rtx mem = gen_rtx_MEM (BLKmode, addr);
31448 rtx value = gen_int_mode (4, mode);
31450 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
31452 int column = DWARF_REG_TO_UNWIND_COLUMN
31453 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31454 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31456 emit_move_insn (adjust_address (mem, mode, offset), value);
31460 if (TARGET_MACHO && ! TARGET_ALTIVEC)
31462 int i;
31463 enum machine_mode mode = TYPE_MODE (char_type_node);
31464 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31465 rtx mem = gen_rtx_MEM (BLKmode, addr);
31466 rtx value = gen_int_mode (16, mode);
31468 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
31469 The unwinder still needs to know the size of Altivec registers. */
31471 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
31473 int column = DWARF_REG_TO_UNWIND_COLUMN
31474 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31475 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31477 emit_move_insn (adjust_address (mem, mode, offset), value);
31482 /* Map internal gcc register numbers to DWARF2 register numbers. */
31484 unsigned int
31485 rs6000_dbx_register_number (unsigned int regno)
31487 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
31488 return regno;
31489 if (regno == LR_REGNO)
31490 return 108;
31491 if (regno == CTR_REGNO)
31492 return 109;
31493 if (CR_REGNO_P (regno))
31494 return regno - CR0_REGNO + 86;
31495 if (regno == CA_REGNO)
31496 return 101; /* XER */
31497 if (ALTIVEC_REGNO_P (regno))
31498 return regno - FIRST_ALTIVEC_REGNO + 1124;
31499 if (regno == VRSAVE_REGNO)
31500 return 356;
31501 if (regno == VSCR_REGNO)
31502 return 67;
31503 if (regno == SPE_ACC_REGNO)
31504 return 99;
31505 if (regno == SPEFSCR_REGNO)
31506 return 612;
31507 if (SPE_HIGH_REGNO_P (regno))
31508 return regno - FIRST_SPE_HIGH_REGNO + 1200;
31509 return regno;
31512 /* Target hook for eh_return_filter_mode. */
31513 static enum machine_mode
31514 rs6000_eh_return_filter_mode (void)
31516 return TARGET_32BIT ? SImode : word_mode;
31519 /* Target hook for scalar_mode_supported_p. */
31520 static bool
31521 rs6000_scalar_mode_supported_p (enum machine_mode mode)
31523 if (DECIMAL_FLOAT_MODE_P (mode))
31524 return default_decimal_float_supported_p ();
31525 else
31526 return default_scalar_mode_supported_p (mode);
31529 /* Target hook for vector_mode_supported_p. */
31530 static bool
31531 rs6000_vector_mode_supported_p (enum machine_mode mode)
31534 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
31535 return true;
31537 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
31538 return true;
31540 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
31541 return true;
31543 else
31544 return false;
31547 /* Target hook for invalid_arg_for_unprototyped_fn. */
31548 static const char *
31549 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
31551 return (!rs6000_darwin64_abi
31552 && typelist == 0
31553 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
31554 && (funcdecl == NULL_TREE
31555 || (TREE_CODE (funcdecl) == FUNCTION_DECL
31556 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
31557 ? N_("AltiVec argument passed to unprototyped function")
31558 : NULL;
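/* Illustrative example of the rejected case, not from the original
   source:

       #include <altivec.h>
       void callee ();                  -- unprototyped declaration
       void caller (vector int v) { callee (v); }

   Without a prototype there is no way to know that the callee expects an
   AltiVec register argument, so the call is diagnosed.  */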
31561 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
31562 setup by using __stack_chk_fail_local hidden function instead of
31563 calling __stack_chk_fail directly. Otherwise it is better to call
31564 __stack_chk_fail directly. */
31566 static tree ATTRIBUTE_UNUSED
31567 rs6000_stack_protect_fail (void)
31569 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
31570 ? default_hidden_stack_protect_fail ()
31571 : default_external_stack_protect_fail ();
31574 void
31575 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
31576 int num_operands ATTRIBUTE_UNUSED)
31578 if (rs6000_warn_cell_microcode)
31580 const char *temp;
31581 int insn_code_number = recog_memoized (insn);
31582 location_t location = INSN_LOCATION (insn);
31584 /* Punt on insns we cannot recognize. */
31585 if (insn_code_number < 0)
31586 return;
31588 temp = get_insn_template (insn_code_number, insn);
31590 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
31591 warning_at (location, OPT_mwarn_cell_microcode,
31592 "emitting microcode insn %s\t[%s] #%d",
31593 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31594 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
31595 warning_at (location, OPT_mwarn_cell_microcode,
31596 "emitting conditional microcode insn %s\t[%s] #%d",
31597 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31601 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
31603 #if TARGET_ELF
31604 static unsigned HOST_WIDE_INT
31605 rs6000_asan_shadow_offset (void)
31607 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
31609 #endif
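/* For reference (the standard libasan mapping, not original source
   text): the sanitizer computes

       shadow_addr = (addr >> 3) + shadow_offset

   so the values above place the shadow base at 1 << 41 for 64-bit and
   1 << 29 for 32-bit code.  A minimal sketch of the mapping:

       unsigned long long shadow_of (unsigned long long addr, int is64)
       {
	 return (addr >> 3) + (1ULL << (is64 ? 41 : 29));
       }
*/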
31611 /* Mask options that we want to support inside of attribute((target)) and
31612 #pragma GCC target operations. Note, we do not include things like
31613 64/32-bit, endianness, hard/soft floating point, etc. that would have
31614 different calling sequences. */
31616 struct rs6000_opt_mask {
31617 const char *name; /* option name */
31618 HOST_WIDE_INT mask; /* mask to set */
31619 bool invert; /* invert sense of mask */
31620 bool valid_target; /* option is a target option */
31623 static struct rs6000_opt_mask const rs6000_opt_masks[] =
31625 { "altivec", OPTION_MASK_ALTIVEC, false, true },
31626 { "cmpb", OPTION_MASK_CMPB, false, true },
31627 { "crypto", OPTION_MASK_CRYPTO, false, true },
31628 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
31629 { "dlmzb", OPTION_MASK_DLMZB, false, true },
31630 { "fprnd", OPTION_MASK_FPRND, false, true },
31631 { "hard-dfp", OPTION_MASK_DFP, false, true },
31632 { "htm", OPTION_MASK_HTM, false, true },
31633 { "isel", OPTION_MASK_ISEL, false, true },
31634 { "mfcrf", OPTION_MASK_MFCRF, false, true },
31635 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
31636 { "mulhw", OPTION_MASK_MULHW, false, true },
31637 { "multiple", OPTION_MASK_MULTIPLE, false, true },
31638 { "popcntb", OPTION_MASK_POPCNTB, false, true },
31639 { "popcntd", OPTION_MASK_POPCNTD, false, true },
31640 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
31641 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
31642 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
31643 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
31644 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
31645 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
31646 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
31647 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
31648 { "string", OPTION_MASK_STRING, false, true },
31649 { "update", OPTION_MASK_NO_UPDATE, true , true },
31650 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
31651 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
31652 { "vsx", OPTION_MASK_VSX, false, true },
31653 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
31654 #ifdef OPTION_MASK_64BIT
31655 #if TARGET_AIX_OS
31656 { "aix64", OPTION_MASK_64BIT, false, false },
31657 { "aix32", OPTION_MASK_64BIT, true, false },
31658 #else
31659 { "64", OPTION_MASK_64BIT, false, false },
31660 { "32", OPTION_MASK_64BIT, true, false },
31661 #endif
31662 #endif
31663 #ifdef OPTION_MASK_EABI
31664 { "eabi", OPTION_MASK_EABI, false, false },
31665 #endif
31666 #ifdef OPTION_MASK_LITTLE_ENDIAN
31667 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
31668 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
31669 #endif
31670 #ifdef OPTION_MASK_RELOCATABLE
31671 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
31672 #endif
31673 #ifdef OPTION_MASK_STRICT_ALIGN
31674 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
31675 #endif
31676 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
31677 { "string", OPTION_MASK_STRING, false, false },
31680 /* Builtin mask mapping for printing the flags. */
31681 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
31683 { "altivec", RS6000_BTM_ALTIVEC, false, false },
31684 { "vsx", RS6000_BTM_VSX, false, false },
31685 { "spe", RS6000_BTM_SPE, false, false },
31686 { "paired", RS6000_BTM_PAIRED, false, false },
31687 { "fre", RS6000_BTM_FRE, false, false },
31688 { "fres", RS6000_BTM_FRES, false, false },
31689 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
31690 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
31691 { "popcntd", RS6000_BTM_POPCNTD, false, false },
31692 { "cell", RS6000_BTM_CELL, false, false },
31693 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
31694 { "crypto", RS6000_BTM_CRYPTO, false, false },
31695 { "htm", RS6000_BTM_HTM, false, false },
31696 { "hard-dfp", RS6000_BTM_DFP, false, false },
31697 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
31698 { "long-double-128", RS6000_BTM_LDBL128, false, false },
31701 /* Option variables that we want to support inside attribute((target)) and
31702 #pragma GCC target operations. */
31704 struct rs6000_opt_var {
31705 const char *name; /* option name */
31706 size_t global_offset; /* offset of the option in global_options. */
31707 size_t target_offset; /* offset of the option in target options. */
31710 static struct rs6000_opt_var const rs6000_opt_vars[] =
31712 { "friz",
31713 offsetof (struct gcc_options, x_TARGET_FRIZ),
31714 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
31715 { "avoid-indexed-addresses",
31716 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
31717 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
31718 { "paired",
31719 offsetof (struct gcc_options, x_rs6000_paired_float),
31720 offsetof (struct cl_target_option, x_rs6000_paired_float), },
31721 { "longcall",
31722 offsetof (struct gcc_options, x_rs6000_default_long_calls),
31723 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
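/* Example usage, illustrative only: these variable options take the same
   optional "no-" prefix as the mask options, e.g.

       #pragma GCC target ("longcall")
       #pragma GCC target ("no-avoid-indexed-addresses")

   and simply store 1 or 0 into the corresponding field of global_options
   via the offsets recorded above.  */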
31726 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
31727 parsing. Return true if there were no errors. */
31729 static bool
31730 rs6000_inner_target_options (tree args, bool attr_p)
31732 bool ret = true;
31734 if (args == NULL_TREE)
31737 else if (TREE_CODE (args) == STRING_CST)
31739 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31740 char *q;
31742 while ((q = strtok (p, ",")) != NULL)
31744 bool error_p = false;
31745 bool not_valid_p = false;
31746 const char *cpu_opt = NULL;
31748 p = NULL;
31749 if (strncmp (q, "cpu=", 4) == 0)
31751 int cpu_index = rs6000_cpu_name_lookup (q+4);
31752 if (cpu_index >= 0)
31753 rs6000_cpu_index = cpu_index;
31754 else
31756 error_p = true;
31757 cpu_opt = q+4;
31760 else if (strncmp (q, "tune=", 5) == 0)
31762 int tune_index = rs6000_cpu_name_lookup (q+5);
31763 if (tune_index >= 0)
31764 rs6000_tune_index = tune_index;
31765 else
31767 error_p = true;
31768 cpu_opt = q+5;
31771 else
31773 size_t i;
31774 bool invert = false;
31775 char *r = q;
31777 error_p = true;
31778 if (strncmp (r, "no-", 3) == 0)
31780 invert = true;
31781 r += 3;
31784 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31785 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31787 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31789 if (!rs6000_opt_masks[i].valid_target)
31790 not_valid_p = true;
31791 else
31793 error_p = false;
31794 rs6000_isa_flags_explicit |= mask;
31796 /* VSX needs altivec, so -mvsx automagically sets
31797 altivec. */
31798 if (mask == OPTION_MASK_VSX && !invert)
31799 mask |= OPTION_MASK_ALTIVEC;
31801 if (rs6000_opt_masks[i].invert)
31802 invert = !invert;
31804 if (invert)
31805 rs6000_isa_flags &= ~mask;
31806 else
31807 rs6000_isa_flags |= mask;
31809 break;
31812 if (error_p && !not_valid_p)
31814 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31815 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31817 size_t j = rs6000_opt_vars[i].global_offset;
31818 *((int *) ((char *)&global_options + j)) = !invert;
31819 error_p = false;
31820 break;
31825 if (error_p)
31827 const char *eprefix, *esuffix;
31829 ret = false;
31830 if (attr_p)
31832 eprefix = "__attribute__((__target__(";
31833 esuffix = ")))";
31835 else
31837 eprefix = "#pragma GCC target ";
31838 esuffix = "";
31841 if (cpu_opt)
31842 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
31843 q, esuffix);
31844 else if (not_valid_p)
31845 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
31846 else
31847 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
31852 else if (TREE_CODE (args) == TREE_LIST)
31856 tree value = TREE_VALUE (args);
31857 if (value)
31859 bool ret2 = rs6000_inner_target_options (value, attr_p);
31860 if (!ret2)
31861 ret = false;
31863 args = TREE_CHAIN (args);
31865 while (args != NULL_TREE);
31868 else
31869 gcc_unreachable ();
31871 return ret;
31874 /* Print out the target options as a list for -mdebug=target. */
31876 static void
31877 rs6000_debug_target_options (tree args, const char *prefix)
31879 if (args == NULL_TREE)
31880 fprintf (stderr, "%s<NULL>", prefix);
31882 else if (TREE_CODE (args) == STRING_CST)
31884 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31885 char *q;
31887 while ((q = strtok (p, ",")) != NULL)
31889 p = NULL;
31890 fprintf (stderr, "%s\"%s\"", prefix, q);
31891 prefix = ", ";
31895 else if (TREE_CODE (args) == TREE_LIST)
31899 tree value = TREE_VALUE (args);
31900 if (value)
31902 rs6000_debug_target_options (value, prefix);
31903 prefix = ", ";
31905 args = TREE_CHAIN (args);
31907 while (args != NULL_TREE);
31910 else
31911 gcc_unreachable ();
31913 return;
31917 /* Hook to validate attribute((target("..."))). */
31919 static bool
31920 rs6000_valid_attribute_p (tree fndecl,
31921 tree ARG_UNUSED (name),
31922 tree args,
31923 int flags)
31925 struct cl_target_option cur_target;
31926 bool ret;
31927 tree old_optimize = build_optimization_node (&global_options);
31928 tree new_target, new_optimize;
31929 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31931 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31933 if (TARGET_DEBUG_TARGET)
31935 tree tname = DECL_NAME (fndecl);
31936 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31937 if (tname)
31938 fprintf (stderr, "function: %.*s\n",
31939 (int) IDENTIFIER_LENGTH (tname),
31940 IDENTIFIER_POINTER (tname));
31941 else
31942 fprintf (stderr, "function: unknown\n");
31944 fprintf (stderr, "args:");
31945 rs6000_debug_target_options (args, " ");
31946 fprintf (stderr, "\n");
31948 if (flags)
31949 fprintf (stderr, "flags: 0x%x\n", flags);
31951 fprintf (stderr, "--------------------\n");
31954 old_optimize = build_optimization_node (&global_options);
31955 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31957 /* If the function changed the optimization levels as well as setting target
31958 options, start with the optimizations specified. */
31959 if (func_optimize && func_optimize != old_optimize)
31960 cl_optimization_restore (&global_options,
31961 TREE_OPTIMIZATION (func_optimize));
31963 /* The target attributes may also change some optimization flags, so update
31964 the optimization options if necessary. */
31965 cl_target_option_save (&cur_target, &global_options);
31966 rs6000_cpu_index = rs6000_tune_index = -1;
31967 ret = rs6000_inner_target_options (args, true);
31969 /* Set up any additional state. */
31970 if (ret)
31972 ret = rs6000_option_override_internal (false);
31973 new_target = build_target_option_node (&global_options);
31975 else
31976 new_target = NULL;
31978 new_optimize = build_optimization_node (&global_options);
31980 if (!new_target)
31981 ret = false;
31983 else if (fndecl)
31985 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31987 if (old_optimize != new_optimize)
31988 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31991 cl_target_option_restore (&global_options, &cur_target);
31993 if (old_optimize != new_optimize)
31994 cl_optimization_restore (&global_options,
31995 TREE_OPTIMIZATION (old_optimize));
31997 return ret;
32001 /* Hook to validate the current #pragma GCC target and set the state, and
32002 update the macros based on what was changed. If ARGS is NULL, then
32003 POP_TARGET is used to reset the options. */
32005 bool
32006 rs6000_pragma_target_parse (tree args, tree pop_target)
32008 tree prev_tree = build_target_option_node (&global_options);
32009 tree cur_tree;
32010 struct cl_target_option *prev_opt, *cur_opt;
32011 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
32012 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
32014 if (TARGET_DEBUG_TARGET)
32016 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
32017 fprintf (stderr, "args:");
32018 rs6000_debug_target_options (args, " ");
32019 fprintf (stderr, "\n");
32021 if (pop_target)
32023 fprintf (stderr, "pop_target:\n");
32024 debug_tree (pop_target);
32026 else
32027 fprintf (stderr, "pop_target: <NULL>\n");
32029 fprintf (stderr, "--------------------\n");
32032 if (! args)
32034 cur_tree = ((pop_target)
32035 ? pop_target
32036 : target_option_default_node);
32037 cl_target_option_restore (&global_options,
32038 TREE_TARGET_OPTION (cur_tree));
32040 else
32042 rs6000_cpu_index = rs6000_tune_index = -1;
32043 if (!rs6000_inner_target_options (args, false)
32044 || !rs6000_option_override_internal (false)
32045 || (cur_tree = build_target_option_node (&global_options))
32046 == NULL_TREE)
32048 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
32049 fprintf (stderr, "invalid pragma\n");
32051 return false;
32055 target_option_current_node = cur_tree;
32057 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
32058 change the macros that are defined. */
32059 if (rs6000_target_modify_macros_ptr)
32061 prev_opt = TREE_TARGET_OPTION (prev_tree);
32062 prev_bumask = prev_opt->x_rs6000_builtin_mask;
32063 prev_flags = prev_opt->x_rs6000_isa_flags;
32065 cur_opt = TREE_TARGET_OPTION (cur_tree);
32066 cur_flags = cur_opt->x_rs6000_isa_flags;
32067 cur_bumask = cur_opt->x_rs6000_builtin_mask;
32069 diff_bumask = (prev_bumask ^ cur_bumask);
32070 diff_flags = (prev_flags ^ cur_flags);
32072 if ((diff_flags != 0) || (diff_bumask != 0))
32074 /* Delete old macros. */
32075 rs6000_target_modify_macros_ptr (false,
32076 prev_flags & diff_flags,
32077 prev_bumask & diff_bumask);
32079 /* Define new macros. */
32080 rs6000_target_modify_macros_ptr (true,
32081 cur_flags & diff_flags,
32082 cur_bumask & diff_bumask);
32086 return true;
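/* Illustrative sketch of the macro updates above, not part of the
   original source: after

       #pragma GCC target ("altivec")

   the ISA flag diff contains OPTION_MASK_ALTIVEC, so the front end is
   asked to define __ALTIVEC__; a later #pragma GCC target ("no-altivec")
   or a pop produces the opposite diff and the macro is undefined
   again.  */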
32090 /* Remember the last target of rs6000_set_current_function. */
32091 static GTY(()) tree rs6000_previous_fndecl;
32093 /* Establish appropriate back-end context for processing the function
32094 FNDECL. The argument might be NULL to indicate processing at top
32095 level, outside of any function scope. */
32096 static void
32097 rs6000_set_current_function (tree fndecl)
32099 tree old_tree = (rs6000_previous_fndecl
32100 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
32101 : NULL_TREE);
32103 tree new_tree = (fndecl
32104 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
32105 : NULL_TREE);
32107 if (TARGET_DEBUG_TARGET)
32109 bool print_final = false;
32110 fprintf (stderr, "\n==================== rs6000_set_current_function");
32112 if (fndecl)
32113 fprintf (stderr, ", fndecl %s (%p)",
32114 (DECL_NAME (fndecl)
32115 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
32116 : "<unknown>"), (void *)fndecl);
32118 if (rs6000_previous_fndecl)
32119 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
32121 fprintf (stderr, "\n");
32122 if (new_tree)
32124 fprintf (stderr, "\nnew fndecl target specific options:\n");
32125 debug_tree (new_tree);
32126 print_final = true;
32129 if (old_tree)
32131 fprintf (stderr, "\nold fndecl target specific options:\n");
32132 debug_tree (old_tree);
32133 print_final = true;
32136 if (print_final)
32137 fprintf (stderr, "--------------------\n");
32140 /* Only change the context if the function changes. This hook is called
32141 several times in the course of compiling a function, and we don't want to
32142 slow things down too much or call target_reinit when it isn't safe. */
32143 if (fndecl && fndecl != rs6000_previous_fndecl)
32145 rs6000_previous_fndecl = fndecl;
32146 if (old_tree == new_tree)
32149 else if (new_tree)
32151 cl_target_option_restore (&global_options,
32152 TREE_TARGET_OPTION (new_tree));
32153 if (TREE_TARGET_GLOBALS (new_tree))
32154 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32155 else
32156 TREE_TARGET_GLOBALS (new_tree)
32157 = save_target_globals_default_opts ();
32160 else if (old_tree)
32162 new_tree = target_option_current_node;
32163 cl_target_option_restore (&global_options,
32164 TREE_TARGET_OPTION (new_tree));
32165 if (TREE_TARGET_GLOBALS (new_tree))
32166 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32167 else if (new_tree == target_option_default_node)
32168 restore_target_globals (&default_target_globals);
32169 else
32170 TREE_TARGET_GLOBALS (new_tree)
32171 = save_target_globals_default_opts ();
32177 /* Save the current options. */
32179 static void
32180 rs6000_function_specific_save (struct cl_target_option *ptr,
32181 struct gcc_options *opts)
32183 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
32184 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
32187 /* Restore the current options. */
32189 static void
32190 rs6000_function_specific_restore (struct gcc_options *opts,
32191 struct cl_target_option *ptr)
32194 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
32195 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
32196 (void) rs6000_option_override_internal (false);
32199 /* Print the current options. */
32201 static void
32202 rs6000_function_specific_print (FILE *file, int indent,
32203 struct cl_target_option *ptr)
32205 rs6000_print_isa_options (file, indent, "Isa options set",
32206 ptr->x_rs6000_isa_flags);
32208 rs6000_print_isa_options (file, indent, "Isa options explicit",
32209 ptr->x_rs6000_isa_flags_explicit);
32212 /* Helper function to print the current isa or misc options on a line. */
32214 static void
32215 rs6000_print_options_internal (FILE *file,
32216 int indent,
32217 const char *string,
32218 HOST_WIDE_INT flags,
32219 const char *prefix,
32220 const struct rs6000_opt_mask *opts,
32221 size_t num_elements)
32223 size_t i;
32224 size_t start_column = 0;
32225 size_t cur_column;
32226 size_t max_column = 76;
32227 const char *comma = "";
32229 if (indent)
32230 start_column += fprintf (file, "%*s", indent, "");
32232 if (!flags)
31234 fprintf (file, DEBUG_FMT_S, string, "<none>");
31235 return;
31238 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
32240 /* Print the various mask options. */
32241 cur_column = start_column;
32242 for (i = 0; i < num_elements; i++)
32244 if ((flags & opts[i].mask) != 0)
32246 const char *no_str = opts[i].invert ? "no-" : "";
32247 size_t len = (strlen (comma)
32248 + strlen (prefix)
32249 + strlen (no_str)
32250 + strlen (opts[i].name));
32252 cur_column += len;
32253 if (cur_column > max_column)
32255 fprintf (file, ", \\\n%*s", (int)start_column, "");
32256 cur_column = start_column + len;
32257 comma = "";
32260 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
32261 opts[i].name);
32262 flags &= ~ opts[i].mask;
32263 comma = ", ";
32267 fputs ("\n", file);
32270 /* Helper function to print the current isa options on a line. */
32272 static void
32273 rs6000_print_isa_options (FILE *file, int indent, const char *string,
32274 HOST_WIDE_INT flags)
32276 rs6000_print_options_internal (file, indent, string, flags, "-m",
32277 &rs6000_opt_masks[0],
32278 ARRAY_SIZE (rs6000_opt_masks));
32281 static void
32282 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
32283 HOST_WIDE_INT flags)
32285 rs6000_print_options_internal (file, indent, string, flags, "",
32286 &rs6000_builtin_mask_names[0],
32287 ARRAY_SIZE (rs6000_builtin_mask_names));
32291 /* Hook to determine if one function can safely inline another. */
32293 static bool
32294 rs6000_can_inline_p (tree caller, tree callee)
32296 bool ret = false;
32297 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32298 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32300 /* If callee has no option attributes, then it is ok to inline. */
32301 if (!callee_tree)
32302 ret = true;
32304 /* If caller has no option attributes, but callee does then it is not ok to
32305 inline. */
32306 else if (!caller_tree)
32307 ret = false;
32309 else
32311 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32312 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32314 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32315 can inline an altivec function but a non-vsx function can't inline a
32316 vsx function. */
32317 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32318 == callee_opts->x_rs6000_isa_flags)
32319 ret = true;
32322 if (TARGET_DEBUG_TARGET)
32323 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32324 (DECL_NAME (caller)
32325 ? IDENTIFIER_POINTER (DECL_NAME (caller))
32326 : "<unknown>"),
32327 (DECL_NAME (callee)
32328 ? IDENTIFIER_POINTER (DECL_NAME (callee))
32329 : "<unknown>"),
32330 (ret ? "can" : "cannot"));
32332 return ret;
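/* Illustrative example of the subset rule above, not from the original
   source:

       __attribute__((target ("altivec"))) static int callee (void)
       { return 1; }

       __attribute__((target ("vsx"))) int caller (void)
       { return callee (); }    -- OK: vsx implies altivec

   The reverse direction is rejected, since an altivec-only caller must
   not have VSX code inlined into it.  */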
32335 /* Allocate a stack temp and fixup the address so it meets the particular
32336 memory requirements (either offsettable or REG+REG addressing). */
32338 rtx
32339 rs6000_allocate_stack_temp (enum machine_mode mode,
32340 bool offsettable_p,
32341 bool reg_reg_p)
32343 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32344 rtx addr = XEXP (stack, 0);
32345 int strict_p = (reload_in_progress || reload_completed);
32347 if (!legitimate_indirect_address_p (addr, strict_p))
32349 if (offsettable_p
32350 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32351 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32353 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32354 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32357 return stack;
32360 /* Given a memory reference, if it does not use reg or reg+reg addressing,
32361 convert it to that form, to handle memory reference instructions like
32362 STFIWX that only accept reg+reg addressing. */
32364 rtx
32365 rs6000_address_for_fpconvert (rtx x)
32367 int strict_p = (reload_in_progress || reload_completed);
32368 rtx addr;
32370 gcc_assert (MEM_P (x));
32371 addr = XEXP (x, 0);
32372 if (! legitimate_indirect_address_p (addr, strict_p)
32373 && ! legitimate_indexed_address_p (addr, strict_p))
32375 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32377 rtx reg = XEXP (addr, 0);
32378 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32379 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32380 gcc_assert (REG_P (reg));
32381 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32382 addr = reg;
32384 else if (GET_CODE (addr) == PRE_MODIFY)
32386 rtx reg = XEXP (addr, 0);
32387 rtx expr = XEXP (addr, 1);
32388 gcc_assert (REG_P (reg));
32389 gcc_assert (GET_CODE (expr) == PLUS);
32390 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32391 addr = reg;
32394 x = replace_equiv_address (x, copy_addr_to_reg (addr));
32397 return x;
32400 /* Given a memory reference, if it is not in the form for altivec memory
32401 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
32402 convert to the altivec format. */
32404 rtx
32405 rs6000_address_for_altivec (rtx x)
32407 gcc_assert (MEM_P (x));
32408 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
32410 rtx addr = XEXP (x, 0);
32411 int strict_p = (reload_in_progress || reload_completed);
32413 if (!legitimate_indexed_address_p (addr, strict_p)
32414 && !legitimate_indirect_address_p (addr, strict_p))
32415 addr = copy_to_mode_reg (Pmode, addr);
32417 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
32418 x = change_address (x, GET_MODE (x), addr);
32421 return x;
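/* Illustrative note, not original source text: the AND with -16 above
   mirrors what lvx/stvx do in hardware, i.e. the low four bits of the
   effective address are ignored:

       EA = (base + index) & ~15

   Making the masking explicit in the RTL keeps the compiler's view of
   which 16-byte block is accessed in sync with the instruction's actual
   behavior.  */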
32424 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32426 On the RS/6000, all integer constants are acceptable, most won't be valid
32427 for particular insns, though. Only easy FP constants are acceptable. */
32429 static bool
32430 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
32432 if (TARGET_ELF && tls_referenced_p (x))
32433 return false;
32435 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
32436 || GET_MODE (x) == VOIDmode
32437 || (TARGET_POWERPC64 && mode == DImode)
32438 || easy_fp_constant (x, mode)
32439 || easy_vector_constant (x, mode));
32444 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32446 void
32447 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32449 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32450 rtx toc_load = NULL_RTX;
32451 rtx toc_restore = NULL_RTX;
32452 rtx func_addr;
32453 rtx abi_reg = NULL_RTX;
32454 rtx call[4];
32455 int n_call;
32456 rtx insn;
32458 /* Handle longcall attributes. */
32459 if (INTVAL (cookie) & CALL_LONG)
32460 func_desc = rs6000_longcall_ref (func_desc);
32462 /* Handle indirect calls. */
32463 if (GET_CODE (func_desc) != SYMBOL_REF
32464 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
32466 /* Save the TOC into its reserved slot before the call,
32467 and prepare to restore it after the call. */
32468 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32469 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32470 rtx stack_toc_mem = gen_frame_mem (Pmode,
32471 gen_rtx_PLUS (Pmode, stack_ptr,
32472 stack_toc_offset));
32473 toc_restore = gen_rtx_SET (VOIDmode, toc_reg, stack_toc_mem);
32475 /* Can we optimize saving the TOC in the prologue or
32476 do we need to do it at every call? */
32477 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32478 cfun->machine->save_toc_in_prologue = true;
32479 else
32481 MEM_VOLATILE_P (stack_toc_mem) = 1;
32482 emit_move_insn (stack_toc_mem, toc_reg);
32485 if (DEFAULT_ABI == ABI_ELFv2)
32487 /* A function pointer in the ELFv2 ABI is just a plain address, but
32488 the ABI requires it to be loaded into r12 before the call. */
32489 func_addr = gen_rtx_REG (Pmode, 12);
32490 emit_move_insn (func_addr, func_desc);
32491 abi_reg = func_addr;
32493 else
32495 /* A function pointer under AIX is a pointer to a data area whose
32496 first word contains the actual address of the function, whose
32497 second word contains a pointer to its TOC, and whose third word
32498 contains a value to place in the static chain register (r11).
32499 Note that if we load the static chain, our "trampoline" need
32500 not have any executable code. */
32502 /* Load up address of the actual function. */
32503 func_desc = force_reg (Pmode, func_desc);
32504 func_addr = gen_reg_rtx (Pmode);
32505 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
32507 /* Prepare to load the TOC of the called function. Note that the
32508 TOC load must happen immediately before the actual call so
32509 that unwinding the TOC registers works correctly. See the
32510 comment in frob_update_context. */
32511 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32512 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32513 gen_rtx_PLUS (Pmode, func_desc,
32514 func_toc_offset));
32515 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32517 /* If we have a static chain, load it up. */
32518 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32520 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32521 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32522 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32523 gen_rtx_PLUS (Pmode, func_desc,
32524 func_sc_offset));
32525 emit_move_insn (sc_reg, func_sc_mem);
32526 abi_reg = sc_reg;
32530 else
32532 /* Direct calls use the TOC: for local calls, the callee will
32533 assume the TOC register is set; for non-local calls, the
32534 PLT stub needs the TOC register. */
32535 abi_reg = toc_reg;
32536 func_addr = func_desc;
32539 /* Create the call. */
32540 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
32541 if (value != NULL_RTX)
32542 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32543 n_call = 1;
32545 if (toc_load)
32546 call[n_call++] = toc_load;
32547 if (toc_restore)
32548 call[n_call++] = toc_restore;
32550 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
32552 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32553 insn = emit_call_insn (insn);
32555 /* Mention all registers defined by the ABI to hold information
32556 as uses in CALL_INSN_FUNCTION_USAGE. */
32557 if (abi_reg)
32558 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
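/* Illustrative sketch, not from the original source, of the sequence the
   AIX/ELFv1 indirect-call path above expands to, assuming the descriptor
   address is in r11 and the 64-bit TOC save slot is at 40(r1):

       std   r2,40(r1)	 -- save our TOC
       ld    r0,0(r11)	 -- entry point from descriptor word 0
       ld    r2,8(r11)	 -- callee's TOC from word 1
       ld    r11,16(r11) -- static chain from word 2 (loaded last)
       mtctr r0
       bctrl
       ld    r2,40(r1)	 -- restore our TOC

   Under ELFv2 the function pointer is the code address itself and is
   simply moved into r12 before the call.  */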
32561 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32563 void
32564 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32566 rtx call[2];
32567 rtx insn;
32569 gcc_assert (INTVAL (cookie) == 0);
32571 /* Create the call. */
32572 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
32573 if (value != NULL_RTX)
32574 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32576 call[1] = simple_return_rtx;
32578 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32579 insn = emit_call_insn (insn);
32581 /* Note use of the TOC register. */
32582 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
32583 /* We need to also mark a use of the link register since the function we
32584 sibling-call to will use it to return to our caller. */
32585 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
32588 /* Return whether we need to always update the saved TOC pointer when we update
32589 the stack pointer. */
32591 static bool
32592 rs6000_save_toc_in_prologue_p (void)
32594 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
32597 #ifdef HAVE_GAS_HIDDEN
32598 # define USE_HIDDEN_LINKONCE 1
32599 #else
32600 # define USE_HIDDEN_LINKONCE 0
32601 #endif
32603 /* Fills in the label name that should be used for a 476 link stack thunk. */
32605 void
32606 get_ppc476_thunk_name (char name[32])
32608 gcc_assert (TARGET_LINK_STACK);
32610 if (USE_HIDDEN_LINKONCE)
32611 sprintf (name, "__ppc476.get_thunk");
32612 else
32613 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32616 /* This function emits the simple thunk routine that is used to preserve
32617 the link stack on the 476 cpu. */
32619 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32620 static void
32621 rs6000_code_end (void)
32623 char name[32];
32624 tree decl;
32626 if (!TARGET_LINK_STACK)
32627 return;
32629 get_ppc476_thunk_name (name);
32631 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32632 build_function_type_list (void_type_node, NULL_TREE));
32633 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32634 NULL_TREE, void_type_node);
32635 TREE_PUBLIC (decl) = 1;
32636 TREE_STATIC (decl) = 1;
32638 #if RS6000_WEAK
32639 if (USE_HIDDEN_LINKONCE)
32641 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32642 targetm.asm_out.unique_section (decl, 0);
32643 switch_to_section (get_named_section (decl, NULL, 0));
32644 DECL_WEAK (decl) = 1;
32645 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32646 targetm.asm_out.globalize_label (asm_out_file, name);
32647 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32648 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32650 else
32651 #endif
32653 switch_to_section (text_section);
32654 ASM_OUTPUT_LABEL (asm_out_file, name);
32657 DECL_INITIAL (decl) = make_node (BLOCK);
32658 current_function_decl = decl;
32659 init_function_start (decl);
32660 first_function_block_is_cold = false;
32661 /* Make sure unwind info is emitted for the thunk if needed. */
32662 final_start_function (emit_barrier (), asm_out_file, 1);
32664 fputs ("\tblr\n", asm_out_file);
32666 final_end_function ();
32667 init_insn_lengths ();
32668 free_after_compilation (cfun);
32669 set_cfun (NULL);
32670 current_function_decl = NULL;
32673 /* Add r30 to hard reg set if the prologue sets it up and it is not
32674 pic_offset_table_rtx. */
32676 static void
32677 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32679 if (!TARGET_SINGLE_PIC_BASE
32680 && TARGET_TOC
32681 && TARGET_MINIMAL_TOC
32682 && get_pool_size () != 0)
32683 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32687 /* Helper function for rs6000_split_logical to emit a logical instruction after
32688 splitting the operation into individual GPR registers.
32690 DEST is the destination register.
32691 OP1 and OP2 are the input source registers.
32692 CODE is the base operation (AND, IOR, XOR, NOT).
32693 MODE is the machine mode.
32694 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32695 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32696 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32697 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32698 formation of the AND instructions. */
32700 static void
32701 rs6000_split_logical_inner (rtx dest,
32702 rtx op1,
32703 rtx op2,
32704 enum rtx_code code,
32705 enum machine_mode mode,
32706 bool complement_final_p,
32707 bool complement_op1_p,
32708 bool complement_op2_p,
32709 rtx clobber_reg)
32711 rtx bool_rtx;
32712 rtx set_rtx;
32714 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32715 if (op2 && GET_CODE (op2) == CONST_INT
32716 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32717 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32719 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32720 HOST_WIDE_INT value = INTVAL (op2) & mask;
32722 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32723 if (code == AND)
32725 if (value == 0)
32727 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
32728 return;
32731 else if (value == mask)
32733 if (!rtx_equal_p (dest, op1))
32734 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32735 return;
32739 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32740 into separate ORI/ORIS or XORI/XORIS instructions. */
32741 else if (code == IOR || code == XOR)
32743 if (value == 0)
32745 if (!rtx_equal_p (dest, op1))
32746 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32747 return;
32752 if (complement_op1_p)
32753 op1 = gen_rtx_NOT (mode, op1);
32755 if (complement_op2_p)
32756 op2 = gen_rtx_NOT (mode, op2);
32758 bool_rtx = ((code == NOT)
32759 ? gen_rtx_NOT (mode, op1)
32760 : gen_rtx_fmt_ee (code, mode, op1, op2));
32762 if (complement_final_p)
32763 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32765 set_rtx = gen_rtx_SET (VOIDmode, dest, bool_rtx);
32767 /* Is this AND with an explicit clobber? */
32768 if (clobber_reg)
32770 rtx clobber = gen_rtx_CLOBBER (VOIDmode, clobber_reg);
32771 set_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set_rtx, clobber));
32774 emit_insn (set_rtx);
32775 return;
32778 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32779 operations are split immediately during RTL generation to allow for more
32780 optimizations of the AND/IOR/XOR.
32782 OPERANDS is an array containing the destination and two input operands.
32783 CODE is the base operation (AND, IOR, XOR, NOT).
32784 MODE is the machine mode.
32785 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32786 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32787 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32788 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32789 formation of the AND instructions. */
32791 static void
32792 rs6000_split_logical_di (rtx operands[3],
32793 enum rtx_code code,
32794 bool complement_final_p,
32795 bool complement_op1_p,
32796 bool complement_op2_p,
32797 rtx clobber_reg)
32799 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32800 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32801 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32802 enum hi_lo { hi = 0, lo = 1 };
32803 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32804 size_t i;
32806 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32807 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32808 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32809 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32811 if (code == NOT)
32812 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32813 else
32815 if (GET_CODE (operands[2]) != CONST_INT)
32817 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32818 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32820 else
32822 HOST_WIDE_INT value = INTVAL (operands[2]);
32823 HOST_WIDE_INT value_hi_lo[2];
32825 gcc_assert (!complement_final_p);
32826 gcc_assert (!complement_op1_p);
32827 gcc_assert (!complement_op2_p);
32829 value_hi_lo[hi] = value >> 32;
32830 value_hi_lo[lo] = value & lower_32bits;
32832 for (i = 0; i < 2; i++)
32834 HOST_WIDE_INT sub_value = value_hi_lo[i];
32836 if (sub_value & sign_bit)
32837 sub_value |= upper_32bits;
32839 op2_hi_lo[i] = GEN_INT (sub_value);
32841 /* If this is an AND instruction, check to see if we need to load
32842 the value in a register. */
32843 if (code == AND && sub_value != -1 && sub_value != 0
32844 && !and_operand (op2_hi_lo[i], SImode))
32845 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32850 for (i = 0; i < 2; i++)
32852 /* Split large IOR/XOR operations. */
32853 if ((code == IOR || code == XOR)
32854 && GET_CODE (op2_hi_lo[i]) == CONST_INT
32855 && !complement_final_p
32856 && !complement_op1_p
32857 && !complement_op2_p
32858 && clobber_reg == NULL_RTX
32859 && !logical_const_operand (op2_hi_lo[i], SImode))
32861 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32862 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32863 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32864 rtx tmp = gen_reg_rtx (SImode);
32866 /* Make sure the constant is sign extended. */
32867 if ((hi_16bits & sign_bit) != 0)
32868 hi_16bits |= upper_32bits;
32870 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32871 code, SImode, false, false, false,
32872 NULL_RTX);
32874 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32875 code, SImode, false, false, false,
32876 NULL_RTX);
32878 else
32879 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32880 code, SImode, complement_final_p,
32881 complement_op1_p, complement_op2_p,
32882 clobber_reg);
32885 return;
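/* Worked example, illustrative only: on a 32-bit target

       x |= 0x12345678;		-- x is a DImode value

   has a low word that is not a logical_const_operand, so the loop above
   splits it into

       oris  tmp,x_lo,0x1234
       ori   x_lo,tmp,0x5678

   while the high word sees the constant 0 and degenerates into a plain
   move (or nothing if the registers already match).  */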
32888 /* Split the insns that make up boolean operations operating on multiple GPR
32889 registers. The boolean MD patterns ensure that the inputs either are
32890 exactly the same as the output registers, or there is no overlap.
32892 OPERANDS is an array containing the destination and two input operands.
32893 CODE is the base operation (AND, IOR, XOR, NOT).
32894 MODE is the machine mode.
32895 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32896 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32897 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32898 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32899 formation of the AND instructions. */
32901 void
32902 rs6000_split_logical (rtx operands[3],
32903 enum rtx_code code,
32904 bool complement_final_p,
32905 bool complement_op1_p,
32906 bool complement_op2_p,
32907 rtx clobber_reg)
32909 enum machine_mode mode = GET_MODE (operands[0]);
32910 enum machine_mode sub_mode;
32911 rtx op0, op1, op2;
32912 int sub_size, regno0, regno1, nregs, i;
32914 /* If this is DImode, use the specialized version that can run before
32915 register allocation. */
32916 if (mode == DImode && !TARGET_POWERPC64)
32918 rs6000_split_logical_di (operands, code, complement_final_p,
32919 complement_op1_p, complement_op2_p,
32920 clobber_reg);
32921 return;
32924 op0 = operands[0];
32925 op1 = operands[1];
32926 op2 = (code == NOT) ? NULL_RTX : operands[2];
32927 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
32928 sub_size = GET_MODE_SIZE (sub_mode);
32929 regno0 = REGNO (op0);
32930 regno1 = REGNO (op1);
32932 gcc_assert (reload_completed);
32933 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32934 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32936 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
32937 gcc_assert (nregs > 1);
32939 if (op2 && REG_P (op2))
32940 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
32942 for (i = 0; i < nregs; i++)
32944 int offset = i * sub_size;
32945 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
32946 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
32947 rtx sub_op2 = ((code == NOT)
32948 ? NULL_RTX
32949 : simplify_subreg (sub_mode, op2, mode, offset));
32951 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
32952 complement_final_p, complement_op1_p,
32953 complement_op2_p, clobber_reg);
32956 return;
32960 /* Return true if the peephole2 can combine a load involving a combination of
32961 an addis instruction and a load with an offset that can be fused together on
32962 a power8.
32964 The operands are:
32965 operands[0] register set with addis
32966 operands[1] value set via addis
32967 operands[2] target register being loaded
32968 operands[3] D-form memory reference using operands[0].
32970 In addition, we are passed a boolean that is true if this is a peephole2,
32971 and we can then check whether the addis_reg is dead after the insn and can be
32972 replaced by the target register. */
32974 bool
32975 fusion_gpr_load_p (rtx *operands, bool peep2_p)
32977 rtx addis_reg = operands[0];
32978 rtx addis_value = operands[1];
32979 rtx target = operands[2];
32980 rtx mem = operands[3];
32981 rtx addr;
32982 rtx base_reg;
32984 /* Validate arguments. */
32985 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
32986 return false;
32988 if (!base_reg_operand (target, GET_MODE (target)))
32989 return false;
32991 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
32992 return false;
32994 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
32995 return false;
32997 /* Allow sign/zero extension. */
32998 if (GET_CODE (mem) == ZERO_EXTEND
32999 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33000 mem = XEXP (mem, 0);
33002 if (!MEM_P (mem))
33003 return false;
33005 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33006 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33007 return false;
33009 /* Validate that the register used to load the high value is either the
33010 register being loaded, or we can safely replace its use in a peephole2.
33012 If this is a peephole2, we assume that there are 2 instructions in the
33013 peephole (addis and load), so we want to check if the target register was
33014 not used in the memory address and the register to hold the addis result
33015 is dead after the peephole. */
33016 if (REGNO (addis_reg) != REGNO (target))
33018 if (!peep2_p)
33019 return false;
33021 if (reg_mentioned_p (target, mem))
33022 return false;
33024 if (!peep2_reg_dead_p (2, addis_reg))
33025 return false;
33027 /* If the target register being loaded is the stack pointer, we must
33028 avoid loading any other value into it, even temporarily. */
33029 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33030 return false;
33033 base_reg = XEXP (addr, 0);
33034 return REGNO (addis_reg) == REGNO (base_reg);
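/* Illustrative example, not original source text, of a fuseable pair this
   predicate accepts, e.g. a TOC-relative load on power8:

       addis r10,r2,sym@toc@ha
       lwz   r10,sym@toc@l(r10)

   The addis result feeds the load's base register and both write the
   same register, which is the shape the power8 front end can fuse into a
   single operation.  */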
33037 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33038 sequence. We adjust the addis register to use the target register. If the
33039 load sign-extends, we instead emit a zero-extending load followed by an
33040 explicit sign extension, since the fusion hardware only covers
33041 zero-extending loads.
33043 The operands are:
33044 operands[0] register set with addis (to be replaced with target)
33045 operands[1] value set via addis
33046 operands[2] target register being loaded
33047 operands[3] D-form memory reference using operands[0]. */
33049 void
33050 expand_fusion_gpr_load (rtx *operands)
33052 rtx addis_value = operands[1];
33053 rtx target = operands[2];
33054 rtx orig_mem = operands[3];
33055 rtx new_addr, new_mem, orig_addr, offset;
33056 enum rtx_code plus_or_lo_sum;
33057 enum machine_mode target_mode = GET_MODE (target);
33058 enum machine_mode extend_mode = target_mode;
33059 enum machine_mode ptr_mode = Pmode;
33060 enum rtx_code extend = UNKNOWN;
33061 rtx addis_reg = ((ptr_mode == target_mode)
33062 ? target
33063 : simplify_subreg (ptr_mode, target, target_mode, 0));
33065 if (GET_CODE (orig_mem) == ZERO_EXTEND
33066 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33068 extend = GET_CODE (orig_mem);
33069 orig_mem = XEXP (orig_mem, 0);
33070 target_mode = GET_MODE (orig_mem);
33073 gcc_assert (MEM_P (orig_mem));
33075 orig_addr = XEXP (orig_mem, 0);
33076 plus_or_lo_sum = GET_CODE (orig_addr);
33077 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33079 offset = XEXP (orig_addr, 1);
33080 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
33081 new_mem = change_address (orig_mem, target_mode, new_addr);
33083 if (extend != UNKNOWN)
33084 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33086 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
33087 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
33089 if (extend == SIGN_EXTEND)
33091 int sub_off = ((BYTES_BIG_ENDIAN)
33092 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33093 : 0);
33094 rtx sign_reg
33095 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33097 emit_insn (gen_rtx_SET (VOIDmode, target,
33098 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33101 return;
33104 /* Return a string to fuse an addis instruction with a GPR load into the same
33105 register that the addis instruction set up. The code is complicated,
33106 so we call output_asm_insn directly, and just return "".
33108 The operands are:
33109 operands[0] register set with addis (must be same reg as target).
33110 operands[1] value set via addis
33111 operands[2] target register being loaded
33112 operands[3] D-form memory reference using operands[0]. */
const char *
emit_fusion_gpr_load (rtx *operands)
{
  rtx addis_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx mem = operands[3];
  rtx fuse_ops[10];
  rtx addr;
  rtx load_offset;
  const char *addis_str = NULL;
  const char *load_str = NULL;
  const char *extend_insn = NULL;
  const char *mode_name = NULL;
  char insn_template[80];
  enum machine_mode mode;
  const char *comment_str = ASM_COMMENT_START;
  bool sign_p = false;

  gcc_assert (REG_P (addis_reg) && REG_P (target));
  gcc_assert (REGNO (addis_reg) == REGNO (target));

  if (*comment_str == ' ')
    comment_str++;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
    {
      sign_p = true;
      mem = XEXP (mem, 0);
    }

  gcc_assert (MEM_P (mem));
  addr = XEXP (mem, 0);
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    gcc_unreachable ();
  load_offset = XEXP (addr, 1);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case QImode:
      mode_name = "char";
      load_str = "lbz";
      extend_insn = "extsb %0,%0";
      break;

    case HImode:
      mode_name = "short";
      load_str = "lhz";
      extend_insn = "extsh %0,%0";
      break;

    case SImode:
      mode_name = "int";
      load_str = "lwz";
      extend_insn = "extsw %0,%0";
      break;

    case DImode:
      if (TARGET_POWERPC64)
	{
	  mode_name = "long";
	  load_str = "ld";
	}
      else
	gcc_unreachable ();
      break;

    default:
      gcc_unreachable ();
    }

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);		/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);		/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
	   comment_str, mode_name);
  output_asm_insn (insn_template, fuse_ops);

  /* Emit the D-form load instruction.  */
  if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
    {
      sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
      fuse_ops[1] = load_offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (load_offset) == UNSPEC
	   && XINT (load_offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);

      else
	gcc_unreachable ();

      fuse_ops[1] = XVECEXP (load_offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (load_offset) == PLUS
	   && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
	   && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (load_offset, 1)))
    {
      rtx tocrel_unspec = XEXP (load_offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);

      else
	gcc_unreachable ();

      fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[2] = XEXP (load_offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
    {
      sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);

      fuse_ops[1] = load_offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load offset for fusion", load_offset);

  /* Handle sign extension.  The peephole2 pass generates this as a separate
     insn, but we handle it just in case it got reattached.  */
  if (sign_p)
    {
      gcc_assert (extend_insn != NULL);
      output_asm_insn (extend_insn, fuse_ops);
    }

  return "";
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"