rs6000: Make all insert instructions one type
gcc/config/rs6000/rs6000.c
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-attr.h"
31 #include "flags.h"
32 #include "recog.h"
33 #include "obstack.h"
34 #include "tree.h"
35 #include "stringpool.h"
36 #include "stor-layout.h"
37 #include "calls.h"
38 #include "print-tree.h"
39 #include "varasm.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "except.h"
43 #include "function.h"
44 #include "output.h"
45 #include "dbxout.h"
46 #include "basic-block.h"
47 #include "diagnostic-core.h"
48 #include "toplev.h"
49 #include "ggc.h"
50 #include "hashtab.h"
51 #include "tm_p.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "common/common-target.h"
55 #include "langhooks.h"
56 #include "reload.h"
57 #include "cfgloop.h"
58 #include "sched-int.h"
59 #include "pointer-set.h"
60 #include "hash-table.h"
61 #include "vec.h"
62 #include "basic-block.h"
63 #include "tree-ssa-alias.h"
64 #include "internal-fn.h"
65 #include "gimple-fold.h"
66 #include "tree-eh.h"
67 #include "gimple-expr.h"
68 #include "is-a.h"
69 #include "gimple.h"
70 #include "gimplify.h"
71 #include "gimple-iterator.h"
72 #include "gimple-walk.h"
73 #include "intl.h"
74 #include "params.h"
75 #include "tm-constrs.h"
76 #include "ira.h"
77 #include "opts.h"
78 #include "tree-vectorizer.h"
79 #include "dumpfile.h"
80 #include "cgraph.h"
81 #include "target-globals.h"
82 #if TARGET_XCOFF
83 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
84 #endif
85 #if TARGET_MACHO
86 #include "gstab.h" /* for N_SLINE */
87 #endif
89 #ifndef TARGET_NO_PROTOTYPE
90 #define TARGET_NO_PROTOTYPE 0
91 #endif
93 #define min(A,B) ((A) < (B) ? (A) : (B))
94 #define max(A,B) ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, used to get the
   address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes
   mapping into real registers, and skip the ANY class, which is just an OR
   of the bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */

/* Register type masks, based on the register type, of the valid addressing
   modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
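
/* Illustrative sketch (not part of the build): these predicates are meant to
   gate auto-increment address generation.  A caller wanting to form a
   PRE_MODIFY address for MODE might, under that assumption, do something
   like:

     if (mode_supports_pre_modify_p (mode))
       addr = gen_rtx_PRE_MODIFY (Pmode, base,
				  gen_rtx_PLUS (Pmode, base, offset));

   gen_rtx_PRE_MODIFY and gen_rtx_PLUS are the generic RTL constructors; the
   guard just tests the RELOAD_REG_PRE_MODIFY bit recorded in reg_addr for
   the ANY class.  */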

/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
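
/* Worked example (comment only): COSTS_N_INSNS (N) scales N by the cost of
   one fast instruction (rtl.h defines it as (N) * 4), so an entry such as
   COSTS_N_INSNS (18) for divsi says an SImode divide costs roughly as much
   as 18 adds.  The rtx-cost hook then prices a division by consulting the
   table selected for the current -mtune, along the lines of (simplified):

     case DIV:
     case MOD:
       *total = rs6000_cost->divsi;
       break;

   The exact case structure here is a sketch, not a quote of
   rs6000_rtx_costs.  */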

/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),    /* muldi */
  COSTS_N_INSNS (22),    /* divsi */
  COSTS_N_INSNS (28),    /* divdi */
  COSTS_N_INSNS (3),     /* fp */
  COSTS_N_INSNS (3),     /* dmul */
  COSTS_N_INSNS (59),    /* sdiv */
  COSTS_N_INSNS (72),    /* ddiv */
  64,			 /* cache line size */
  16,			 /* l1 cache */
  2048,			 /* l2 cache */
  16,			 /* prefetch streams */
};

/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
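
/* Illustrative sketch (not part of the build): rs6000-builtin.def is an
   X-macro file.  Each RS6000_BUILTIN_n line in it, for instance a
   hypothetical

     RS6000_BUILTIN_2 (MY_BUILTIN, "__builtin_my_op", RS6000_BTM_ALTIVEC,
		       RS6000_BTC_CONST, CODE_FOR_nothing)

   expands under the definitions above into the initializer

     { "__builtin_my_op", CODE_FOR_nothing, RS6000_BTM_ALTIVEC,
       RS6000_BTC_CONST },

   so including the .def file between the braces materializes the whole
   rs6000_builtin_info[] table.  The trailing #undefs retire the macros so
   the same .def file can be re-included elsewhere with different
   expansions.  MY_BUILTIN/__builtin_my_op are made-up names for
   illustration only.  */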

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);


static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;

/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9",  "10", "11", "12", "13", "14", "15",
      "16", "17", "18", "19", "20", "21", "22", "23",
      "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6", "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
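
/* Usage sketch (comment only): these handlers implement the machine
   attributes listed above, which user code applies roughly like:

     void f (void) __attribute__ ((longcall));
     typedef int vi __attribute__ ((altivec (vector__)));  // a vector int

   "altivec" takes exactly one identifier argument (hence min_len/max_len
   of 1) naming the AltiVec type flavor, while "longcall"/"shortcall" take
   none and apply to function types; the exact spelling accepted by the
   altivec handler is an assumption based on how altivec.h wraps it.  */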

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
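
/* Worked example (comment only): with FIRST_ALTIVEC_REGNO as the base,
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)      == 0x80000000 (%v0), and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001 (%v31),
   matching the VRSAVE layout described above, where %v0 occupies the most
   significant bit.  */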

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P rs6000_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1626 /* Processor table. */
1627 struct rs6000_ptt
1629 const char *const name; /* Canonical processor name. */
1630 const enum processor_type processor; /* Processor type enum value. */
1631 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1634 static struct rs6000_ptt const processor_target_table[] =
1636 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1637 #include "rs6000-cpus.def"
1638 #undef RS6000_CPU
1641 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1642 name is invalid. */
1644 static int
1645 rs6000_cpu_name_lookup (const char *name)
1647 size_t i;
1649 if (name != NULL)
1651 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1652 if (! strcmp (name, processor_target_table[i].name))
1653 return (int)i;
1656 return -1;
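/* For example, rs6000_cpu_name_lookup ("power7") returns the index of the
   "power7" entry generated from rs6000-cpus.def, while a name with no
   entry in that file yields -1.  */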
1660 /* Return number of consecutive hard regs needed starting at reg REGNO
1661 to hold something of mode MODE.
1662 This is ordinarily the length in words of a value of mode MODE
1663 but can be less for certain modes in special long registers.
1665 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1666 scalar instructions. The upper 32 bits are only available to the
1667 SIMD instructions.
1669 POWER and PowerPC GPRs hold 32 bits worth;
1670 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1672 static int
1673 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1675 unsigned HOST_WIDE_INT reg_size;
1677 /* TF/TD modes are special in that they always take 2 registers. */
1678 if (FP_REGNO_P (regno))
1679 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1680 ? UNITS_PER_VSX_WORD
1681 : UNITS_PER_FP_WORD);
1683 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1684 reg_size = UNITS_PER_SPE_WORD;
1686 else if (ALTIVEC_REGNO_P (regno))
1687 reg_size = UNITS_PER_ALTIVEC_WORD;
1689 /* The value returned for SCmode in the E500 double case is 2 for
1690 ABI compatibility; storing an SCmode value in a single register
1691 would require function_arg and rs6000_spe_function_arg to handle
1692 SCmode so as to pass the value correctly in a pair of
1693 registers. */
1694 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1695 && !DECIMAL_FLOAT_MODE_P (mode))
1696 reg_size = UNITS_PER_FP_WORD;
1698 else
1699 reg_size = UNITS_PER_WORD;
1701 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
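/* A worked example, assuming TARGET_VSX: V4SImode is 16 bytes, so a VSX
   register (reg_size == UNITS_PER_VSX_WORD == 16) needs
   (16 + 16 - 1) / 16 = 1 register, while 32-bit GPRs
   (reg_size == UNITS_PER_WORD == 4) would need (16 + 4 - 1) / 4 = 4.  */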
1704 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1705 MODE. */
1706 static int
1707 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1709 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1711 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1712 register combinations, and we use PTImode where we need to deal with quad
1713 word memory operations. Don't allow quad words in the argument or frame
1714 pointer registers, just registers 0..31. */
1715 if (mode == PTImode)
1716 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1717 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1718 && ((regno & 1) == 0));
1720 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1721 implementations. Don't allow an item to be split between a FP register
1722 and an Altivec register. Allow TImode in all VSX registers if the user
1723 asked for it. */
1724 if (TARGET_VSX && VSX_REGNO_P (regno)
1725 && (VECTOR_MEM_VSX_P (mode)
1726 || (TARGET_VSX_SCALAR_FLOAT && mode == SFmode)
1727 || (TARGET_VSX_SCALAR_DOUBLE && (mode == DFmode || mode == DImode))
1728 || (TARGET_VSX_TIMODE && mode == TImode)
1729 || (TARGET_VADDUQM && mode == V1TImode)))
1731 if (FP_REGNO_P (regno))
1732 return FP_REGNO_P (last_regno);
1734 if (ALTIVEC_REGNO_P (regno))
1736 if (mode == SFmode && !TARGET_UPPER_REGS_SF)
1737 return 0;
1739 if ((mode == DFmode || mode == DImode) && !TARGET_UPPER_REGS_DF)
1740 return 0;
1742 return ALTIVEC_REGNO_P (last_regno);
1746 /* The GPRs can hold any mode, but values bigger than one register
1747 cannot go past R31. */
1748 if (INT_REGNO_P (regno))
1749 return INT_REGNO_P (last_regno);
1751 /* The float registers (except for VSX vector modes) can only hold floating
1752 modes and DImode. */
1753 if (FP_REGNO_P (regno))
1755 if (SCALAR_FLOAT_MODE_P (mode)
1756 && (mode != TDmode || (regno % 2) == 0)
1757 && FP_REGNO_P (last_regno))
1758 return 1;
1760 if (GET_MODE_CLASS (mode) == MODE_INT
1761 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1762 return 1;
1764 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1765 && PAIRED_VECTOR_MODE (mode))
1766 return 1;
1768 return 0;
1771 /* The CR register can only hold CC modes. */
1772 if (CR_REGNO_P (regno))
1773 return GET_MODE_CLASS (mode) == MODE_CC;
1775 if (CA_REGNO_P (regno))
1776 return mode == BImode;
1778 /* AltiVec modes can only go in AltiVec registers. */
1779 if (ALTIVEC_REGNO_P (regno))
1780 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1781 || mode == V1TImode);
1783 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1784 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1785 return 1;
1787 /* We cannot put non-VSX TImode or PTImode anywhere except the general registers,
1788 and it must be able to fit within the register set. */
1790 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
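/* For example, TDmode must start on an even FPR, so regno 32 (f0) is
   accepted by the scalar float check above while regno 33 (f1) is not,
   and the CA register only ever holds BImode.  */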
1793 /* Print interesting facts about registers. */
1794 static void
1795 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1797 int r, m;
1799 for (r = first_regno; r <= last_regno; ++r)
1801 const char *comma = "";
1802 int len;
1804 if (first_regno == last_regno)
1805 fprintf (stderr, "%s:\t", reg_name);
1806 else
1807 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1809 len = 8;
1810 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1811 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1813 if (len > 70)
1815 fprintf (stderr, ",\n\t");
1816 len = 8;
1817 comma = "";
1820 if (rs6000_hard_regno_nregs[m][r] > 1)
1821 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1822 rs6000_hard_regno_nregs[m][r]);
1823 else
1824 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1826 comma = ", ";
1829 if (call_used_regs[r])
1831 if (len > 70)
1833 fprintf (stderr, ",\n\t");
1834 len = 8;
1835 comma = "";
1838 len += fprintf (stderr, "%s%s", comma, "call-used");
1839 comma = ", ";
1842 if (fixed_regs[r])
1844 if (len > 70)
1846 fprintf (stderr, ",\n\t");
1847 len = 8;
1848 comma = "";
1851 len += fprintf (stderr, "%s%s", comma, "fixed");
1852 comma = ", ";
1855 if (len > 70)
1857 fprintf (stderr, ",\n\t");
1858 comma = "";
1861 len += fprintf (stderr, "%sreg-class = %s", comma,
1862 reg_class_names[(int)rs6000_regno_regclass[r]]);
1863 comma = ", ";
1865 if (len > 70)
1867 fprintf (stderr, ",\n\t");
1868 comma = "";
1871 fprintf (stderr, "%sregno = %d\n", comma, r);
1875 static const char *
1876 rs6000_debug_vector_unit (enum rs6000_vector v)
1878 const char *ret;
1880 switch (v)
1882 case VECTOR_NONE: ret = "none"; break;
1883 case VECTOR_ALTIVEC: ret = "altivec"; break;
1884 case VECTOR_VSX: ret = "vsx"; break;
1885 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1886 case VECTOR_PAIRED: ret = "paired"; break;
1887 case VECTOR_SPE: ret = "spe"; break;
1888 case VECTOR_OTHER: ret = "other"; break;
1889 default: ret = "unknown"; break;
1892 return ret;
1895 /* Print the address masks in a human readable fashion. */
1896 DEBUG_FUNCTION void
1897 rs6000_debug_print_mode (ssize_t m)
1899 ssize_t rc;
1901 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
1902 for (rc = 0; rc < N_RELOAD_REG; rc++)
1904 addr_mask_type mask = reg_addr[m].addr_mask[rc];
1905 fprintf (stderr,
1906 " %s: %c%c%c%c%c%c",
1907 reload_reg_map[rc].name,
1908 (mask & RELOAD_REG_VALID) != 0 ? 'v' : ' ',
1909 (mask & RELOAD_REG_MULTIPLE) != 0 ? 'm' : ' ',
1910 (mask & RELOAD_REG_INDEXED) != 0 ? 'i' : ' ',
1911 (mask & RELOAD_REG_OFFSET) != 0 ? 'o' : ' ',
1912 (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
1913 (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
1916 if (rs6000_vector_unit[m] != VECTOR_NONE
1917 || rs6000_vector_mem[m] != VECTOR_NONE
1918 || (reg_addr[m].reload_store != CODE_FOR_nothing)
1919 || (reg_addr[m].reload_load != CODE_FOR_nothing))
1921 fprintf (stderr,
1922 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c",
1923 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
1924 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
1925 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
1926 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
1929 fputs ("\n", stderr);
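/* Illustrative -mdebug=reg output for one mode (the register-class names
   and which flag letters appear depend on the target options):
     Mode: DF     GPR: vm o++  FPR: v io++ ...
   where v/m/i/o/+ mirror the RELOAD_REG_* bits tested above.  */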
1932 #define DEBUG_FMT_ID "%-32s= "
1933 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1934 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1935 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
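/* DEBUG_FMT_ID left-justifies the label in a 32-column field so the '='
   signs of all -mdebug=reg lines align.  For example,
     fprintf (stderr, DEBUG_FMT_D, "tls_size", 16);
   prints "tls_size                        = 16".  */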
1937 /* Print various interesting information with -mdebug=reg. */
1938 static void
1939 rs6000_debug_reg_global (void)
1941 static const char *const tf[2] = { "false", "true" };
1942 const char *nl = (const char *)0;
1943 int m;
1944 size_t m1, m2, v;
1945 char costly_num[20];
1946 char nop_num[20];
1947 char flags_buffer[40];
1948 const char *costly_str;
1949 const char *nop_str;
1950 const char *trace_str;
1951 const char *abi_str;
1952 const char *cmodel_str;
1953 struct cl_target_option cl_opts;
1955 /* Modes we want tieable information on. */
1956 static const enum machine_mode print_tieable_modes[] = {
1957 QImode,
1958 HImode,
1959 SImode,
1960 DImode,
1961 TImode,
1962 PTImode,
1963 SFmode,
1964 DFmode,
1965 TFmode,
1966 SDmode,
1967 DDmode,
1968 TDmode,
1969 V8QImode,
1970 V4HImode,
1971 V2SImode,
1972 V16QImode,
1973 V8HImode,
1974 V4SImode,
1975 V2DImode,
1976 V1TImode,
1977 V32QImode,
1978 V16HImode,
1979 V8SImode,
1980 V4DImode,
1981 V2TImode,
1982 V2SFmode,
1983 V4SFmode,
1984 V2DFmode,
1985 V8SFmode,
1986 V4DFmode,
1987 CCmode,
1988 CCUNSmode,
1989 CCEQmode,
1992 /* Virtual regs we are interested in. */
1993 static const struct {
1994 int regno; /* register number. */
1995 const char *name; /* register name. */
1996 } virtual_regs[] = {
1997 { STACK_POINTER_REGNUM, "stack pointer:" },
1998 { TOC_REGNUM, "toc: " },
1999 { STATIC_CHAIN_REGNUM, "static chain: " },
2000 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2001 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2002 { ARG_POINTER_REGNUM, "arg pointer: " },
2003 { FRAME_POINTER_REGNUM, "frame pointer:" },
2004 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2005 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2006 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2007 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2008 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2009 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2010 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2011 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2012 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2015 fputs ("\nHard register information:\n", stderr);
2016 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2017 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2018 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2019 LAST_ALTIVEC_REGNO,
2020 "vs");
2021 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2022 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2023 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2024 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2025 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2026 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2027 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2028 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2030 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2031 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2032 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2034 fprintf (stderr,
2035 "\n"
2036 "d reg_class = %s\n"
2037 "f reg_class = %s\n"
2038 "v reg_class = %s\n"
2039 "wa reg_class = %s\n"
2040 "wd reg_class = %s\n"
2041 "wf reg_class = %s\n"
2042 "wg reg_class = %s\n"
2043 "wl reg_class = %s\n"
2044 "wm reg_class = %s\n"
2045 "wr reg_class = %s\n"
2046 "ws reg_class = %s\n"
2047 "wt reg_class = %s\n"
2048 "wu reg_class = %s\n"
2049 "wv reg_class = %s\n"
2050 "ww reg_class = %s\n"
2051 "wx reg_class = %s\n"
2052 "wy reg_class = %s\n"
2053 "wz reg_class = %s\n"
2054 "\n",
2055 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2056 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2057 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2058 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2059 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2060 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2061 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2062 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2063 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2064 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2065 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2066 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2067 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2068 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2069 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2070 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2071 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2072 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2074 nl = "\n";
2075 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2076 rs6000_debug_print_mode (m);
2078 fputs ("\n", stderr);
2080 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2082 enum machine_mode mode1 = print_tieable_modes[m1];
2083 bool first_time = true;
2085 nl = (const char *)0;
2086 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2088 enum machine_mode mode2 = print_tieable_modes[m2];
2089 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2091 if (first_time)
2093 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2094 nl = "\n";
2095 first_time = false;
2098 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2102 if (!first_time)
2103 fputs ("\n", stderr);
2106 if (nl)
2107 fputs (nl, stderr);
2109 if (rs6000_recip_control)
2111 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2113 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2114 if (rs6000_recip_bits[m])
2116 fprintf (stderr,
2117 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2118 GET_MODE_NAME (m),
2119 (RS6000_RECIP_AUTO_RE_P (m)
2120 ? "auto"
2121 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2122 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2123 ? "auto"
2124 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2127 fputs ("\n", stderr);
2130 if (rs6000_cpu_index >= 0)
2132 const char *name = processor_target_table[rs6000_cpu_index].name;
2133 HOST_WIDE_INT flags
2134 = processor_target_table[rs6000_cpu_index].target_enable;
2136 sprintf (flags_buffer, "-mcpu=%s flags", name);
2137 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2139 else
2140 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2142 if (rs6000_tune_index >= 0)
2144 const char *name = processor_target_table[rs6000_tune_index].name;
2145 HOST_WIDE_INT flags
2146 = processor_target_table[rs6000_tune_index].target_enable;
2148 sprintf (flags_buffer, "-mtune=%s flags", name);
2149 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2151 else
2152 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2154 cl_target_option_save (&cl_opts, &global_options);
2155 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2156 rs6000_isa_flags);
2158 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2159 rs6000_isa_flags_explicit);
2161 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2162 rs6000_builtin_mask);
2164 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2166 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2167 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2169 switch (rs6000_sched_costly_dep)
2171 case max_dep_latency:
2172 costly_str = "max_dep_latency";
2173 break;
2175 case no_dep_costly:
2176 costly_str = "no_dep_costly";
2177 break;
2179 case all_deps_costly:
2180 costly_str = "all_deps_costly";
2181 break;
2183 case true_store_to_load_dep_costly:
2184 costly_str = "true_store_to_load_dep_costly";
2185 break;
2187 case store_to_load_dep_costly:
2188 costly_str = "store_to_load_dep_costly";
2189 break;
2191 default:
2192 costly_str = costly_num;
2193 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2194 break;
2197 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2199 switch (rs6000_sched_insert_nops)
2201 case sched_finish_regroup_exact:
2202 nop_str = "sched_finish_regroup_exact";
2203 break;
2205 case sched_finish_pad_groups:
2206 nop_str = "sched_finish_pad_groups";
2207 break;
2209 case sched_finish_none:
2210 nop_str = "sched_finish_none";
2211 break;
2213 default:
2214 nop_str = nop_num;
2215 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2216 break;
2219 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2221 switch (rs6000_sdata)
2223 default:
2224 case SDATA_NONE:
2225 break;
2227 case SDATA_DATA:
2228 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2229 break;
2231 case SDATA_SYSV:
2232 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2233 break;
2235 case SDATA_EABI:
2236 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2237 break;
2241 switch (rs6000_traceback)
2243 case traceback_default: trace_str = "default"; break;
2244 case traceback_none: trace_str = "none"; break;
2245 case traceback_part: trace_str = "part"; break;
2246 case traceback_full: trace_str = "full"; break;
2247 default: trace_str = "unknown"; break;
2250 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2252 switch (rs6000_current_cmodel)
2254 case CMODEL_SMALL: cmodel_str = "small"; break;
2255 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2256 case CMODEL_LARGE: cmodel_str = "large"; break;
2257 default: cmodel_str = "unknown"; break;
2260 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2262 switch (rs6000_current_abi)
2264 case ABI_NONE: abi_str = "none"; break;
2265 case ABI_AIX: abi_str = "aix"; break;
2266 case ABI_ELFv2: abi_str = "ELFv2"; break;
2267 case ABI_V4: abi_str = "V4"; break;
2268 case ABI_DARWIN: abi_str = "darwin"; break;
2269 default: abi_str = "unknown"; break;
2272 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2274 if (rs6000_altivec_abi)
2275 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2277 if (rs6000_spe_abi)
2278 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2280 if (rs6000_darwin64_abi)
2281 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2283 if (rs6000_float_gprs)
2284 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2286 fprintf (stderr, DEBUG_FMT_S, "fprs",
2287 (TARGET_FPRS ? "true" : "false"));
2289 fprintf (stderr, DEBUG_FMT_S, "single_float",
2290 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2292 fprintf (stderr, DEBUG_FMT_S, "double_float",
2293 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2295 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2296 (TARGET_SOFT_FLOAT ? "true" : "false"));
2298 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2299 (TARGET_E500_SINGLE ? "true" : "false"));
2301 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2302 (TARGET_E500_DOUBLE ? "true" : "false"));
2304 if (TARGET_LINK_STACK)
2305 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2307 if (targetm.lra_p ())
2308 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2310 if (TARGET_P8_FUSION)
2311 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2312 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2314 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2315 TARGET_SECURE_PLT ? "secure" : "bss");
2316 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2317 aix_struct_return ? "aix" : "sysv");
2318 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2319 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2320 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2321 tf[!!rs6000_align_branch_targets]);
2322 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2323 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2324 rs6000_long_double_type_size);
2325 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2326 (int)rs6000_sched_restricted_insns_priority);
2327 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2328 (int)END_BUILTINS);
2329 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2330 (int)RS6000_BUILTIN_COUNT);
2332 if (TARGET_VSX)
2333 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2334 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2338 /* Update the addr mask bits in reg_addr to help secondary reload and the
2339 legitimate address support (rs6000_legitimate_address_p) figure out the
2340 appropriate addressing to use. */
2342 static void
2343 rs6000_setup_reg_addr_masks (void)
2345 ssize_t rc, reg, m, nregs;
2346 addr_mask_type any_addr_mask, addr_mask;
2348 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2350 enum machine_mode m2 = (enum machine_mode)m;
2352 /* SDmode is special in that we want to access it only via REG+REG
2353 addressing on power7 and above, since we want to use the LFIWZX and
2354 STFIWZX instructions to load it. */
2355 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2357 any_addr_mask = 0;
2358 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2360 addr_mask = 0;
2361 reg = reload_reg_map[rc].reg;
2363 /* Can mode values go in the GPR/FPR/Altivec registers? */
2364 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2366 nregs = rs6000_hard_regno_nregs[m][reg];
2367 addr_mask |= RELOAD_REG_VALID;
2369 /* Indicate if the mode takes more than 1 physical register. If
2370 it takes a single register, indicate it can do REG+REG
2371 addressing. */
2372 if (nregs > 1 || m == BLKmode)
2373 addr_mask |= RELOAD_REG_MULTIPLE;
2374 else
2375 addr_mask |= RELOAD_REG_INDEXED;
2377 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2378 addressing. Restrict addressing on SPE for 64-bit types
2379 because of the SUBREG hackery used to address 64-bit floats in
2380 '32-bit' GPRs. To simplify secondary reload, don't allow
2381 update forms on scalar floating point types that can go in the
2382 upper registers. */
2384 if (TARGET_UPDATE
2385 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2386 && GET_MODE_SIZE (m2) <= 8
2387 && !VECTOR_MODE_P (m2)
2388 && !COMPLEX_MODE_P (m2)
2389 && !indexed_only_p
2390 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8)
2391 && !(m2 == DFmode && TARGET_UPPER_REGS_DF)
2392 && !(m2 == SFmode && TARGET_UPPER_REGS_SF))
2394 addr_mask |= RELOAD_REG_PRE_INCDEC;
2396 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2397 we don't allow PRE_MODIFY for some multi-register
2398 operations. */
2399 switch (m)
2401 default:
2402 addr_mask |= RELOAD_REG_PRE_MODIFY;
2403 break;
2405 case DImode:
2406 if (TARGET_POWERPC64)
2407 addr_mask |= RELOAD_REG_PRE_MODIFY;
2408 break;
2410 case DFmode:
2411 case DDmode:
2412 if (TARGET_DF_INSN)
2413 addr_mask |= RELOAD_REG_PRE_MODIFY;
2414 break;
2419 /* GPR and FPR registers can do REG+OFFSET addressing, except
2420 possibly for SDmode. */
2421 if ((addr_mask != 0) && !indexed_only_p
2422 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2423 addr_mask |= RELOAD_REG_OFFSET;
2425 reg_addr[m].addr_mask[rc] = addr_mask;
2426 any_addr_mask |= addr_mask;
2429 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
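/* A concrete reading of the masks built above: on a 64-bit target with
   TARGET_UPDATE, DImode in a GPR ends up with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and both update bits, while
   V2DFmode in an Altivec register gets only RELOAD_REG_VALID and
   RELOAD_REG_INDEXED, since offset and update forms are restricted to
   the GPR/FPR classes.  */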
2434 /* Initialize the various global tables that are based on register size. */
2435 static void
2436 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2438 ssize_t r, m, c;
2439 int align64;
2440 int align32;
2442 /* Precalculate REGNO_REG_CLASS. */
2443 rs6000_regno_regclass[0] = GENERAL_REGS;
2444 for (r = 1; r < 32; ++r)
2445 rs6000_regno_regclass[r] = BASE_REGS;
2447 for (r = 32; r < 64; ++r)
2448 rs6000_regno_regclass[r] = FLOAT_REGS;
2450 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2451 rs6000_regno_regclass[r] = NO_REGS;
2453 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2454 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2456 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2457 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2458 rs6000_regno_regclass[r] = CR_REGS;
2460 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2461 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2462 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2463 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2464 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2465 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2466 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2467 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2468 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2469 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2470 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2471 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2473 /* Precalculate the mapping from register class to the simpler reload register
2474 class. We don't need all of the register classes that are combinations of
2475 different classes, just the simple ones that have constraint letters. */
2476 for (c = 0; c < N_REG_CLASSES; c++)
2477 reg_class_to_reg_type[c] = NO_REG_TYPE;
2479 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2480 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2481 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2482 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2483 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2484 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2485 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2486 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2487 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2488 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2489 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2490 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2492 if (TARGET_VSX)
2494 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2495 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2497 else
2499 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2500 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2503 /* Precalculate the valid memory formats as well as the vector information;
2504 this must be set up before the rs6000_hard_regno_nregs_internal calls
2505 below. */
2506 gcc_assert ((int)VECTOR_NONE == 0);
2507 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2508 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2510 gcc_assert ((int)CODE_FOR_nothing == 0);
2511 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2513 gcc_assert ((int)NO_REGS == 0);
2514 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2516 /* The VSX hardware allows native alignment for vectors, but this controls
2517 whether the compiler can use native alignment or must keep 128-bit alignment. */
2518 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2520 align64 = 64;
2521 align32 = 32;
2523 else
2525 align64 = 128;
2526 align32 = 128;
2529 /* V2DF mode, VSX only. */
2530 if (TARGET_VSX)
2532 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2533 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2534 rs6000_vector_align[V2DFmode] = align64;
2537 /* V4SF mode, either VSX or Altivec. */
2538 if (TARGET_VSX)
2540 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2541 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2542 rs6000_vector_align[V4SFmode] = align32;
2544 else if (TARGET_ALTIVEC)
2546 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2547 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2548 rs6000_vector_align[V4SFmode] = align32;
2551 /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
2552 and stores. */
2553 if (TARGET_ALTIVEC)
2555 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2556 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2557 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2558 rs6000_vector_align[V4SImode] = align32;
2559 rs6000_vector_align[V8HImode] = align32;
2560 rs6000_vector_align[V16QImode] = align32;
2562 if (TARGET_VSX)
2564 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2565 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2566 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2568 else
2570 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2571 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2572 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2576 /* V2DImode: full mode support depends on the ISA 2.07 vector mode. Allow it
2577 under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2578 if (TARGET_VSX)
2580 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2581 rs6000_vector_unit[V2DImode]
2582 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2583 rs6000_vector_align[V2DImode] = align64;
2585 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2586 rs6000_vector_unit[V1TImode]
2587 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2588 rs6000_vector_align[V1TImode] = 128;
2591 /* DFmode, see if we want to use the VSX unit. */
2592 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2594 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2595 rs6000_vector_mem[DFmode]
2596 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2597 rs6000_vector_align[DFmode] = align64;
2600 /* Allow TImode in VSX registers and set the VSX memory macros. */
2601 if (TARGET_VSX && TARGET_VSX_TIMODE)
2603 rs6000_vector_mem[TImode] = VECTOR_VSX;
2604 rs6000_vector_align[TImode] = align64;
2607 /* TODO add SPE and paired floating point vector support. */
2609 /* Register class constraints for the constraints that depend on compile
2610 switches. When the VSX code was added, different constraints were added
2611 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2612 of the VSX registers are used. The register classes for scalar floating
2613 point types are set based on whether we allow that type into the upper
2614 (Altivec) registers. GCC has register classes to target the Altivec
2615 registers for load/store operations, to select using a VSX memory
2616 operation instead of the traditional floating point operation. The
2617 constraints are:
2619 d - Register class to use with traditional DFmode instructions.
2620 f - Register class to use with traditional SFmode instructions.
2621 v - Altivec register.
2622 wa - Any VSX register.
2623 wd - Preferred register class for V2DFmode.
2624 wf - Preferred register class for V4SFmode.
2625 wg - Float register for power6x move insns.
2626 wl - Float register if we can do 32-bit signed int loads.
2627 wm - VSX register for ISA 2.07 direct move operations.
2628 wr - GPR if 64-bit mode is permitted.
2629 ws - Register class to do ISA 2.06 DF operations.
2630 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2631 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2632 wt - VSX register for TImode in VSX registers.
2633 ww - Register class to do SF conversions in with VSX operations.
2634 wx - Float register if we can do 32-bit int stores.
2635 wy - Register class to do ISA 2.07 SF operations.
2636 wz - Float register if we can do 32-bit unsigned int loads. */
2638 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2639 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2641 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2642 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2644 if (TARGET_VSX)
2646 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2647 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2648 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2650 if (TARGET_VSX_TIMODE)
2651 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;
2653 if (TARGET_UPPER_REGS_DF)
2655 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2656 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2658 else
2659 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2662 /* Add conditional constraints based on various options, to allow us to
2663 collapse multiple insn patterns. */
2664 if (TARGET_ALTIVEC)
2665 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2667 if (TARGET_MFPGPR)
2668 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2670 if (TARGET_LFIWAX)
2671 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;
2673 if (TARGET_DIRECT_MOVE)
2674 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2676 if (TARGET_POWERPC64)
2677 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2679 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)
2681 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2682 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2683 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2685 else if (TARGET_P8_VECTOR)
2687 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2688 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2690 else if (TARGET_VSX)
2691 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2693 if (TARGET_STFIWX)
2694 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;
2696 if (TARGET_LFIWZX)
2697 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
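/* The net effect: an insn alternative written against one of these
   letters (say "wz") only becomes usable when the corresponding option
   (TARGET_LFIWZX here) maps it to a real class; otherwise it keeps the
   NO_REGS value from the memset above and that alternative never
   matches.  */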
2699 /* Set up the reload helper and direct move functions. */
2700 if (TARGET_VSX || TARGET_ALTIVEC)
2702 if (TARGET_64BIT)
2704 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2705 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2706 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2707 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2708 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2709 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2710 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2711 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2712 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2713 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2714 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2715 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2716 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2717 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2718 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2720 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2721 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2722 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2723 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2725 if (TARGET_P8_VECTOR)
2727 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2728 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2729 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2730 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2732 if (TARGET_VSX_TIMODE)
2734 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2735 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2737 if (TARGET_DIRECT_MOVE)
2739 if (TARGET_POWERPC64)
2741 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2742 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2743 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2744 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2745 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2746 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2747 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2748 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2749 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2751 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2752 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2753 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2754 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2755 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2756 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2757 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2758 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2759 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2761 else
2763 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2764 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2765 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2769 else
2771 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2772 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2773 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2774 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2775 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2776 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2777 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2778 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2779 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2780 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2781 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2782 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2783 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2784 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2785 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2787 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2788 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2789 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2790 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2792 if (TARGET_P8_VECTOR)
2794 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2795 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2796 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2797 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2799 if (TARGET_VSX_TIMODE)
2801 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2802 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2807 /* Precalculate HARD_REGNO_NREGS. */
2808 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2809 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2810 rs6000_hard_regno_nregs[m][r]
2811 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2813 /* Precalculate HARD_REGNO_MODE_OK. */
2814 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2815 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2816 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2817 rs6000_hard_regno_mode_ok_p[m][r] = true;
2819 /* Precalculate CLASS_MAX_NREGS sizes. */
2820 for (c = 0; c < LIM_REG_CLASSES; ++c)
2822 int reg_size;
2824 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2825 reg_size = UNITS_PER_VSX_WORD;
2827 else if (c == ALTIVEC_REGS)
2828 reg_size = UNITS_PER_ALTIVEC_WORD;
2830 else if (c == FLOAT_REGS)
2831 reg_size = UNITS_PER_FP_WORD;
2833 else
2834 reg_size = UNITS_PER_WORD;
2836 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2838 enum machine_mode m2 = (enum machine_mode)m;
2839 int reg_size2 = reg_size;
2841 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2842 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2843 && (m == TDmode || m == TFmode))
2844 reg_size2 = UNITS_PER_FP_WORD;
2846 rs6000_class_max_nregs[m][c]
2847 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
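/* Worked example: TFmode is 16 bytes and reg_size2 is forced back to
   UNITS_PER_FP_WORD (8) for the VSX classes, so
   rs6000_class_max_nregs[TFmode][VSX_REGS] = (16 + 8 - 1) / 8 = 2,
   matching the 2-register rule in the comment above.  */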
2851 if (TARGET_E500_DOUBLE)
2852 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2854 /* Calculate which modes to automatically generate code to use the
2855 reciprocal divide and square root instructions. In the future, possibly
2856 automatically generate the instructions even if the user did not specify
2857 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2858 not accurate enough. */
2859 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2860 if (TARGET_FRES)
2861 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2862 if (TARGET_FRE)
2863 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2864 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2865 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2866 if (VECTOR_UNIT_VSX_P (V2DFmode))
2867 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2869 if (TARGET_FRSQRTES)
2870 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2871 if (TARGET_FRSQRTE)
2872 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2873 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2874 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2875 if (VECTOR_UNIT_VSX_P (V2DFmode))
2876 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2878 if (rs6000_recip_control)
2880 if (!flag_finite_math_only)
2881 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2882 if (flag_trapping_math)
2883 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2884 if (!flag_reciprocal_math)
2885 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2886 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2888 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2889 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2890 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2892 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2893 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2894 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2896 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2897 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2898 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2900 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2901 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2902 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2904 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2905 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2906 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2908 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2909 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2910 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2912 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2913 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2914 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2916 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2917 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2918 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2922 /* Update the addr mask bits in reg_addr to help secondary reload and the
2923 legitimate address support (rs6000_legitimate_address_p) figure out the
2924 appropriate addressing to use. */
2925 rs6000_setup_reg_addr_masks ();
2927 if (global_init_p || TARGET_DEBUG_TARGET)
2929 if (TARGET_DEBUG_REG)
2930 rs6000_debug_reg_global ();
2932 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2933 fprintf (stderr,
2934 "SImode variable mult cost = %d\n"
2935 "SImode constant mult cost = %d\n"
2936 "SImode short constant mult cost = %d\n"
2937 "DImode multipliciation cost = %d\n"
2938 "SImode division cost = %d\n"
2939 "DImode division cost = %d\n"
2940 "Simple fp operation cost = %d\n"
2941 "DFmode multiplication cost = %d\n"
2942 "SFmode division cost = %d\n"
2943 "DFmode division cost = %d\n"
2944 "cache line size = %d\n"
2945 "l1 cache size = %d\n"
2946 "l2 cache size = %d\n"
2947 "simultaneous prefetches = %d\n"
2948 "\n",
2949 rs6000_cost->mulsi,
2950 rs6000_cost->mulsi_const,
2951 rs6000_cost->mulsi_const9,
2952 rs6000_cost->muldi,
2953 rs6000_cost->divsi,
2954 rs6000_cost->divdi,
2955 rs6000_cost->fp,
2956 rs6000_cost->dmul,
2957 rs6000_cost->sdiv,
2958 rs6000_cost->ddiv,
2959 rs6000_cost->cache_line_size,
2960 rs6000_cost->l1_cache_size,
2961 rs6000_cost->l2_cache_size,
2962 rs6000_cost->simultaneous_prefetches);
2966 #if TARGET_MACHO
2967 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2969 static void
2970 darwin_rs6000_override_options (void)
2972 /* The Darwin ABI always includes AltiVec and can't be (validly) turned
2973 off. */
2974 rs6000_altivec_abi = 1;
2975 TARGET_ALTIVEC_VRSAVE = 1;
2976 rs6000_current_abi = ABI_DARWIN;
2978 if (DEFAULT_ABI == ABI_DARWIN
2979 && TARGET_64BIT)
2980 darwin_one_byte_bool = 1;
2982 if (TARGET_64BIT && ! TARGET_POWERPC64)
2984 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
2985 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2987 if (flag_mkernel)
2989 rs6000_default_long_calls = 1;
2990 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
2993 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2994 Altivec. */
2995 if (!flag_mkernel && !flag_apple_kext
2996 && TARGET_64BIT
2997 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
2998 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3000 /* Unless the user (not the configurer) has explicitly overridden
3001 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3002 G4 unless targeting the kernel. */
3003 if (!flag_mkernel
3004 && !flag_apple_kext
3005 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3006 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3007 && ! global_options_set.x_rs6000_cpu_index)
3009 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3012 #endif
3014 /* If not otherwise specified by a target, make 'long double' equivalent to
3015 'double'. */
3017 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3018 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3019 #endif
3021 /* Return the builtin mask of the various options used that could affect which
3022 builtins were used. In the past we used target_flags, but we've run out of
3023 bits, and some options like SPE and PAIRED are no longer in
3024 target_flags. */
3026 HOST_WIDE_INT
3027 rs6000_builtin_mask_calculate (void)
3029 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3030 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3031 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3032 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3033 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3034 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3035 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3036 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3037 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3038 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3039 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3040 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3041 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3042 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3043 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0));
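/* For example, compiling with just AltiVec, VSX and hardware floating
   point enabled yields RS6000_BTM_ALTIVEC | RS6000_BTM_VSX
   | RS6000_BTM_HARD_FLOAT; builtins whose mask bits fall outside the
   returned value are treated as unavailable.  */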
3046 /* Override command line options. Mostly we process the processor type and
3047 sometimes adjust other TARGET_ options. */
3049 static bool
3050 rs6000_option_override_internal (bool global_init_p)
3052 bool ret = true;
3053 bool have_cpu = false;
3055 /* The default cpu requested at configure time, if any. */
3056 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3058 HOST_WIDE_INT set_masks;
3059 int cpu_index;
3060 int tune_index;
3061 struct cl_target_option *main_target_opt
3062 = ((global_init_p || target_option_default_node == NULL)
3063 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3065 /* Remember the explicit arguments. */
3066 if (global_init_p)
3067 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3069 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3070 library functions, so warn about it. The flag may be useful for
3071 performance studies from time to time though, so don't disable it
3072 entirely. */
3073 if (global_options_set.x_rs6000_alignment_flags
3074 && rs6000_alignment_flags == MASK_ALIGN_POWER
3075 && DEFAULT_ABI == ABI_DARWIN
3076 && TARGET_64BIT)
3077 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3078 " it is incompatible with the installed C and C++ libraries");
3080 /* Numerous experiments show that IRA-based loop pressure
3081 calculation works better for RTL loop invariant motion on targets
3082 with enough (>= 32) registers. It is an expensive optimization,
3083 so it is enabled only for peak performance. */
3084 if (optimize >= 3 && global_init_p
3085 && !global_options_set.x_flag_ira_loop_pressure)
3086 flag_ira_loop_pressure = 1;
3088 /* Set the pointer size. */
3089 if (TARGET_64BIT)
3091 rs6000_pmode = (int)DImode;
3092 rs6000_pointer_size = 64;
3094 else
3096 rs6000_pmode = (int)SImode;
3097 rs6000_pointer_size = 32;
3100 /* Some OSs don't support saving the high part of 64-bit registers on context
3101 switch. Other OSs don't support saving Altivec registers. On those OSs,
3102 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3103 if the user wants either, the user must explicitly specify them and we
3104 won't interfere with the user's specification. */
3106 set_masks = POWERPC_MASKS;
3107 #ifdef OS_MISSING_POWERPC64
3108 if (OS_MISSING_POWERPC64)
3109 set_masks &= ~OPTION_MASK_POWERPC64;
3110 #endif
3111 #ifdef OS_MISSING_ALTIVEC
3112 if (OS_MISSING_ALTIVEC)
3113 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3114 #endif
3116 /* Don't let the processor default override options given explicitly. */
3117 set_masks &= ~rs6000_isa_flags_explicit;
3119 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3120 the cpu in a target attribute or pragma, but did not specify a tuning
3121 option, use the cpu for the tuning option rather than the option specified
3122 with -mtune on the command line. Process a '--with-cpu' configuration
3123 request as an implicit -mcpu. */
3124 if (rs6000_cpu_index >= 0)
3126 cpu_index = rs6000_cpu_index;
3127 have_cpu = true;
3129 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3131 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3132 have_cpu = true;
3134 else if (implicit_cpu)
3136 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3137 have_cpu = true;
3139 else
3141 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
3142 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3143 have_cpu = false;
3146 gcc_assert (cpu_index >= 0);
3148 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3149 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3150 with those from the cpu, except for options that were explicitly set. If
3151 we don't have a cpu, do not override the target bits set in
3152 TARGET_DEFAULT. */
3153 if (have_cpu)
3155 rs6000_isa_flags &= ~set_masks;
3156 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3157 & set_masks);
3159 else
3160 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3161 & ~rs6000_isa_flags_explicit);
3163 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3164 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3165 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3166 to using rs6000_isa_flags, we need to do the initialization here. */
3167 if (!have_cpu)
3168 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
3170 if (rs6000_tune_index >= 0)
3171 tune_index = rs6000_tune_index;
3172 else if (have_cpu)
3173 rs6000_tune_index = tune_index = cpu_index;
3174 else
3176 size_t i;
3177 enum processor_type tune_proc
3178 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3180 tune_index = -1;
3181 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3182 if (processor_target_table[i].processor == tune_proc)
3184 rs6000_tune_index = tune_index = i;
3185 break;
3189 gcc_assert (tune_index >= 0);
3190 rs6000_cpu = processor_target_table[tune_index].processor;
3192 /* Pick defaults for SPE-related control flags. Do this early to make sure
3193 that the TARGET_ macros are representative ASAP. */
3195 int spe_capable_cpu =
3196 (rs6000_cpu == PROCESSOR_PPC8540
3197 || rs6000_cpu == PROCESSOR_PPC8548);
3199 if (!global_options_set.x_rs6000_spe_abi)
3200 rs6000_spe_abi = spe_capable_cpu;
3202 if (!global_options_set.x_rs6000_spe)
3203 rs6000_spe = spe_capable_cpu;
3205 if (!global_options_set.x_rs6000_float_gprs)
3206 rs6000_float_gprs =
3207 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3208 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3209 : 0);
3212 if (global_options_set.x_rs6000_spe_abi
3213 && rs6000_spe_abi
3214 && !TARGET_SPE_ABI)
3215 error ("not configured for SPE ABI");
3217 if (global_options_set.x_rs6000_spe
3218 && rs6000_spe
3219 && !TARGET_SPE)
3220 error ("not configured for SPE instruction set");
3222 if (main_target_opt != NULL
3223 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3224 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3225 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3226 error ("target attribute or pragma changes SPE ABI");
3228 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3229 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3230 || rs6000_cpu == PROCESSOR_PPCE5500)
3232 if (TARGET_ALTIVEC)
3233 error ("AltiVec not supported in this target");
3234 if (TARGET_SPE)
3235 error ("SPE not supported in this target");
3237 if (rs6000_cpu == PROCESSOR_PPCE6500)
3239 if (TARGET_SPE)
3240 error ("SPE not supported in this target");
3243 /* Disable Cell microcode if we are optimizing for the Cell
3244 and not optimizing for size. */
3245 if (rs6000_gen_cell_microcode == -1)
3246 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3247 && !optimize_size);
3249 /* If we are optimizing big endian systems for space and it's OK to
3250 use instructions that would be microcoded on the Cell, use the
3251 load/store multiple and string instructions. */
3252 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3253 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3254 | OPTION_MASK_STRING);
3256 /* Don't allow -mmultiple or -mstring on little endian systems
3257 unless the cpu is a 750, because the hardware doesn't support the
3258 instructions used in little endian mode, and causes an alignment
3259 trap. The 750 does not cause an alignment trap (except when the
3260 target is unaligned). */
3262 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3264 if (TARGET_MULTIPLE)
3266 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3267 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3268 warning (0, "-mmultiple is not supported on little endian systems");
3271 if (TARGET_STRING)
3273 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3274 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3275 warning (0, "-mstring is not supported on little endian systems");
3279 /* If little-endian, default to -mstrict-align on older processors.
3280 Testing for htm matches power8 and later. */
3281 if (!BYTES_BIG_ENDIAN
3282 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3283 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3285 /* -maltivec={le,be} implies -maltivec. */
3286 if (rs6000_altivec_element_order != 0)
3287 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3289 /* Disallow -maltivec=le in big endian mode for now. This is not
3290 known to be useful for anyone. */
3291 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3293 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3294 rs6000_altivec_element_order = 0;
3297 /* Add some warnings for VSX. */
3298 if (TARGET_VSX)
3300 const char *msg = NULL;
3301 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3302 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3304 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3305 msg = N_("-mvsx requires hardware floating point");
3306 else
3308 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3309 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3312 else if (TARGET_PAIRED_FLOAT)
3313 msg = N_("-mvsx and -mpaired are incompatible");
3314 else if (TARGET_AVOID_XFORM > 0)
3315 msg = N_("-mvsx needs indexed addressing");
3316 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3317 & OPTION_MASK_ALTIVEC))
3319 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3320 msg = N_("-mvsx and -mno-altivec are incompatible");
3321 else
3322 msg = N_("-mno-altivec disables vsx");
3325 if (msg)
3327 warning (0, msg);
3328 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3329 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3333 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3334 the -mcpu setting to enable options that conflict. */
3335 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3336 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3337 | OPTION_MASK_ALTIVEC
3338 | OPTION_MASK_VSX)) != 0)
3339 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3340 | OPTION_MASK_DIRECT_MOVE)
3341 & ~rs6000_isa_flags_explicit);
3343 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3344 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3346 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3347 unless the user explicitly used -mno-<option> to disable the code. */
3348 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3349 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3350 else if (TARGET_VSX)
3351 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3352 else if (TARGET_POPCNTD)
3353 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3354 else if (TARGET_DFP)
3355 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3356 else if (TARGET_CMPB)
3357 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3358 else if (TARGET_FPRND)
3359 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3360 else if (TARGET_POPCNTB)
3361 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3362 else if (TARGET_ALTIVEC)
3363 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
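/* A minimal sketch of the implication idiom used in the cascade above;
   the mask values here are hypothetical, not real OPTION_MASK_* bits:

     flags |= group_mask & ~flags_explicit;

   With group_mask == 0x6 and flags_explicit == 0x4 (the user set or
   cleared that option on the command line), only bit 0x2 is implied;
   the user's explicit choice for bit 0x4 is left untouched.  */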
3365 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3367 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3368 error ("-mcrypto requires -maltivec");
3369 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3372 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3374 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3375 error ("-mdirect-move requires -mvsx");
3376 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3379 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3381 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3382 error ("-mpower8-vector requires -maltivec");
3383 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3386 if (TARGET_P8_VECTOR && !TARGET_VSX)
3388 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3389 error ("-mpower8-vector requires -mvsx");
3390 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3393 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3395 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3396 error ("-mvsx-timode requires -mvsx");
3397 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3400 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3402 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3403 error ("-mhard-dfp requires -mhard-float");
3404 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3407 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3408 silently turn off quad memory mode. */
3409 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3411 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3412 warning (0, N_("-mquad-memory requires 64-bit mode"));
3414 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3415 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3417 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3418 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3421 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3422 the words are reversed, but atomic operations can still be done by
3423 swapping the words. */
3424 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3426 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3427 warning (0, N_("-mquad-memory is not available in little endian mode"));
3429 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3432 /* Assume that if the user asked for normal quad memory instructions, they
3433 want the atomic versions as well, unless they explicitly told us not to
3434 use quad word atomic instructions. */
3435 if (TARGET_QUAD_MEMORY
3436 && !TARGET_QUAD_MEMORY_ATOMIC
3437 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3438 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3440 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3441 generating power8 instructions. */
3442 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3443 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3444 & OPTION_MASK_P8_FUSION);
3446 /* Power8 does not fuse sign extended loads with the addis. If we are
3447 optimizing at high levels for speed, convert a sign extended load into a
3448 zero extending load, and an explicit sign extension. */
3449 if (TARGET_P8_FUSION
3450 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3451 && optimize_function_for_speed_p (cfun)
3452 && optimize >= 3)
3453 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
3455 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3456 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3458 /* E500mc does "better" if we inline more aggressively. Respect the
3459 user's opinion, though. */
3460 if (rs6000_block_move_inline_limit == 0
3461 && (rs6000_cpu == PROCESSOR_PPCE500MC
3462 || rs6000_cpu == PROCESSOR_PPCE500MC64
3463 || rs6000_cpu == PROCESSOR_PPCE5500
3464 || rs6000_cpu == PROCESSOR_PPCE6500))
3465 rs6000_block_move_inline_limit = 128;
3467 /* store_one_arg depends on expand_block_move to handle at least the
3468 size of reg_parm_stack_space. */
3469 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3470 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3472 if (global_init_p)
3474 /* If the appropriate debug option is enabled, replace the target hooks
3475 with debug versions that call the real version and then print
3476 debugging information. */
3477 if (TARGET_DEBUG_COST)
3479 targetm.rtx_costs = rs6000_debug_rtx_costs;
3480 targetm.address_cost = rs6000_debug_address_cost;
3481 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3484 if (TARGET_DEBUG_ADDR)
3486 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3487 targetm.legitimize_address = rs6000_debug_legitimize_address;
3488 rs6000_secondary_reload_class_ptr
3489 = rs6000_debug_secondary_reload_class;
3490 rs6000_secondary_memory_needed_ptr
3491 = rs6000_debug_secondary_memory_needed;
3492 rs6000_cannot_change_mode_class_ptr
3493 = rs6000_debug_cannot_change_mode_class;
3494 rs6000_preferred_reload_class_ptr
3495 = rs6000_debug_preferred_reload_class;
3496 rs6000_legitimize_reload_address_ptr
3497 = rs6000_debug_legitimize_reload_address;
3498 rs6000_mode_dependent_address_ptr
3499 = rs6000_debug_mode_dependent_address;
3502 if (rs6000_veclibabi_name)
3504 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3505 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3506 else
3508 error ("unknown vectorization library ABI type (%s) for "
3509 "-mveclibabi= switch", rs6000_veclibabi_name);
3510 ret = false;
3515 if (!global_options_set.x_rs6000_long_double_type_size)
3517 if (main_target_opt != NULL
3518 && (main_target_opt->x_rs6000_long_double_type_size
3519 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3520 error ("target attribute or pragma changes long double size");
3521 else
3522 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3525 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3526 if (!global_options_set.x_rs6000_ieeequad)
3527 rs6000_ieeequad = 1;
3528 #endif
3530 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3531 target attribute or pragma which automatically enables both options,
3532 unless the altivec ABI was set. This is set by default for 64-bit, but
3533 not for 32-bit. */
3534 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3535 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3536 & ~rs6000_isa_flags_explicit);
3538 /* Enable Altivec ABI for AIX -maltivec. */
3539 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3541 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3542 error ("target attribute or pragma changes AltiVec ABI");
3543 else
3544 rs6000_altivec_abi = 1;
3547 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3548 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3549 be explicitly overridden in either case. */
3550 if (TARGET_ELF)
3552 if (!global_options_set.x_rs6000_altivec_abi
3553 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3555 if (main_target_opt != NULL &&
3556 !main_target_opt->x_rs6000_altivec_abi)
3557 error ("target attribute or pragma changes AltiVec ABI");
3558 else
3559 rs6000_altivec_abi = 1;
3563 /* Set the Darwin64 ABI as default for 64-bit Darwin.
3564 So far, the only darwin64 targets are also MACH-O. */
3565 if (TARGET_MACHO
3566 && DEFAULT_ABI == ABI_DARWIN
3567 && TARGET_64BIT)
3569 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3570 error ("target attribute or pragma changes darwin64 ABI");
3571 else
3573 rs6000_darwin64_abi = 1;
3574 /* Default to natural alignment, for better performance. */
3575 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3579 /* Place FP constants in the constant pool instead of TOC
3580 if section anchors enabled. */
3581 if (flag_section_anchors
3582 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3583 TARGET_NO_FP_IN_TOC = 1;
3585 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3586 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3588 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3589 SUBTARGET_OVERRIDE_OPTIONS;
3590 #endif
3591 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3592 SUBSUBTARGET_OVERRIDE_OPTIONS;
3593 #endif
3594 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3595 SUB3TARGET_OVERRIDE_OPTIONS;
3596 #endif
3598 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3599 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3601 /* For the E500 family of cores, reset the single/double FP flags to let us
3602 check that they remain constant across attributes or pragmas. Also,
3603 clear a possible request for string instructions, which are not supported
3604 and which we might have silently enabled above for -Os.
3606 For other families, clear ISEL in case it was set implicitly.
3607 */
3609 switch (rs6000_cpu)
3611 case PROCESSOR_PPC8540:
3612 case PROCESSOR_PPC8548:
3613 case PROCESSOR_PPCE500MC:
3614 case PROCESSOR_PPCE500MC64:
3615 case PROCESSOR_PPCE5500:
3616 case PROCESSOR_PPCE6500:
3618 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3619 rs6000_double_float = TARGET_E500_DOUBLE;
3621 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3623 break;
3625 default:
3627 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3628 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3630 break;
3633 if (main_target_opt)
3635 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3636 error ("target attribute or pragma changes single precision floating "
3637 "point");
3638 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3639 error ("target attribute or pragma changes double precision floating "
3640 "point");
3643 /* Detect invalid option combinations with E500. */
3644 CHECK_E500_OPTIONS;
3646 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3647 && rs6000_cpu != PROCESSOR_POWER5
3648 && rs6000_cpu != PROCESSOR_POWER6
3649 && rs6000_cpu != PROCESSOR_POWER7
3650 && rs6000_cpu != PROCESSOR_POWER8
3651 && rs6000_cpu != PROCESSOR_PPCA2
3652 && rs6000_cpu != PROCESSOR_CELL
3653 && rs6000_cpu != PROCESSOR_PPC476);
3654 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3655 || rs6000_cpu == PROCESSOR_POWER5
3656 || rs6000_cpu == PROCESSOR_POWER7
3657 || rs6000_cpu == PROCESSOR_POWER8);
3658 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3659 || rs6000_cpu == PROCESSOR_POWER5
3660 || rs6000_cpu == PROCESSOR_POWER6
3661 || rs6000_cpu == PROCESSOR_POWER7
3662 || rs6000_cpu == PROCESSOR_POWER8
3663 || rs6000_cpu == PROCESSOR_PPCE500MC
3664 || rs6000_cpu == PROCESSOR_PPCE500MC64
3665 || rs6000_cpu == PROCESSOR_PPCE5500
3666 || rs6000_cpu == PROCESSOR_PPCE6500);
3668 /* Allow debug switches to override the above settings. These are set to -1
3669 in rs6000.opt to indicate the user hasn't directly set the switch. */
3670 if (TARGET_ALWAYS_HINT >= 0)
3671 rs6000_always_hint = TARGET_ALWAYS_HINT;
3673 if (TARGET_SCHED_GROUPS >= 0)
3674 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3676 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3677 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3679 rs6000_sched_restricted_insns_priority
3680 = (rs6000_sched_groups ? 1 : 0);
3682 /* Handle -msched-costly-dep option. */
3683 rs6000_sched_costly_dep
3684 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3686 if (rs6000_sched_costly_dep_str)
3688 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3689 rs6000_sched_costly_dep = no_dep_costly;
3690 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3691 rs6000_sched_costly_dep = all_deps_costly;
3692 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3693 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3694 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3695 rs6000_sched_costly_dep = store_to_load_dep_costly;
3696 else
3697 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3698 atoi (rs6000_sched_costly_dep_str));
3701 /* Handle -minsert-sched-nops option. */
3702 rs6000_sched_insert_nops
3703 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3705 if (rs6000_sched_insert_nops_str)
3707 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3708 rs6000_sched_insert_nops = sched_finish_none;
3709 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3710 rs6000_sched_insert_nops = sched_finish_pad_groups;
3711 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3712 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3713 else
3714 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3715 atoi (rs6000_sched_insert_nops_str));
3718 if (global_init_p)
3720 #ifdef TARGET_REGNAMES
3721 /* If the user desires alternate register names, copy in the
3722 alternate names now. */
3723 if (TARGET_REGNAMES)
3724 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3725 #endif
3727 /* Set aix_struct_return last, after the ABI is determined.
3728 If -maix-struct-return or -msvr4-struct-return was explicitly
3729 used, don't override with the ABI default. */
3730 if (!global_options_set.x_aix_struct_return)
3731 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3733 #if 0
3734 /* IBM XL compiler defaults to unsigned bitfields. */
3735 if (TARGET_XL_COMPAT)
3736 flag_signed_bitfields = 0;
3737 #endif
3739 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3740 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3742 if (TARGET_TOC)
3743 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3745 /* We can only guarantee the availability of DI pseudo-ops when
3746 assembling for 64-bit targets. */
3747 if (!TARGET_64BIT)
3749 targetm.asm_out.aligned_op.di = NULL;
3750 targetm.asm_out.unaligned_op.di = NULL;
3754 /* Set branch target alignment, if not optimizing for size. */
3755 if (!optimize_size)
3757 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
3758 aligned 8-byte to avoid misprediction by the branch predictor. */
3759 if (rs6000_cpu == PROCESSOR_TITAN
3760 || rs6000_cpu == PROCESSOR_CELL)
3762 if (align_functions <= 0)
3763 align_functions = 8;
3764 if (align_jumps <= 0)
3765 align_jumps = 8;
3766 if (align_loops <= 0)
3767 align_loops = 8;
3769 if (rs6000_align_branch_targets)
3771 if (align_functions <= 0)
3772 align_functions = 16;
3773 if (align_jumps <= 0)
3774 align_jumps = 16;
3775 if (align_loops <= 0)
3777 can_override_loop_align = 1;
3778 align_loops = 16;
3781 if (align_jumps_max_skip <= 0)
3782 align_jumps_max_skip = 15;
3783 if (align_loops_max_skip <= 0)
3784 align_loops_max_skip = 15;
3787 /* Arrange to save and restore machine status around nested functions. */
3788 init_machine_status = rs6000_init_machine_status;
3790 /* We should always be splitting complex arguments, but we can't break
3791 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3792 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
3793 targetm.calls.split_complex_arg = NULL;
3796 /* Initialize rs6000_cost with the appropriate target costs. */
3797 if (optimize_size)
3798 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3799 else
3800 switch (rs6000_cpu)
3802 case PROCESSOR_RS64A:
3803 rs6000_cost = &rs64a_cost;
3804 break;
3806 case PROCESSOR_MPCCORE:
3807 rs6000_cost = &mpccore_cost;
3808 break;
3810 case PROCESSOR_PPC403:
3811 rs6000_cost = &ppc403_cost;
3812 break;
3814 case PROCESSOR_PPC405:
3815 rs6000_cost = &ppc405_cost;
3816 break;
3818 case PROCESSOR_PPC440:
3819 rs6000_cost = &ppc440_cost;
3820 break;
3822 case PROCESSOR_PPC476:
3823 rs6000_cost = &ppc476_cost;
3824 break;
3826 case PROCESSOR_PPC601:
3827 rs6000_cost = &ppc601_cost;
3828 break;
3830 case PROCESSOR_PPC603:
3831 rs6000_cost = &ppc603_cost;
3832 break;
3834 case PROCESSOR_PPC604:
3835 rs6000_cost = &ppc604_cost;
3836 break;
3838 case PROCESSOR_PPC604e:
3839 rs6000_cost = &ppc604e_cost;
3840 break;
3842 case PROCESSOR_PPC620:
3843 rs6000_cost = &ppc620_cost;
3844 break;
3846 case PROCESSOR_PPC630:
3847 rs6000_cost = &ppc630_cost;
3848 break;
3850 case PROCESSOR_CELL:
3851 rs6000_cost = &ppccell_cost;
3852 break;
3854 case PROCESSOR_PPC750:
3855 case PROCESSOR_PPC7400:
3856 rs6000_cost = &ppc750_cost;
3857 break;
3859 case PROCESSOR_PPC7450:
3860 rs6000_cost = &ppc7450_cost;
3861 break;
3863 case PROCESSOR_PPC8540:
3864 case PROCESSOR_PPC8548:
3865 rs6000_cost = &ppc8540_cost;
3866 break;
3868 case PROCESSOR_PPCE300C2:
3869 case PROCESSOR_PPCE300C3:
3870 rs6000_cost = &ppce300c2c3_cost;
3871 break;
3873 case PROCESSOR_PPCE500MC:
3874 rs6000_cost = &ppce500mc_cost;
3875 break;
3877 case PROCESSOR_PPCE500MC64:
3878 rs6000_cost = &ppce500mc64_cost;
3879 break;
3881 case PROCESSOR_PPCE5500:
3882 rs6000_cost = &ppce5500_cost;
3883 break;
3885 case PROCESSOR_PPCE6500:
3886 rs6000_cost = &ppce6500_cost;
3887 break;
3889 case PROCESSOR_TITAN:
3890 rs6000_cost = &titan_cost;
3891 break;
3893 case PROCESSOR_POWER4:
3894 case PROCESSOR_POWER5:
3895 rs6000_cost = &power4_cost;
3896 break;
3898 case PROCESSOR_POWER6:
3899 rs6000_cost = &power6_cost;
3900 break;
3902 case PROCESSOR_POWER7:
3903 rs6000_cost = &power7_cost;
3904 break;
3906 case PROCESSOR_POWER8:
3907 rs6000_cost = &power8_cost;
3908 break;
3910 case PROCESSOR_PPCA2:
3911 rs6000_cost = &ppca2_cost;
3912 break;
3914 default:
3915 gcc_unreachable ();
3918 if (global_init_p)
3920 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3921 rs6000_cost->simultaneous_prefetches,
3922 global_options.x_param_values,
3923 global_options_set.x_param_values);
3924 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3925 global_options.x_param_values,
3926 global_options_set.x_param_values);
3927 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3928 rs6000_cost->cache_line_size,
3929 global_options.x_param_values,
3930 global_options_set.x_param_values);
3931 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3932 global_options.x_param_values,
3933 global_options_set.x_param_values);
3935 /* Increase loop peeling limits based on performance analysis. */
3936 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3937 global_options.x_param_values,
3938 global_options_set.x_param_values);
3939 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3940 global_options.x_param_values,
3941 global_options_set.x_param_values);
3943 /* If using typedef char *va_list, signal that
3944 __builtin_va_start (&ap, 0) can be optimized to
3945 ap = __builtin_next_arg (0). */
3946 if (DEFAULT_ABI != ABI_V4)
3947 targetm.expand_builtin_va_start = NULL;
3950 /* Set up single/double float flags.
3951 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3952 then set both flags. */
3953 if (TARGET_HARD_FLOAT && TARGET_FPRS
3954 && rs6000_single_float == 0 && rs6000_double_float == 0)
3955 rs6000_single_float = rs6000_double_float = 1;
3957 /* If not explicitly specified via option, decide whether to generate indexed
3958 load/store instructions. */
3959 if (TARGET_AVOID_XFORM == -1)
3960 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3961 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
3962 need indexed accesses, and the type used is the scalar type of the element
3963 being loaded or stored. */
3964 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3965 && !TARGET_ALTIVEC);
3967 /* Set the -mrecip options. */
3968 if (rs6000_recip_name)
3970 char *p = ASTRDUP (rs6000_recip_name);
3971 char *q;
3972 unsigned int mask, i;
3973 bool invert;
3975 while ((q = strtok (p, ",")) != NULL)
3977 p = NULL;
3978 if (*q == '!')
3980 invert = true;
3981 q++;
3983 else
3984 invert = false;
3986 if (!strcmp (q, "default"))
3987 mask = ((TARGET_RECIP_PRECISION)
3988 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3989 else
3991 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3992 if (!strcmp (q, recip_options[i].string))
3994 mask = recip_options[i].mask;
3995 break;
3998 if (i == ARRAY_SIZE (recip_options))
4000 error ("unknown option for -mrecip=%s", q);
4001 invert = false;
4002 mask = 0;
4003 ret = false;
4007 if (invert)
4008 rs6000_recip_control &= ~mask;
4009 else
4010 rs6000_recip_control |= mask;
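/* Sketch of the -mrecip= grammar handled above; the option names are
   whatever recip_options contains, so "opt1"/"opt2" here are purely
   illustrative:

     -mrecip=opt1,!opt2   enable opt1, disable opt2
     -mrecip=default      RECIP_HIGH_PRECISION if -mrecip-precision is
                          in effect, else RECIP_LOW_PRECISION

   Note that strtok is handed P only on the first iteration and NULL
   afterwards, so the loop consumes one comma-separated token at a
   time.  */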
4014 /* Set the builtin mask of the various options used that could affect which
4015 builtins were used. In the past we used target_flags, but we've run out
4016 of bits, and some options like SPE and PAIRED are no longer in
4017 target_flags. */
4018 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4019 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4021 fprintf (stderr,
4022 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
4023 rs6000_builtin_mask);
4024 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
4027 /* Initialize all of the registers. */
4028 rs6000_init_hard_regno_mode_ok (global_init_p);
4030 /* Save the initial options in case the user uses function-specific options. */
4031 if (global_init_p)
4032 target_option_default_node = target_option_current_node
4033 = build_target_option_node (&global_options);
4035 /* If not explicitly specified via option, decide whether to generate the
4036 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4037 if (TARGET_LINK_STACK == -1)
4038 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4040 return ret;
4043 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4044 define the target cpu type. */
4046 static void
4047 rs6000_option_override (void)
4049 (void) rs6000_option_override_internal (true);
4053 /* Implement targetm.vectorize.builtin_mask_for_load. */
4054 static tree
4055 rs6000_builtin_mask_for_load (void)
4057 if (TARGET_ALTIVEC || TARGET_VSX)
4058 return altivec_builtin_mask_for_load;
4059 else
4060 return 0;
4063 /* Implement LOOP_ALIGN. */
4064 int
4065 rs6000_loop_align (rtx label)
4067 basic_block bb;
4068 int ninsns;
4070 /* Don't override loop alignment if -falign-loops was specified. */
4071 if (!can_override_loop_align)
4072 return align_loops_log;
4074 bb = BLOCK_FOR_INSN (label);
4075 ninsns = num_loop_insns(bb->loop_father);
4077 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4078 if (ninsns > 4 && ninsns <= 8
4079 && (rs6000_cpu == PROCESSOR_POWER4
4080 || rs6000_cpu == PROCESSOR_POWER5
4081 || rs6000_cpu == PROCESSOR_POWER6
4082 || rs6000_cpu == PROCESSOR_POWER7
4083 || rs6000_cpu == PROCESSOR_POWER8))
4084 return 5;
4085 else
4086 return align_loops_log;
4089 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4090 static int
4091 rs6000_loop_align_max_skip (rtx label)
4093 return (1 << rs6000_loop_align (label)) - 1;
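/* Illustrative only: the relationship between the log2 alignment
   returned by rs6000_loop_align and the padding allowed above.  */
#if 0
static int
example_loop_align_max_skip (void)
{
  int align_log = 5;            /* 2**5 == 32-byte boundary */
  return (1 << align_log) - 1;  /* at most 31 bytes of padding */
}
#endif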
4096 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4097 after applying N iterations. This routine does not determine how many
4098 iterations are required to reach the desired alignment. */
4100 static bool
4101 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4103 if (is_packed)
4104 return false;
4106 if (TARGET_32BIT)
4108 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4109 return true;
4111 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4112 return true;
4114 return false;
4116 else
4118 if (TARGET_MACHO)
4119 return false;
4121 /* Assume that all other types are naturally aligned. CHECKME! */
4122 return true;
4126 /* Return true if the vector misalignment factor is supported by the
4127 target. */
4128 static bool
4129 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
4130 const_tree type,
4131 int misalignment,
4132 bool is_packed)
4134 if (TARGET_VSX)
4136 /* Return if movmisalign pattern is not supported for this mode. */
4137 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4138 return false;
4140 if (misalignment == -1)
4142 /* Misalignment factor is unknown at compile time but we know
4143 it's word aligned. */
4144 if (rs6000_vector_alignment_reachable (type, is_packed))
4146 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4148 if (element_size == 64 || element_size == 32)
4149 return true;
4152 return false;
4155 /* VSX supports word-aligned vectors. */
4156 if (misalignment % 4 == 0)
4157 return true;
4159 return false;
4162 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4163 static int
4164 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4165 tree vectype, int misalign)
4167 unsigned elements;
4168 tree elem_type;
4170 switch (type_of_cost)
4172 case scalar_stmt:
4173 case scalar_load:
4174 case scalar_store:
4175 case vector_stmt:
4176 case vector_load:
4177 case vector_store:
4178 case vec_to_scalar:
4179 case scalar_to_vec:
4180 case cond_branch_not_taken:
4181 return 1;
4183 case vec_perm:
4184 if (TARGET_VSX)
4185 return 3;
4186 else
4187 return 1;
4189 case vec_promote_demote:
4190 if (TARGET_VSX)
4191 return 4;
4192 else
4193 return 1;
4195 case cond_branch_taken:
4196 return 3;
4198 case unaligned_load:
4199 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4201 elements = TYPE_VECTOR_SUBPARTS (vectype);
4202 if (elements == 2)
4203 /* Double word aligned. */
4204 return 2;
4206 if (elements == 4)
4208 switch (misalign)
4210 case 8:
4211 /* Double word aligned. */
4212 return 2;
4214 case -1:
4215 /* Unknown misalignment. */
4216 case 4:
4217 case 12:
4218 /* Word aligned. */
4219 return 22;
4221 default:
4222 gcc_unreachable ();
4227 if (TARGET_ALTIVEC)
4228 /* Misaligned loads are not supported. */
4229 gcc_unreachable ();
4231 return 2;
4233 case unaligned_store:
4234 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4236 elements = TYPE_VECTOR_SUBPARTS (vectype);
4237 if (elements == 2)
4238 /* Double word aligned. */
4239 return 2;
4241 if (elements == 4)
4243 switch (misalign)
4245 case 8:
4246 /* Double word aligned. */
4247 return 2;
4249 case -1:
4250 /* Unknown misalignment. */
4251 case 4:
4252 case 12:
4253 /* Word aligned. */
4254 return 23;
4256 default:
4257 gcc_unreachable ();
4262 if (TARGET_ALTIVEC)
4263 /* Misaligned stores are not supported. */
4264 gcc_unreachable ();
4266 return 2;
4268 case vec_construct:
4269 elements = TYPE_VECTOR_SUBPARTS (vectype);
4270 elem_type = TREE_TYPE (vectype);
4271 /* 32-bit vectors loaded into registers are stored as double
4272 precision, so we need n/2 converts in addition to the usual
4273 n/2 merges to construct a vector of short floats from them. */
4274 if (SCALAR_FLOAT_TYPE_P (elem_type)
4275 && TYPE_PRECISION (elem_type) == 32)
4276 return elements + 1;
4277 else
4278 return elements / 2 + 1;
4280 default:
4281 gcc_unreachable ();
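/* Illustrative cost readings from the table above: with VSX and
   TARGET_ALLOW_MOVMISALIGN, an unaligned V4SF load that is only word
   aligned (misalign 4 or 12) costs 22 while a doubleword-aligned one
   (misalign 8) costs 2, reflecting the realignment work; and a
   vec_construct of four short floats costs 4 + 1 == 5, since the
   elements are held as double precision and need converts on top of
   the merges.  */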
4285 /* Implement targetm.vectorize.preferred_simd_mode. */
4287 static enum machine_mode
4288 rs6000_preferred_simd_mode (enum machine_mode mode)
4290 if (TARGET_VSX)
4291 switch (mode)
4293 case DFmode:
4294 return V2DFmode;
4295 default:;
4297 if (TARGET_ALTIVEC || TARGET_VSX)
4298 switch (mode)
4300 case SFmode:
4301 return V4SFmode;
4302 case TImode:
4303 return V1TImode;
4304 case DImode:
4305 return V2DImode;
4306 case SImode:
4307 return V4SImode;
4308 case HImode:
4309 return V8HImode;
4310 case QImode:
4311 return V16QImode;
4312 default:;
4314 if (TARGET_SPE)
4315 switch (mode)
4317 case SFmode:
4318 return V2SFmode;
4319 case SImode:
4320 return V2SImode;
4321 default:;
4323 if (TARGET_PAIRED_FLOAT
4324 && mode == SFmode)
4325 return V2SFmode;
4326 return word_mode;
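/* Example of the mode selection above: with VSX enabled, DFmode
   vectorizes in V2DFmode and SImode in V4SImode; with only AltiVec,
   SFmode still maps to V4SFmode but DFmode falls through to
   word_mode, i.e. no vector mode is preferred for it.  */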
4329 typedef struct _rs6000_cost_data
4331 struct loop *loop_info;
4332 unsigned cost[3];
4333 } rs6000_cost_data;
4335 /* Test for likely overcommitment of vector hardware resources. If a
4336 loop iteration is relatively large, and too large a percentage of
4337 instructions in the loop are vectorized, the cost model may not
4338 adequately reflect delays from unavailable vector resources.
4339 Penalize the loop body cost for this case. */
4341 static void
4342 rs6000_density_test (rs6000_cost_data *data)
4344 const int DENSITY_PCT_THRESHOLD = 85;
4345 const int DENSITY_SIZE_THRESHOLD = 70;
4346 const int DENSITY_PENALTY = 10;
4347 struct loop *loop = data->loop_info;
4348 basic_block *bbs = get_loop_body (loop);
4349 int nbbs = loop->num_nodes;
4350 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4351 int i, density_pct;
4353 for (i = 0; i < nbbs; i++)
4355 basic_block bb = bbs[i];
4356 gimple_stmt_iterator gsi;
4358 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4360 gimple stmt = gsi_stmt (gsi);
4361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4363 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4364 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4365 not_vec_cost++;
4369 free (bbs);
4370 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4372 if (density_pct > DENSITY_PCT_THRESHOLD
4373 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4375 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4376 if (dump_enabled_p ())
4377 dump_printf_loc (MSG_NOTE, vect_location,
4378 "density %d%%, cost %d exceeds threshold, penalizing "
4379 "loop body cost by %d%%", density_pct,
4380 vec_cost + not_vec_cost, DENSITY_PENALTY);
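/* Illustrative only: the arithmetic of the density penalty above,
   with made-up costs.  */
#if 0
static int
example_density_penalty (void)
{
  int vec_cost = 90, not_vec_cost = 10;
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost); /* 90 */
  /* 90% > 85% and a body size of 100 > 70, so penalize by 10%.  */
  return vec_cost * (100 + 10) / 100;  /* 99 */
}
#endif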
4384 /* Implement targetm.vectorize.init_cost. */
4386 static void *
4387 rs6000_init_cost (struct loop *loop_info)
4389 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4390 data->loop_info = loop_info;
4391 data->cost[vect_prologue] = 0;
4392 data->cost[vect_body] = 0;
4393 data->cost[vect_epilogue] = 0;
4394 return data;
4397 /* Implement targetm.vectorize.add_stmt_cost. */
4399 static unsigned
4400 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4401 struct _stmt_vec_info *stmt_info, int misalign,
4402 enum vect_cost_model_location where)
4404 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4405 unsigned retval = 0;
4407 if (flag_vect_cost_model)
4409 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4410 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4411 misalign);
4412 /* Statements in an inner loop relative to the loop being
4413 vectorized are weighted more heavily. The value here is
4414 arbitrary and could potentially be improved with analysis. */
4415 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4416 count *= 50; /* FIXME. */
4418 retval = (unsigned) (count * stmt_cost);
4419 cost_data->cost[where] += retval;
4422 return retval;
4425 /* Implement targetm.vectorize.finish_cost. */
4427 static void
4428 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4429 unsigned *body_cost, unsigned *epilogue_cost)
4431 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4433 if (cost_data->loop_info)
4434 rs6000_density_test (cost_data);
4436 *prologue_cost = cost_data->cost[vect_prologue];
4437 *body_cost = cost_data->cost[vect_body];
4438 *epilogue_cost = cost_data->cost[vect_epilogue];
4441 /* Implement targetm.vectorize.destroy_cost_data. */
4443 static void
4444 rs6000_destroy_cost_data (void *data)
4446 free (data);
4449 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4450 library with vectorized intrinsics. */
4452 static tree
4453 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4455 char name[32];
4456 const char *suffix = NULL;
4457 tree fntype, new_fndecl, bdecl = NULL_TREE;
4458 int n_args = 1;
4459 const char *bname;
4460 enum machine_mode el_mode, in_mode;
4461 int n, in_n;
4463 /* Libmass is suitable only for unsafe math, as it does not correctly
4464 support parts of IEEE (such as denormals) with the required precision.
4465 Only support it if we have VSX, so we can use the simd d2 or f4 functions.
4466 XXX: Add variable length support. */
4467 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4468 return NULL_TREE;
4470 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4471 n = TYPE_VECTOR_SUBPARTS (type_out);
4472 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4473 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4474 if (el_mode != in_mode
4475 || n != in_n)
4476 return NULL_TREE;
4478 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4480 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4481 switch (fn)
4483 case BUILT_IN_ATAN2:
4484 case BUILT_IN_HYPOT:
4485 case BUILT_IN_POW:
4486 n_args = 2;
4487 /* fall through */
4489 case BUILT_IN_ACOS:
4490 case BUILT_IN_ACOSH:
4491 case BUILT_IN_ASIN:
4492 case BUILT_IN_ASINH:
4493 case BUILT_IN_ATAN:
4494 case BUILT_IN_ATANH:
4495 case BUILT_IN_CBRT:
4496 case BUILT_IN_COS:
4497 case BUILT_IN_COSH:
4498 case BUILT_IN_ERF:
4499 case BUILT_IN_ERFC:
4500 case BUILT_IN_EXP2:
4501 case BUILT_IN_EXP:
4502 case BUILT_IN_EXPM1:
4503 case BUILT_IN_LGAMMA:
4504 case BUILT_IN_LOG10:
4505 case BUILT_IN_LOG1P:
4506 case BUILT_IN_LOG2:
4507 case BUILT_IN_LOG:
4508 case BUILT_IN_SIN:
4509 case BUILT_IN_SINH:
4510 case BUILT_IN_SQRT:
4511 case BUILT_IN_TAN:
4512 case BUILT_IN_TANH:
4513 bdecl = builtin_decl_implicit (fn);
4514 suffix = "d2"; /* pow -> powd2 */
4515 if (el_mode != DFmode
4516 || n != 2
4517 || !bdecl)
4518 return NULL_TREE;
4519 break;
4521 case BUILT_IN_ATAN2F:
4522 case BUILT_IN_HYPOTF:
4523 case BUILT_IN_POWF:
4524 n_args = 2;
4525 /* fall through */
4527 case BUILT_IN_ACOSF:
4528 case BUILT_IN_ACOSHF:
4529 case BUILT_IN_ASINF:
4530 case BUILT_IN_ASINHF:
4531 case BUILT_IN_ATANF:
4532 case BUILT_IN_ATANHF:
4533 case BUILT_IN_CBRTF:
4534 case BUILT_IN_COSF:
4535 case BUILT_IN_COSHF:
4536 case BUILT_IN_ERFF:
4537 case BUILT_IN_ERFCF:
4538 case BUILT_IN_EXP2F:
4539 case BUILT_IN_EXPF:
4540 case BUILT_IN_EXPM1F:
4541 case BUILT_IN_LGAMMAF:
4542 case BUILT_IN_LOG10F:
4543 case BUILT_IN_LOG1PF:
4544 case BUILT_IN_LOG2F:
4545 case BUILT_IN_LOGF:
4546 case BUILT_IN_SINF:
4547 case BUILT_IN_SINHF:
4548 case BUILT_IN_SQRTF:
4549 case BUILT_IN_TANF:
4550 case BUILT_IN_TANHF:
4551 bdecl = builtin_decl_implicit (fn);
4552 suffix = "4"; /* powf -> powf4 */
4553 if (el_mode != SFmode
4554 || n != 4
4555 || !bdecl)
4556 return NULL_TREE;
4557 break;
4559 default:
4560 return NULL_TREE;
4563 else
4564 return NULL_TREE;
4566 gcc_assert (suffix != NULL);
4567 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4568 if (!bname)
4569 return NULL_TREE;
4571 strcpy (name, bname + sizeof ("__builtin_") - 1);
4572 strcat (name, suffix);
4574 if (n_args == 1)
4575 fntype = build_function_type_list (type_out, type_in, NULL);
4576 else if (n_args == 2)
4577 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4578 else
4579 gcc_unreachable ();
4581 /* Build a function declaration for the vectorized function. */
4582 new_fndecl = build_decl (BUILTINS_LOCATION,
4583 FUNCTION_DECL, get_identifier (name), fntype);
4584 TREE_PUBLIC (new_fndecl) = 1;
4585 DECL_EXTERNAL (new_fndecl) = 1;
4586 DECL_IS_NOVOPS (new_fndecl) = 1;
4587 TREE_READONLY (new_fndecl) = 1;
4589 return new_fndecl;
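/* Example of the renaming performed above: for a V2DFmode
   vectorization of __builtin_pow, bname is "__builtin_pow"; the
   "__builtin_" prefix is stripped and the "d2" suffix appended,
   yielding the external MASS routine "powd2".  The SFmode flavor
   __builtin_powf likewise becomes "powf4".  */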
4592 /* Return a function decl for a vectorized version of the builtin function
4593 FNDECL, with result vector type TYPE_OUT and input vector type TYPE_IN,
4594 or NULL_TREE if such a version is not available. */
4596 static tree
4597 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4598 tree type_in)
4600 enum machine_mode in_mode, out_mode;
4601 int in_n, out_n;
4603 if (TARGET_DEBUG_BUILTIN)
4604 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4605 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4606 GET_MODE_NAME (TYPE_MODE (type_out)),
4607 GET_MODE_NAME (TYPE_MODE (type_in)));
4609 if (TREE_CODE (type_out) != VECTOR_TYPE
4610 || TREE_CODE (type_in) != VECTOR_TYPE
4611 || !TARGET_VECTORIZE_BUILTINS)
4612 return NULL_TREE;
4614 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4615 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4616 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4617 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4619 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4621 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4622 switch (fn)
4624 case BUILT_IN_CLZIMAX:
4625 case BUILT_IN_CLZLL:
4626 case BUILT_IN_CLZL:
4627 case BUILT_IN_CLZ:
4628 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4630 if (out_mode == QImode && out_n == 16)
4631 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4632 else if (out_mode == HImode && out_n == 8)
4633 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4634 else if (out_mode == SImode && out_n == 4)
4635 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4636 else if (out_mode == DImode && out_n == 2)
4637 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4639 break;
4640 case BUILT_IN_COPYSIGN:
4641 if (VECTOR_UNIT_VSX_P (V2DFmode)
4642 && out_mode == DFmode && out_n == 2
4643 && in_mode == DFmode && in_n == 2)
4644 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4645 break;
4646 case BUILT_IN_COPYSIGNF:
4647 if (out_mode != SFmode || out_n != 4
4648 || in_mode != SFmode || in_n != 4)
4649 break;
4650 if (VECTOR_UNIT_VSX_P (V4SFmode))
4651 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4652 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4653 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4654 break;
4655 case BUILT_IN_POPCOUNTIMAX:
4656 case BUILT_IN_POPCOUNTLL:
4657 case BUILT_IN_POPCOUNTL:
4658 case BUILT_IN_POPCOUNT:
4659 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4661 if (out_mode == QImode && out_n == 16)
4662 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4663 else if (out_mode == HImode && out_n == 8)
4664 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4665 else if (out_mode == SImode && out_n == 4)
4666 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4667 else if (out_mode == DImode && out_n == 2)
4668 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4670 break;
4671 case BUILT_IN_SQRT:
4672 if (VECTOR_UNIT_VSX_P (V2DFmode)
4673 && out_mode == DFmode && out_n == 2
4674 && in_mode == DFmode && in_n == 2)
4675 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4676 break;
4677 case BUILT_IN_SQRTF:
4678 if (VECTOR_UNIT_VSX_P (V4SFmode)
4679 && out_mode == SFmode && out_n == 4
4680 && in_mode == SFmode && in_n == 4)
4681 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4682 break;
4683 case BUILT_IN_CEIL:
4684 if (VECTOR_UNIT_VSX_P (V2DFmode)
4685 && out_mode == DFmode && out_n == 2
4686 && in_mode == DFmode && in_n == 2)
4687 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4688 break;
4689 case BUILT_IN_CEILF:
4690 if (out_mode != SFmode || out_n != 4
4691 || in_mode != SFmode || in_n != 4)
4692 break;
4693 if (VECTOR_UNIT_VSX_P (V4SFmode))
4694 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4695 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4696 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4697 break;
4698 case BUILT_IN_FLOOR:
4699 if (VECTOR_UNIT_VSX_P (V2DFmode)
4700 && out_mode == DFmode && out_n == 2
4701 && in_mode == DFmode && in_n == 2)
4702 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4703 break;
4704 case BUILT_IN_FLOORF:
4705 if (out_mode != SFmode || out_n != 4
4706 || in_mode != SFmode || in_n != 4)
4707 break;
4708 if (VECTOR_UNIT_VSX_P (V4SFmode))
4709 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4710 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4711 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4712 break;
4713 case BUILT_IN_FMA:
4714 if (VECTOR_UNIT_VSX_P (V2DFmode)
4715 && out_mode == DFmode && out_n == 2
4716 && in_mode == DFmode && in_n == 2)
4717 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4718 break;
4719 case BUILT_IN_FMAF:
4720 if (VECTOR_UNIT_VSX_P (V4SFmode)
4721 && out_mode == SFmode && out_n == 4
4722 && in_mode == SFmode && in_n == 4)
4723 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4724 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4725 && out_mode == SFmode && out_n == 4
4726 && in_mode == SFmode && in_n == 4)
4727 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4728 break;
4729 case BUILT_IN_TRUNC:
4730 if (VECTOR_UNIT_VSX_P (V2DFmode)
4731 && out_mode == DFmode && out_n == 2
4732 && in_mode == DFmode && in_n == 2)
4733 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4734 break;
4735 case BUILT_IN_TRUNCF:
4736 if (out_mode != SFmode || out_n != 4
4737 || in_mode != SFmode || in_n != 4)
4738 break;
4739 if (VECTOR_UNIT_VSX_P (V4SFmode))
4740 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4741 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4742 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4743 break;
4744 case BUILT_IN_NEARBYINT:
4745 if (VECTOR_UNIT_VSX_P (V2DFmode)
4746 && flag_unsafe_math_optimizations
4747 && out_mode == DFmode && out_n == 2
4748 && in_mode == DFmode && in_n == 2)
4749 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4750 break;
4751 case BUILT_IN_NEARBYINTF:
4752 if (VECTOR_UNIT_VSX_P (V4SFmode)
4753 && flag_unsafe_math_optimizations
4754 && out_mode == SFmode && out_n == 4
4755 && in_mode == SFmode && in_n == 4)
4756 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4757 break;
4758 case BUILT_IN_RINT:
4759 if (VECTOR_UNIT_VSX_P (V2DFmode)
4760 && !flag_trapping_math
4761 && out_mode == DFmode && out_n == 2
4762 && in_mode == DFmode && in_n == 2)
4763 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4764 break;
4765 case BUILT_IN_RINTF:
4766 if (VECTOR_UNIT_VSX_P (V4SFmode)
4767 && !flag_trapping_math
4768 && out_mode == SFmode && out_n == 4
4769 && in_mode == SFmode && in_n == 4)
4770 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4771 break;
4772 default:
4773 break;
4777 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4779 enum rs6000_builtins fn
4780 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4781 switch (fn)
4783 case RS6000_BUILTIN_RSQRTF:
4784 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4785 && out_mode == SFmode && out_n == 4
4786 && in_mode == SFmode && in_n == 4)
4787 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4788 break;
4789 case RS6000_BUILTIN_RSQRT:
4790 if (VECTOR_UNIT_VSX_P (V2DFmode)
4791 && out_mode == DFmode && out_n == 2
4792 && in_mode == DFmode && in_n == 2)
4793 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4794 break;
4795 case RS6000_BUILTIN_RECIPF:
4796 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4797 && out_mode == SFmode && out_n == 4
4798 && in_mode == SFmode && in_n == 4)
4799 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4800 break;
4801 case RS6000_BUILTIN_RECIP:
4802 if (VECTOR_UNIT_VSX_P (V2DFmode)
4803 && out_mode == DFmode && out_n == 2
4804 && in_mode == DFmode && in_n == 2)
4805 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4806 break;
4807 default:
4808 break;
4812 /* Generate calls to libmass if appropriate. */
4813 if (rs6000_veclib_handler)
4814 return rs6000_veclib_handler (fndecl, type_out, type_in);
4816 return NULL_TREE;
4819 /* Default CPU string for rs6000*_file_start functions. */
4820 static const char *rs6000_default_cpu;
4822 /* Do anything needed at the start of the asm file. */
4824 static void
4825 rs6000_file_start (void)
4827 char buffer[80];
4828 const char *start = buffer;
4829 FILE *file = asm_out_file;
4831 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4833 default_file_start ();
4835 if (flag_verbose_asm)
4837 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4839 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4841 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4842 start = "";
4845 if (global_options_set.x_rs6000_cpu_index)
4847 fprintf (file, "%s -mcpu=%s", start,
4848 processor_target_table[rs6000_cpu_index].name);
4849 start = "";
4852 if (global_options_set.x_rs6000_tune_index)
4854 fprintf (file, "%s -mtune=%s", start,
4855 processor_target_table[rs6000_tune_index].name);
4856 start = "";
4859 if (PPC405_ERRATUM77)
4861 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4862 start = "";
4865 #ifdef USING_ELFOS_H
4866 switch (rs6000_sdata)
4868 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4869 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4870 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4871 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4874 if (rs6000_sdata && g_switch_value)
4876 fprintf (file, "%s -G %d", start,
4877 g_switch_value);
4878 start = "";
4880 #endif
4882 if (*start == '\0')
4883 putc ('\n', file);
4886 if (DEFAULT_ABI == ABI_ELFv2)
4887 fprintf (file, "\t.abiversion 2\n");
4889 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
4890 || (TARGET_ELF && flag_pic == 2))
4892 switch_to_section (toc_section);
4893 switch_to_section (text_section);
4898 /* Return nonzero if this function is known to have a null epilogue. */
4900 int
4901 direct_return (void)
4903 if (reload_completed)
4905 rs6000_stack_t *info = rs6000_stack_info ();
4907 if (info->first_gp_reg_save == 32
4908 && info->first_fp_reg_save == 64
4909 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4910 && ! info->lr_save_p
4911 && ! info->cr_save_p
4912 && info->vrsave_mask == 0
4913 && ! info->push_p)
4914 return 1;
4917 return 0;
4920 /* Return the number of instructions it takes to form a constant in an
4921 integer register. */
4923 int
4924 num_insns_constant_wide (HOST_WIDE_INT value)
4926 /* signed constant loadable with addi */
4927 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4928 return 1;
4930 /* constant loadable with addis */
4931 else if ((value & 0xffff) == 0
4932 && (value >> 31 == -1 || value >> 31 == 0))
4933 return 1;
4935 else if (TARGET_POWERPC64)
4937 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4938 HOST_WIDE_INT high = value >> 31;
4940 if (high == 0 || high == -1)
4941 return 2;
4943 high >>= 1;
4945 if (low == 0)
4946 return num_insns_constant_wide (high) + 1;
4947 else if (high == 0)
4948 return num_insns_constant_wide (low) + 1;
4949 else
4950 return (num_insns_constant_wide (high)
4951 + num_insns_constant_wide (low) + 1);
4954 else
4955 return 2;
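/* Worked examples for the counts above: 0x7fff loads with a single
   addi; 0x12340000 with a single addis (low half zero, sign-extended
   upper bits); 0x12345678 needs lis + ori, i.e. 2; and on
   TARGET_POWERPC64 a value such as 0x1234567800000000 costs
   num_insns_constant_wide (high) + 1 == 3, the extra instruction
   being the shift that moves the high part into place.  */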
4958 int
4959 num_insns_constant (rtx op, enum machine_mode mode)
4961 HOST_WIDE_INT low, high;
4963 switch (GET_CODE (op))
4965 case CONST_INT:
4966 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
4967 && mask64_operand (op, mode))
4968 return 2;
4969 else
4970 return num_insns_constant_wide (INTVAL (op));
4972 case CONST_WIDE_INT:
4974 int i;
4975 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
4976 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
4977 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
4978 return ins;
4981 case CONST_DOUBLE:
4982 if (mode == SFmode || mode == SDmode)
4984 long l;
4985 REAL_VALUE_TYPE rv;
4987 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4988 if (DECIMAL_FLOAT_MODE_P (mode))
4989 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
4990 else
4991 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
4992 return num_insns_constant_wide ((HOST_WIDE_INT) l);
4995 long l[2];
4996 REAL_VALUE_TYPE rv;
4998 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4999 if (DECIMAL_FLOAT_MODE_P (mode))
5000 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
5001 else
5002 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
5003 high = l[WORDS_BIG_ENDIAN == 0];
5004 low = l[WORDS_BIG_ENDIAN != 0];
5006 if (TARGET_32BIT)
5007 return (num_insns_constant_wide (low)
5008 + num_insns_constant_wide (high));
5009 else
5011 if ((high == 0 && low >= 0)
5012 || (high == -1 && low < 0))
5013 return num_insns_constant_wide (low);
5015 else if (mask64_operand (op, mode))
5016 return 2;
5018 else if (low == 0)
5019 return num_insns_constant_wide (high) + 1;
5021 else
5022 return (num_insns_constant_wide (high)
5023 + num_insns_constant_wide (low) + 1);
5026 default:
5027 gcc_unreachable ();
5031 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5032 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5033 corresponding element of the vector, but for V4SFmode and V2SFmode,
5034 the corresponding "float" is interpreted as an SImode integer. */
5036 HOST_WIDE_INT
5037 const_vector_elt_as_int (rtx op, unsigned int elt)
5039 rtx tmp;
5041 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5042 gcc_assert (GET_MODE (op) != V2DImode
5043 && GET_MODE (op) != V2DFmode);
5045 tmp = CONST_VECTOR_ELT (op, elt);
5046 if (GET_MODE (op) == V4SFmode
5047 || GET_MODE (op) == V2SFmode)
5048 tmp = gen_lowpart (SImode, tmp);
5049 return INTVAL (tmp);
5052 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5053 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5054 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5055 all items are set to the same value and contain COPIES replicas of the
5056 vsplt's operand; if STEP > 1, one in every STEP elements is set to the
5057 vsplt's operand and the others are set to the value of the operand's msb. */
5059 static bool
5060 vspltis_constant (rtx op, unsigned step, unsigned copies)
5062 enum machine_mode mode = GET_MODE (op);
5063 enum machine_mode inner = GET_MODE_INNER (mode);
5065 unsigned i;
5066 unsigned nunits;
5067 unsigned bitsize;
5068 unsigned mask;
5070 HOST_WIDE_INT val;
5071 HOST_WIDE_INT splat_val;
5072 HOST_WIDE_INT msb_val;
5074 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5075 return false;
5077 nunits = GET_MODE_NUNITS (mode);
5078 bitsize = GET_MODE_BITSIZE (inner);
5079 mask = GET_MODE_MASK (inner);
5081 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5082 splat_val = val;
5083 msb_val = val >= 0 ? 0 : -1;
5085 /* Construct the value to be splatted, if possible. If not, return false. */
5086 for (i = 2; i <= copies; i *= 2)
5088 HOST_WIDE_INT small_val;
5089 bitsize /= 2;
5090 small_val = splat_val >> bitsize;
5091 mask >>= bitsize;
5092 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5093 return false;
5094 splat_val = small_val;
5097 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5098 if (EASY_VECTOR_15 (splat_val))
5101 /* Also check if we can splat, and then add the result to itself. Do so if
5102 the value is positive, or if the splat instruction is using OP's mode;
5103 for splat_val < 0, the splat and the add should use the same mode. */
5104 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5105 && (splat_val >= 0 || (step == 1 && copies == 1)))
5108 /* Also check if we are loading up the most significant bit, which can be
5109 done by loading up -1 and shifting the value left by -1. */
5110 else if (EASY_VECTOR_MSB (splat_val, inner))
5113 else
5114 return false;
5116 /* Check if VAL is present in every STEP-th element, and the
5117 other elements are filled with its most significant bit. */
5118 for (i = 1; i < nunits; ++i)
5120 HOST_WIDE_INT desired_val;
5121 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5122 if ((i & (step - 1)) == 0)
5123 desired_val = val;
5124 else
5125 desired_val = msb_val;
5127 if (desired_val != const_vector_elt_as_int (op, elt))
5128 return false;
5131 return true;
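/* Worked examples for STEP and COPIES: a V8HImode constant whose
   halfwords are all 0x0303 matches with copies == 2, each halfword
   holding two replicas of the byte value 3, and is generated as a
   vspltisb of 3; a big-endian V8HImode constant { 0, 5, 0, 5, 0, 5,
   0, 5 } matches with step == 2, being a vspltisw of 5 viewed as
   eight halfwords with the in-between elements supplied by the msb
   fill.  */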
5135 /* Return true if OP is of the given MODE and can be synthesized
5136 with a vspltisb, vspltish or vspltisw. */
5138 bool
5139 easy_altivec_constant (rtx op, enum machine_mode mode)
5141 unsigned step, copies;
5143 if (mode == VOIDmode)
5144 mode = GET_MODE (op);
5145 else if (mode != GET_MODE (op))
5146 return false;
5148 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5149 constants. */
5150 if (mode == V2DFmode)
5151 return zero_constant (op, mode);
5153 else if (mode == V2DImode)
5155 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5156 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5157 return false;
5159 if (zero_constant (op, mode))
5160 return true;
5162 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5163 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5164 return true;
5166 return false;
5169 /* V1TImode is a special container for TImode. Ignore for now. */
5170 else if (mode == V1TImode)
5171 return false;
5173 /* Start with a vspltisw. */
5174 step = GET_MODE_NUNITS (mode) / 4;
5175 copies = 1;
5177 if (vspltis_constant (op, step, copies))
5178 return true;
5180 /* Then try with a vspltish. */
5181 if (step == 1)
5182 copies <<= 1;
5183 else
5184 step >>= 1;
5186 if (vspltis_constant (op, step, copies))
5187 return true;
5189 /* And finally a vspltisb. */
5190 if (step == 1)
5191 copies <<= 1;
5192 else
5193 step >>= 1;
5195 if (vspltis_constant (op, step, copies))
5196 return true;
5198 return false;
5201 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5202 result is OP. Abort if it is not possible. */
5204 rtx
5205 gen_easy_altivec_constant (rtx op)
5207 enum machine_mode mode = GET_MODE (op);
5208 int nunits = GET_MODE_NUNITS (mode);
5209 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5210 unsigned step = nunits / 4;
5211 unsigned copies = 1;
5213 /* Start with a vspltisw. */
5214 if (vspltis_constant (op, step, copies))
5215 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5217 /* Then try with a vspltish. */
5218 if (step == 1)
5219 copies <<= 1;
5220 else
5221 step >>= 1;
5223 if (vspltis_constant (op, step, copies))
5224 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5226 /* And finally a vspltisb. */
5227 if (step == 1)
5228 copies <<= 1;
5229 else
5230 step >>= 1;
5232 if (vspltis_constant (op, step, copies))
5233 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5235 gcc_unreachable ();
5238 const char *
5239 output_vec_const_move (rtx *operands)
5241 int cst, cst2;
5242 enum machine_mode mode;
5243 rtx dest, vec;
5245 dest = operands[0];
5246 vec = operands[1];
5247 mode = GET_MODE (dest);
5249 if (TARGET_VSX)
5251 if (zero_constant (vec, mode))
5252 return "xxlxor %x0,%x0,%x0";
5254 if ((mode == V2DImode || mode == V1TImode)
5255 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5256 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5257 return "vspltisw %0,-1";
5260 if (TARGET_ALTIVEC)
5262 rtx splat_vec;
5263 if (zero_constant (vec, mode))
5264 return "vxor %0,%0,%0";
5266 splat_vec = gen_easy_altivec_constant (vec);
5267 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5268 operands[1] = XEXP (splat_vec, 0);
5269 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5270 return "#";
5272 switch (GET_MODE (splat_vec))
5274 case V4SImode:
5275 return "vspltisw %0,%1";
5277 case V8HImode:
5278 return "vspltish %0,%1";
5280 case V16QImode:
5281 return "vspltisb %0,%1";
5283 default:
5284 gcc_unreachable ();
5288 gcc_assert (TARGET_SPE);
5290 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5291 pattern of V1DI, V4HI, and V2SF.
5293 FIXME: We should probably return # and add post reload
5294 splitters for these, but this way is so easy ;-). */
5295 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5296 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5297 operands[1] = CONST_VECTOR_ELT (vec, 0);
5298 operands[2] = CONST_VECTOR_ELT (vec, 1);
5299 if (cst == cst2)
5300 return "li %0,%1\n\tevmergelo %0,%0,%0";
5301 else
5302 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5305 /* Initialize the paired-single vector TARGET to VALS. */
5307 void
5308 paired_expand_vector_init (rtx target, rtx vals)
5310 enum machine_mode mode = GET_MODE (target);
5311 int n_elts = GET_MODE_NUNITS (mode);
5312 int n_var = 0;
5313 rtx x, new_rtx, tmp, constant_op, op1, op2;
5314 int i;
5316 for (i = 0; i < n_elts; ++i)
5318 x = XVECEXP (vals, 0, i);
5319 if (!CONSTANT_P (x))
5320 ++n_var;
5322 if (n_var == 0)
5324 /* Load from constant pool. */
5325 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5326 return;
5329 if (n_var == 2)
5331 /* The vector is initialized only with non-constants. */
5332 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5333 XVECEXP (vals, 0, 1));
5335 emit_move_insn (target, new_rtx);
5336 return;
5339 /* One field is non-constant and the other one is a constant. Load the
5340 constant from the constant pool and use the ps_merge instruction to
5341 construct the whole vector. */
5342 op1 = XVECEXP (vals, 0, 0);
5343 op2 = XVECEXP (vals, 0, 1);
5345 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5347 tmp = gen_reg_rtx (GET_MODE (constant_op));
5348 emit_move_insn (tmp, constant_op);
5350 if (CONSTANT_P (op1))
5351 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5352 else
5353 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5355 emit_move_insn (target, new_rtx);
5358 void
5359 paired_expand_vector_move (rtx operands[])
5361 rtx op0 = operands[0], op1 = operands[1];
5363 emit_move_insn (op0, op1);
5366 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5367 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5368 operands for the relation operation COND. This is a recursive
5369 function. */
5371 static void
5372 paired_emit_vector_compare (enum rtx_code rcode,
5373 rtx dest, rtx op0, rtx op1,
5374 rtx cc_op0, rtx cc_op1)
5376 rtx tmp = gen_reg_rtx (V2SFmode);
5377 rtx tmp1, max, min;
5379 gcc_assert (TARGET_PAIRED_FLOAT);
5380 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5382 switch (rcode)
5384 case LT:
5385 case LTU:
5386 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5387 return;
5388 case GE:
5389 case GEU:
5390 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5391 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5392 return;
5393 case LE:
5394 case LEU:
5395 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5396 return;
5397 case GT:
5398 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5399 return;
5400 case EQ:
5401 tmp1 = gen_reg_rtx (V2SFmode);
5402 max = gen_reg_rtx (V2SFmode);
5403 min = gen_reg_rtx (V2SFmode);
5404 gen_reg_rtx (V2SFmode);
5406 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5407 emit_insn (gen_selv2sf4
5408 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5409 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5410 emit_insn (gen_selv2sf4
5411 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5412 emit_insn (gen_subv2sf3 (tmp1, min, max));
5413 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5414 return;
5415 case NE:
5416 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5417 return;
5418 case UNLE:
5419 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5420 return;
5421 case UNLT:
5422 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5423 return;
5424 case UNGE:
5425 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5426 return;
5427 case UNGT:
5428 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5429 return;
5430 default:
5431 gcc_unreachable ();
5434 return;
5437 /* Emit vector conditional expression.
5438 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5439 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5441 int
5442 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5443 rtx cond, rtx cc_op0, rtx cc_op1)
5445 enum rtx_code rcode = GET_CODE (cond);
5447 if (!TARGET_PAIRED_FLOAT)
5448 return 0;
5450 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5452 return 1;
5455 /* Initialize vector TARGET to VALS. */
5457 void
5458 rs6000_expand_vector_init (rtx target, rtx vals)
5460 enum machine_mode mode = GET_MODE (target);
5461 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5462 int n_elts = GET_MODE_NUNITS (mode);
5463 int n_var = 0, one_var = -1;
5464 bool all_same = true, all_const_zero = true;
5465 rtx x, mem;
5466 int i;
5468 for (i = 0; i < n_elts; ++i)
5470 x = XVECEXP (vals, 0, i);
5471 if (!CONSTANT_P (x))
5472 ++n_var, one_var = i;
5473 else if (x != CONST0_RTX (inner_mode))
5474 all_const_zero = false;
5476 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5477 all_same = false;
5480 if (n_var == 0)
5482 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5483 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5484 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5486 /* Zero register. */
5487 emit_insn (gen_rtx_SET (VOIDmode, target,
5488 gen_rtx_XOR (mode, target, target)));
5489 return;
5491 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5493 /* Splat immediate. */
5494 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5495 return;
5497 else
5499 /* Load from constant pool. */
5500 emit_move_insn (target, const_vec);
5501 return;
5505 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5506 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5508 rtx op0 = XVECEXP (vals, 0, 0);
5509 rtx op1 = XVECEXP (vals, 0, 1);
5510 if (all_same)
5512 if (!MEM_P (op0) && !REG_P (op0))
5513 op0 = force_reg (inner_mode, op0);
5514 if (mode == V2DFmode)
5515 emit_insn (gen_vsx_splat_v2df (target, op0));
5516 else
5517 emit_insn (gen_vsx_splat_v2di (target, op0));
5519 else
5521 op0 = force_reg (inner_mode, op0);
5522 op1 = force_reg (inner_mode, op1);
5523 if (mode == V2DFmode)
5524 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5525 else
5526 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5528 return;
5531 /* With single-precision floating point on VSX, we know that internally
5532 single precision is actually represented as a double, so either make two
5533 V2DF vectors and convert them to single precision, or do one
5534 conversion and splat the result to the other elements. */
5535 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5537 if (all_same)
5539 rtx freg = gen_reg_rtx (V4SFmode);
5540 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5541 rtx cvt = ((TARGET_XSCVDPSPN)
5542 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5543 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5545 emit_insn (cvt);
5546 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5548 else
5550 rtx dbl_even = gen_reg_rtx (V2DFmode);
5551 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5552 rtx flt_even = gen_reg_rtx (V4SFmode);
5553 rtx flt_odd = gen_reg_rtx (V4SFmode);
5554 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5555 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5556 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5557 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5559 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5560 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5561 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5562 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5563 rs6000_expand_extract_even (target, flt_even, flt_odd);
5565 return;
5568 /* Store value to stack temp. Load vector element. Splat. However, splat
5569 of 64-bit items is not supported on AltiVec. */
5570 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5572 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5573 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5574 XVECEXP (vals, 0, 0));
5575 x = gen_rtx_UNSPEC (VOIDmode,
5576 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5577 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5578 gen_rtvec (2,
5579 gen_rtx_SET (VOIDmode,
5580 target, mem),
5581 x)));
5582 x = gen_rtx_VEC_SELECT (inner_mode, target,
5583 gen_rtx_PARALLEL (VOIDmode,
5584 gen_rtvec (1, const0_rtx)));
5585 emit_insn (gen_rtx_SET (VOIDmode, target,
5586 gen_rtx_VEC_DUPLICATE (mode, x)));
5587 return;
5590 /* One field is non-constant. Load the constant, then overwrite the
5591 varying field. */
5592 if (n_var == 1)
5594 rtx copy = copy_rtx (vals);
5596 /* Load constant part of vector, substitute neighboring value for
5597 varying element. */
5598 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5599 rs6000_expand_vector_init (target, copy);
5601 /* Insert variable. */
5602 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5603 return;
5606 /* Construct the vector in memory one field at a time
5607 and load the whole vector. */
5608 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5609 for (i = 0; i < n_elts; i++)
5610 emit_move_insn (adjust_address_nv (mem, inner_mode,
5611 i * GET_MODE_SIZE (inner_mode)),
5612 XVECEXP (vals, 0, i));
5613 emit_move_insn (target, mem);
5616 /* Set field ELT of TARGET to VAL. */
5618 void
5619 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5621 enum machine_mode mode = GET_MODE (target);
5622 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5623 rtx reg = gen_reg_rtx (mode);
5624 rtx mask, mem, x;
5625 int width = GET_MODE_SIZE (inner_mode);
5626 int i;
5628 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5630 rtx (*set_func) (rtx, rtx, rtx, rtx)
5631 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5632 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5633 return;
5636 /* Simplify setting single element vectors like V1TImode. */
5637 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5639 emit_move_insn (target, gen_lowpart (mode, val));
5640 return;
5643 /* Load single variable value. */
5644 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5645 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5646 x = gen_rtx_UNSPEC (VOIDmode,
5647 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5648 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5649 gen_rtvec (2,
5650 gen_rtx_SET (VOIDmode,
5651 reg, mem),
5652 x)));
5654 /* Linear sequence. */
5655 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5656 for (i = 0; i < 16; ++i)
5657 XVECEXP (mask, 0, i) = GEN_INT (i);
5659 /* Set permute mask to insert element into target. */
5660 for (i = 0; i < width; ++i)
5661 XVECEXP (mask, 0, elt*width + i)
5662 = GEN_INT (i + 0x10);
5663 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5665 if (BYTES_BIG_ENDIAN)
5666 x = gen_rtx_UNSPEC (mode,
5667 gen_rtvec (3, target, reg,
5668 force_reg (V16QImode, x)),
5669 UNSPEC_VPERM);
5670 else
5672 /* Invert selector. We prefer to generate VNAND on P8 so
5673 that future fusion opportunities can kick in, but must
5674 generate VNOR elsewhere. */
5675 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5676 rtx iorx = (TARGET_P8_VECTOR
5677 ? gen_rtx_IOR (V16QImode, notx, notx)
5678 : gen_rtx_AND (V16QImode, notx, notx));
5679 rtx tmp = gen_reg_rtx (V16QImode);
5680 emit_insn (gen_rtx_SET (VOIDmode, tmp, iorx));
5682 /* Permute with operands reversed and adjusted selector. */
5683 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5684 UNSPEC_VPERM);
5687 emit_insn (gen_rtx_SET (VOIDmode, target, x));
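
/* A standalone sketch, not part of GCC, of the selector built above:
   a vperm control vector indexes the 32-byte concatenation of the two
   inputs, so entries 0x00..0x0f keep TARGET's bytes and entry 0x10+i
   pulls byte i of the register holding the new element into the ELT
   slot (big-endian ordering, as in the code above).  */

#include <stdio.h>

static void
build_insert_selector (unsigned char sel[16], int elt, int width)
{
  int i;
  for (i = 0; i < 16; i++)
    sel[i] = i;                        /* identity: keep target bytes */
  for (i = 0; i < width; i++)
    sel[elt * width + i] = 0x10 + i;   /* splice in the new element */
}

int
main (void)
{
  unsigned char sel[16];
  int i;
  build_insert_selector (sel, 2, 4);   /* insert word element 2 */
  for (i = 0; i < 16; i++)
    printf ("%02x%c", sel[i], i == 15 ? '\n' : ' ');
  return 0;
}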
5690 /* Extract field ELT from VEC into TARGET. */
5692 void
5693 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5695 enum machine_mode mode = GET_MODE (vec);
5696 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5697 rtx mem;
5699 if (VECTOR_MEM_VSX_P (mode))
5701 switch (mode)
5703 default:
5704 break;
5705 case V1TImode:
5706 gcc_assert (elt == 0 && inner_mode == TImode);
5707 emit_move_insn (target, gen_lowpart (TImode, vec));
5708 break;
5709 case V2DFmode:
5710 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5711 return;
5712 case V2DImode:
5713 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5714 return;
5715 case V4SFmode:
5716 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5717 return;
5721 /* Allocate mode-sized buffer. */
5722 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5724 emit_move_insn (mem, vec);
5726 /* Add offset to field within buffer matching vector element. */
5727 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5729 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5732 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5733 implement ANDing by the mask IN. */
5734 void
5735 build_mask64_2_operands (rtx in, rtx *out)
5737 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5738 int shift;
5740 gcc_assert (GET_CODE (in) == CONST_INT);
5742 c = INTVAL (in);
5743 if (c & 1)
5745 /* Assume c initially something like 0x00fff000000fffff. The idea
5746 is to rotate the word so that the middle ^^^^^^ group of zeros
5747 is at the MS end and can be cleared with an rldicl mask. We then
5748 rotate back and clear off the MS ^^ group of zeros with a
5749 second rldicl. */
5750 c = ~c; /* c == 0xff000ffffff00000 */
5751 lsb = c & -c; /* lsb == 0x0000000000100000 */
5752 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5753 c = ~c; /* c == 0x00fff000000fffff */
5754 c &= -lsb; /* c == 0x00fff00000000000 */
5755 lsb = c & -c; /* lsb == 0x0000100000000000 */
5756 c = ~c; /* c == 0xff000fffffffffff */
5757 c &= -lsb; /* c == 0xff00000000000000 */
5758 shift = 0;
5759 while ((lsb >>= 1) != 0)
5760 shift++; /* shift == 44 on exit from loop */
5761 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5762 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5763 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5765 else
5767 /* Assume c initially something like 0xff000f0000000000. The idea
5768 is to rotate the word so that the ^^^ middle group of zeros
5769 is at the LS end and can be cleared with an rldicr mask. We then
5770 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5771 a second rldicr. */
5772 lsb = c & -c; /* lsb == 0x0000010000000000 */
5773 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5774 c = ~c; /* c == 0x00fff0ffffffffff */
5775 c &= -lsb; /* c == 0x00fff00000000000 */
5776 lsb = c & -c; /* lsb == 0x0000100000000000 */
5777 c = ~c; /* c == 0xff000fffffffffff */
5778 c &= -lsb; /* c == 0xff00000000000000 */
5779 shift = 0;
5780 while ((lsb >>= 1) != 0)
5781 shift++; /* shift == 44 on exit from loop */
5782 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5783 m1 >>= shift; /* m1 == 0x0000000000000fff */
5784 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5787 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5788 masks will be all 1's. We are guaranteed more than one transition. */
5789 out[0] = GEN_INT (64 - shift);
5790 out[1] = GEN_INT (m1);
5791 out[2] = GEN_INT (shift);
5792 out[3] = GEN_INT (m2);
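
/* A standalone sketch, not part of GCC, verifying the decomposition
   above on the worked example from the comments: the two masks
   combine as rotl64 (M1, SHIFT) & M2 == C, which is exactly the
   condition for the rldicl pair to implement AND by C.  */

#include <assert.h>
#include <stdint.h>

static uint64_t
rotl64 (uint64_t x, int n)
{
  return n ? (x << n) | (x >> (64 - n)) : x;
}

int
main (void)
{
  const uint64_t c0 = 0x00fff000000fffffULL;  /* mask from the comment */
  uint64_t c = c0, lsb, m1, m2;
  int shift = 0;

  /* Same steps as the C & 1 branch above.  */
  c = ~c;  lsb = c & -c;  m1 = -lsb;
  c = ~c;  c &= -lsb;  lsb = c & -c;
  c = ~c;  c &= -lsb;
  while ((lsb >>= 1) != 0)
    shift++;
  m1 <<= 64 - shift;
  m1 = ~m1;
  m2 = ~c;

  assert (shift == 44);
  assert (m1 == 0x000000ffffffffffULL && m2 == 0x00ffffffffffffffULL);
  assert ((rotl64 (m1, shift) & m2) == c0);
  return 0;
}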
5795 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5797 bool
5798 invalid_e500_subreg (rtx op, enum machine_mode mode)
5800 if (TARGET_E500_DOUBLE)
5802 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5803 subreg:TI and reg:TF. Decimal float modes are like integer
5804 modes (only low part of each register used) for this
5805 purpose. */
5806 if (GET_CODE (op) == SUBREG
5807 && (mode == SImode || mode == DImode || mode == TImode
5808 || mode == DDmode || mode == TDmode || mode == PTImode)
5809 && REG_P (SUBREG_REG (op))
5810 && (GET_MODE (SUBREG_REG (op)) == DFmode
5811 || GET_MODE (SUBREG_REG (op)) == TFmode))
5812 return true;
5814 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5815 reg:TI. */
5816 if (GET_CODE (op) == SUBREG
5817 && (mode == DFmode || mode == TFmode)
5818 && REG_P (SUBREG_REG (op))
5819 && (GET_MODE (SUBREG_REG (op)) == DImode
5820 || GET_MODE (SUBREG_REG (op)) == TImode
5821 || GET_MODE (SUBREG_REG (op)) == PTImode
5822 || GET_MODE (SUBREG_REG (op)) == DDmode
5823 || GET_MODE (SUBREG_REG (op)) == TDmode))
5824 return true;
5827 if (TARGET_SPE
5828 && GET_CODE (op) == SUBREG
5829 && mode == SImode
5830 && REG_P (SUBREG_REG (op))
5831 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5832 return true;
5834 return false;
5837 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5838 selects whether the alignment is ABI-mandated, optional, or
5839 both ABI-mandated and optional. */
5841 unsigned int
5842 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5844 if (how != align_opt)
5846 if (TREE_CODE (type) == VECTOR_TYPE)
5848 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5849 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5851 if (align < 64)
5852 align = 64;
5854 else if (align < 128)
5855 align = 128;
5857 else if (TARGET_E500_DOUBLE
5858 && TREE_CODE (type) == REAL_TYPE
5859 && TYPE_MODE (type) == DFmode)
5861 if (align < 64)
5862 align = 64;
5866 if (how != align_abi)
5868 if (TREE_CODE (type) == ARRAY_TYPE
5869 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5871 if (align < BITS_PER_WORD)
5872 align = BITS_PER_WORD;
5876 return align;
5879 /* AIX increases natural record alignment to doubleword if the first
5880 field is an FP double while the FP fields remain word aligned. */
5882 unsigned int
5883 rs6000_special_round_type_align (tree type, unsigned int computed,
5884 unsigned int specified)
5886 unsigned int align = MAX (computed, specified);
5887 tree field = TYPE_FIELDS (type);
5889 /* Skip all non-field decls. */
5890 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5891 field = DECL_CHAIN (field);
5893 if (field != NULL && field != type)
5895 type = TREE_TYPE (field);
5896 while (TREE_CODE (type) == ARRAY_TYPE)
5897 type = TREE_TYPE (type);
5899 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5900 align = MAX (align, 64);
5903 return align;
5906 /* Darwin increases record alignment to the natural alignment of
5907 the first field. */
5909 unsigned int
5910 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5911 unsigned int specified)
5913 unsigned int align = MAX (computed, specified);
5915 if (TYPE_PACKED (type))
5916 return align;
5918 /* Find the first field, looking down into aggregates. */
5919 do {
5920 tree field = TYPE_FIELDS (type);
5921 /* Skip all non-field decls. */
5922 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5923 field = DECL_CHAIN (field);
5924 if (! field)
5925 break;
5926 /* A packed field does not contribute any extra alignment. */
5927 if (DECL_PACKED (field))
5928 return align;
5929 type = TREE_TYPE (field);
5930 while (TREE_CODE (type) == ARRAY_TYPE)
5931 type = TREE_TYPE (type);
5932 } while (AGGREGATE_TYPE_P (type));
5934 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
5935 align = MAX (align, TYPE_ALIGN (type));
5937 return align;
5940 /* Return 1 for an operand in small memory on V.4/eabi. */
5942 int
5943 small_data_operand (rtx op ATTRIBUTE_UNUSED,
5944 enum machine_mode mode ATTRIBUTE_UNUSED)
5946 #if TARGET_ELF
5947 rtx sym_ref;
5949 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
5950 return 0;
5952 if (DEFAULT_ABI != ABI_V4)
5953 return 0;
5955 /* Vector and float memory instructions have a limited offset on the
5956 SPE, so using a vector or float variable directly as an operand is
5957 not useful. */
5958 if (TARGET_SPE
5959 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
5960 return 0;
5962 if (GET_CODE (op) == SYMBOL_REF)
5963 sym_ref = op;
5965 else if (GET_CODE (op) != CONST
5966 || GET_CODE (XEXP (op, 0)) != PLUS
5967 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
5968 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
5969 return 0;
5971 else
5973 rtx sum = XEXP (op, 0);
5974 HOST_WIDE_INT summand;
5976 /* We have to be careful here, because it is the referenced address
5977 that must be 32k from _SDA_BASE_, not just the symbol. */
5978 summand = INTVAL (XEXP (sum, 1));
5979 if (summand < 0 || summand > g_switch_value)
5980 return 0;
5982 sym_ref = XEXP (sum, 0);
5985 return SYMBOL_REF_SMALL_P (sym_ref);
5986 #else
5987 return 0;
5988 #endif
5991 /* Return true if either operand is a general purpose register. */
5993 bool
5994 gpr_or_gpr_p (rtx op0, rtx op1)
5996 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
5997 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
6000 /* Return true if this is a move direct operation between GPR registers and
6001 floating point/VSX registers. */
6003 bool
6004 direct_move_p (rtx op0, rtx op1)
6006 int regno0, regno1;
6008 if (!REG_P (op0) || !REG_P (op1))
6009 return false;
6011 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
6012 return false;
6014 regno0 = REGNO (op0);
6015 regno1 = REGNO (op1);
6016 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
6017 return false;
6019 if (INT_REGNO_P (regno0))
6020 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
6022 else if (INT_REGNO_P (regno1))
6024 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
6025 return true;
6027 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
6028 return true;
6031 return false;
6034 /* Return true if this is a load or store quad operation. This function does
6035 not handle the atomic quad memory instructions. */
6037 bool
6038 quad_load_store_p (rtx op0, rtx op1)
6040 bool ret;
6042 if (!TARGET_QUAD_MEMORY)
6043 ret = false;
6045 else if (REG_P (op0) && MEM_P (op1))
6046 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6047 && quad_memory_operand (op1, GET_MODE (op1))
6048 && !reg_overlap_mentioned_p (op0, op1));
6050 else if (MEM_P (op0) && REG_P (op1))
6051 ret = (quad_memory_operand (op0, GET_MODE (op0))
6052 && quad_int_reg_operand (op1, GET_MODE (op1)));
6054 else
6055 ret = false;
6057 if (TARGET_DEBUG_ADDR)
6059 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6060 ret ? "true" : "false");
6061 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
6064 return ret;
6067 /* Given an address, return a constant offset term if one exists. */
6069 static rtx
6070 address_offset (rtx op)
6072 if (GET_CODE (op) == PRE_INC
6073 || GET_CODE (op) == PRE_DEC)
6074 op = XEXP (op, 0);
6075 else if (GET_CODE (op) == PRE_MODIFY
6076 || GET_CODE (op) == LO_SUM)
6077 op = XEXP (op, 1);
6079 if (GET_CODE (op) == CONST)
6080 op = XEXP (op, 0);
6082 if (GET_CODE (op) == PLUS)
6083 op = XEXP (op, 1);
6085 if (CONST_INT_P (op))
6086 return op;
6088 return NULL_RTX;
6091 /* Return true if the MEM operand is a memory operand suitable for use
6092 with a (full width, possibly multiple) gpr load/store. On
6093 powerpc64 this means the offset must be divisible by 4.
6094 Implements 'Y' constraint.
6096 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6097 a constraint function we know the operand has satisfied a suitable
6098 memory predicate. Also accept some odd rtl generated by reload
6099 (see rs6000_legitimize_reload_address for various forms). It is
6100 important that reload rtl be accepted by appropriate constraints
6101 but not by the operand predicate.
6103 Offsetting a lo_sum should not be allowed, except where we know by
6104 alignment that a 32k boundary is not crossed, but see the ???
6105 comment in rs6000_legitimize_reload_address. Note that by
6106 "offsetting" here we mean a further offset to access parts of the
6107 MEM. It's fine to have a lo_sum where the inner address is offset
6108 from a sym, since the same sym+offset will appear in the high part
6109 of the address calculation. */
6111 bool
6112 mem_operand_gpr (rtx op, enum machine_mode mode)
6114 unsigned HOST_WIDE_INT offset;
6115 int extra;
6116 rtx addr = XEXP (op, 0);
6118 op = address_offset (addr);
6119 if (op == NULL_RTX)
6120 return true;
6122 offset = INTVAL (op);
6123 if (TARGET_POWERPC64 && (offset & 3) != 0)
6124 return false;
6126 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6127 if (extra < 0)
6128 extra = 0;
6130 if (GET_CODE (addr) == LO_SUM)
6131 /* For lo_sum addresses, we must allow any offset except one that
6132 causes a wrap, so test only the low 16 bits. */
6133 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6135 return offset + 0x8000 < 0x10000u - extra;
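
/* A standalone sketch, not part of GCC, of the two idioms above: the
   XOR/subtract folds the low 16 bits of OFFSET to the signed value a
   D-form displacement would contribute, and adding 0x8000 turns the
   signed range test -0x8000 <= x < 0x8000 - EXTRA into a single
   unsigned compare.  */

#include <assert.h>
#include <stdint.h>

static int64_t
fold_low16 (uint64_t offset)
{
  return (int64_t) ((offset & 0xffff) ^ 0x8000) - 0x8000;
}

static int
d_form_ok (uint64_t offset, unsigned int extra)
{
  return offset + 0x8000 < 0x10000u - extra;
}

int
main (void)
{
  assert (fold_low16 (0x7fff) == 0x7fff);
  assert (fold_low16 (0x8000) == -0x8000);  /* wraps negative */
  assert (fold_low16 (0x12345) == 0x2345);
  assert (d_form_ok (0x7fff, 0) && !d_form_ok (0x8000, 0));
  assert (d_form_ok (-0x8000, 0));
  assert (!d_form_ok (0x7ff8, 8));          /* EXTRA shrinks the range */
  return 0;
}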
6138 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6140 static bool
6141 reg_offset_addressing_ok_p (enum machine_mode mode)
6143 switch (mode)
6145 case V16QImode:
6146 case V8HImode:
6147 case V4SFmode:
6148 case V4SImode:
6149 case V2DFmode:
6150 case V2DImode:
6151 case V1TImode:
6152 case TImode:
6153 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6154 TImode is not a vector mode, if we want to use the VSX registers to
6155 move it around, we need to restrict ourselves to reg+reg
6156 addressing. */
6157 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6158 return false;
6159 break;
6161 case V4HImode:
6162 case V2SImode:
6163 case V1DImode:
6164 case V2SFmode:
6165 /* Paired vector modes. Only reg+reg addressing is valid. */
6166 if (TARGET_PAIRED_FLOAT)
6167 return false;
6168 break;
6170 case SDmode:
6171 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6172 addressing for the LFIWZX and STFIWX instructions. */
6173 if (TARGET_NO_SDMODE_STACK)
6174 return false;
6175 break;
6177 default:
6178 break;
6181 return true;
6184 static bool
6185 virtual_stack_registers_memory_p (rtx op)
6187 int regnum;
6189 if (GET_CODE (op) == REG)
6190 regnum = REGNO (op);
6192 else if (GET_CODE (op) == PLUS
6193 && GET_CODE (XEXP (op, 0)) == REG
6194 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6195 regnum = REGNO (XEXP (op, 0));
6197 else
6198 return false;
6200 return (regnum >= FIRST_VIRTUAL_REGISTER
6201 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6204 /* Return true if a MODE sized memory accesses to OP plus OFFSET
6205 is known to not straddle a 32k boundary. */
6207 static bool
6208 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6209 enum machine_mode mode)
6211 tree decl, type;
6212 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6214 if (GET_CODE (op) != SYMBOL_REF)
6215 return false;
6217 dsize = GET_MODE_SIZE (mode);
6218 decl = SYMBOL_REF_DECL (op);
6219 if (!decl)
6221 if (dsize == 0)
6222 return false;
6224 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6225 replacing memory addresses with an anchor plus offset. We
6226 could find the decl by rummaging around in the block->objects
6227 VEC for the given offset but that seems like too much work. */
6228 dalign = BITS_PER_UNIT;
6229 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6230 && SYMBOL_REF_ANCHOR_P (op)
6231 && SYMBOL_REF_BLOCK (op) != NULL)
6233 struct object_block *block = SYMBOL_REF_BLOCK (op);
6235 dalign = block->alignment;
6236 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6238 else if (CONSTANT_POOL_ADDRESS_P (op))
6240 /* It would be nice to have get_pool_align(). */
6241 enum machine_mode cmode = get_pool_mode (op);
6243 dalign = GET_MODE_ALIGNMENT (cmode);
6246 else if (DECL_P (decl))
6248 dalign = DECL_ALIGN (decl);
6250 if (dsize == 0)
6252 /* Allow BLKmode when the entire object is known to not
6253 cross a 32k boundary. */
6254 if (!DECL_SIZE_UNIT (decl))
6255 return false;
6257 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6258 return false;
6260 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6261 if (dsize > 32768)
6262 return false;
6264 return dalign / BITS_PER_UNIT >= dsize;
6267 else
6269 type = TREE_TYPE (decl);
6271 dalign = TYPE_ALIGN (type);
6272 if (CONSTANT_CLASS_P (decl))
6273 dalign = CONSTANT_ALIGNMENT (decl, dalign);
6274 else
6275 dalign = DATA_ALIGNMENT (decl, dalign);
6277 if (dsize == 0)
6279 /* BLKmode, check the entire object. */
6280 if (TREE_CODE (decl) == STRING_CST)
6281 dsize = TREE_STRING_LENGTH (decl);
6282 else if (TYPE_SIZE_UNIT (type)
6283 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
6284 dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6285 else
6286 return false;
6287 if (dsize > 32768)
6288 return false;
6290 return dalign / BITS_PER_UNIT >= dsize;
6294 /* Find how many bits of the alignment we know for this access. */
6295 mask = dalign / BITS_PER_UNIT - 1;
6296 lsb = offset & -offset;
6297 mask &= lsb - 1;
6298 dalign = mask + 1;
6300 return dalign >= dsize;
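
/* A standalone sketch, not part of GCC: the bit twiddling above
   computes the alignment guaranteed for SYM+OFFSET as the smaller of
   the symbol's byte alignment and the lowest set bit of OFFSET; an
   OFFSET of zero keeps the full alignment because LSB - 1 is then
   all ones.  */

#include <assert.h>
#include <stdint.h>

static uint64_t
known_align (uint64_t dalign_bytes, uint64_t offset)
{
  uint64_t mask = dalign_bytes - 1;   /* alignment is a power of two */
  uint64_t lsb = offset & -offset;
  mask &= lsb - 1;
  return mask + 1;
}

int
main (void)
{
  assert (known_align (16, 0) == 16);  /* no offset: full alignment */
  assert (known_align (16, 4) == 4);   /* offset limits what we know */
  assert (known_align (16, 24) == 8);
  assert (known_align (4, 16) == 4);   /* base alignment limits too */
  return 0;
}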
6303 static bool
6304 constant_pool_expr_p (rtx op)
6306 rtx base, offset;
6308 split_const (op, &base, &offset);
6309 return (GET_CODE (base) == SYMBOL_REF
6310 && CONSTANT_POOL_ADDRESS_P (base)
6311 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6314 static const_rtx tocrel_base, tocrel_offset;
6316 /* Return true if OP is a toc pointer relative address (the output
6317 of create_TOC_reference). If STRICT, do not match high part or
6318 non-split -mcmodel=large/medium toc pointer relative addresses. */
6320 bool
6321 toc_relative_expr_p (const_rtx op, bool strict)
6323 if (!TARGET_TOC)
6324 return false;
6326 if (TARGET_CMODEL != CMODEL_SMALL)
6328 /* Only match the low part. */
6329 if (GET_CODE (op) == LO_SUM
6330 && REG_P (XEXP (op, 0))
6331 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6332 op = XEXP (op, 1);
6333 else if (strict)
6334 return false;
6337 tocrel_base = op;
6338 tocrel_offset = const0_rtx;
6339 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6341 tocrel_base = XEXP (op, 0);
6342 tocrel_offset = XEXP (op, 1);
6345 return (GET_CODE (tocrel_base) == UNSPEC
6346 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6349 /* Return true if X is a constant pool address, and also for cmodel=medium
6350 if X is a toc-relative address known to be offsettable within MODE. */
6352 bool
6353 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6354 bool strict)
6356 return (toc_relative_expr_p (x, strict)
6357 && (TARGET_CMODEL != CMODEL_MEDIUM
6358 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6359 || mode == QImode
6360 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6361 INTVAL (tocrel_offset), mode)));
6364 static bool
6365 legitimate_small_data_p (enum machine_mode mode, rtx x)
6367 return (DEFAULT_ABI == ABI_V4
6368 && !flag_pic && !TARGET_TOC
6369 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6370 && small_data_operand (x, mode));
6373 /* SPE offset addressing is limited to 5-bits worth of double words. */
6374 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
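
/* A standalone sketch, not part of GCC: the macro above accepts
   exactly the 32 offsets 0, 8, ..., 248 -- a 5-bit doubleword index
   scaled by 8, matching the evldd/evstdd displacement encoding.  */

#include <assert.h>

int
main (void)
{
  int x, n = 0;
  for (x = 0; x <= 512; x++)
    if ((x & ~0xf8) == 0)       /* the macro's test */
      {
        assert (x % 8 == 0 && x <= 248);
        n++;
      }
  assert (n == 32);
  return 0;
}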
6376 bool
6377 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6378 bool strict, bool worst_case)
6380 unsigned HOST_WIDE_INT offset;
6381 unsigned int extra;
6383 if (GET_CODE (x) != PLUS)
6384 return false;
6385 if (!REG_P (XEXP (x, 0)))
6386 return false;
6387 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6388 return false;
6389 if (!reg_offset_addressing_ok_p (mode))
6390 return virtual_stack_registers_memory_p (x);
6391 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6392 return true;
6393 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6394 return false;
6396 offset = INTVAL (XEXP (x, 1));
6397 extra = 0;
6398 switch (mode)
6400 case V4HImode:
6401 case V2SImode:
6402 case V1DImode:
6403 case V2SFmode:
6404 /* SPE vector modes. */
6405 return SPE_CONST_OFFSET_OK (offset);
6407 case DFmode:
6408 case DDmode:
6409 case DImode:
6410 /* On e500v2, we may have:
6412 (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
6414 which is addressed with evldd instructions. */
6415 if (TARGET_E500_DOUBLE)
6416 return SPE_CONST_OFFSET_OK (offset);
6418 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6419 addressing. */
6420 if (VECTOR_MEM_VSX_P (mode))
6421 return false;
6423 if (!worst_case)
6424 break;
6425 if (!TARGET_POWERPC64)
6426 extra = 4;
6427 else if (offset & 3)
6428 return false;
6429 break;
6431 case TFmode:
6432 if (TARGET_E500_DOUBLE)
6433 return (SPE_CONST_OFFSET_OK (offset)
6434 && SPE_CONST_OFFSET_OK (offset + 8));
6435 /* fall through */
6437 case TDmode:
6438 case TImode:
6439 case PTImode:
6440 extra = 8;
6441 if (!worst_case)
6442 break;
6443 if (!TARGET_POWERPC64)
6444 extra = 12;
6445 else if (offset & 3)
6446 return false;
6447 break;
6449 default:
6450 break;
6453 offset += 0x8000;
6454 return offset < 0x10000 - extra;
6457 bool
6458 legitimate_indexed_address_p (rtx x, int strict)
6460 rtx op0, op1;
6462 if (GET_CODE (x) != PLUS)
6463 return false;
6465 op0 = XEXP (x, 0);
6466 op1 = XEXP (x, 1);
6468 /* Recognize the rtl generated by reload which we know will later be
6469 replaced with proper base and index regs. */
6470 if (!strict
6471 && reload_in_progress
6472 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6473 && REG_P (op1))
6474 return true;
6476 return (REG_P (op0) && REG_P (op1)
6477 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6478 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6479 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6480 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6483 bool
6484 avoiding_indexed_address_p (enum machine_mode mode)
6486 /* Avoid indexed addressing for modes that have non-indexed
6487 load/store instruction forms. */
6488 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6491 bool
6492 legitimate_indirect_address_p (rtx x, int strict)
6494 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6497 bool
6498 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6500 if (!TARGET_MACHO || !flag_pic
6501 || mode != SImode || GET_CODE (x) != MEM)
6502 return false;
6503 x = XEXP (x, 0);
6505 if (GET_CODE (x) != LO_SUM)
6506 return false;
6507 if (GET_CODE (XEXP (x, 0)) != REG)
6508 return false;
6509 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6510 return false;
6511 x = XEXP (x, 1);
6513 return CONSTANT_P (x);
6516 static bool
6517 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6519 if (GET_CODE (x) != LO_SUM)
6520 return false;
6521 if (GET_CODE (XEXP (x, 0)) != REG)
6522 return false;
6523 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6524 return false;
6525 /* Restrict addressing for DI because of our SUBREG hackery. */
6526 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6527 return false;
6528 x = XEXP (x, 1);
6530 if (TARGET_ELF || TARGET_MACHO)
6532 bool large_toc_ok;
6534 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6535 return false;
6536 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that usually calls
6537 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
6538 recognizes some LO_SUM addresses as valid although this
6539 function says the opposite. In most cases LRA can generate
6540 correct code for address reloads through various transformations,
6541 but it cannot manage some LO_SUM cases. So we need to add code
6542 analogous to that in rs6000_legitimize_reload_address for
6543 LO_SUM here, saying that some addresses are still valid. */
6544 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6545 && small_toc_ref (x, VOIDmode));
6546 if (TARGET_TOC && ! large_toc_ok)
6547 return false;
6548 if (GET_MODE_NUNITS (mode) != 1)
6549 return false;
6550 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6551 && !(/* ??? Assume floating point reg based on mode? */
6552 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6553 && (mode == DFmode || mode == DDmode)))
6554 return false;
6556 return CONSTANT_P (x) || large_toc_ok;
6559 return false;
6563 /* Try machine-dependent ways of modifying an illegitimate address
6564 to be legitimate. If we find one, return the new, valid address.
6565 This is used from only one place: `memory_address' in explow.c.
6567 OLDX is the address as it was before break_out_memory_refs was
6568 called. In some cases it is useful to look at this to decide what
6569 needs to be done.
6571 It is always safe for this function to do nothing. It exists to
6572 recognize opportunities to optimize the output.
6574 On RS/6000, first check for the sum of a register with a constant
6575 integer that is out of range. If so, generate code to add the
6576 constant with the low-order 16 bits masked to the register and force
6577 this result into another register (this can be done with `cau').
6578 Then generate an address of REG+(CONST&0xffff), allowing for the
6579 possibility of bit 16 being a one.
6581 Then check for the sum of a register and something that is not
6582 constant; try to load the other operand into a register and return the sum. */
6584 static rtx
6585 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6586 enum machine_mode mode)
6588 unsigned int extra;
6590 if (!reg_offset_addressing_ok_p (mode))
6592 if (virtual_stack_registers_memory_p (x))
6593 return x;
6595 /* In theory we should not be seeing addresses of the form reg+0,
6596 but just in case it is generated, optimize it away. */
6597 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6598 return force_reg (Pmode, XEXP (x, 0));
6600 /* For TImode with load/store quad, restrict addresses to just a single
6601 pointer, so it works with both GPRs and VSX registers. */
6602 /* Make sure both operands are registers. */
6603 else if (GET_CODE (x) == PLUS
6604 && (mode != TImode || !TARGET_QUAD_MEMORY))
6605 return gen_rtx_PLUS (Pmode,
6606 force_reg (Pmode, XEXP (x, 0)),
6607 force_reg (Pmode, XEXP (x, 1)));
6608 else
6609 return force_reg (Pmode, x);
6611 if (GET_CODE (x) == SYMBOL_REF)
6613 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6614 if (model != 0)
6615 return rs6000_legitimize_tls_address (x, model);
6618 extra = 0;
6619 switch (mode)
6621 case TFmode:
6622 case TDmode:
6623 case TImode:
6624 case PTImode:
6625 /* As in legitimate_offset_address_p we do not assume
6626 worst-case. The mode here is just a hint as to the registers
6627 used. A TImode is usually in gprs, but may actually be in
6628 fprs. Leave worst-case scenario for reload to handle via
6629 insn constraints. PTImode is only GPRs. */
6630 extra = 8;
6631 break;
6632 default:
6633 break;
6636 if (GET_CODE (x) == PLUS
6637 && GET_CODE (XEXP (x, 0)) == REG
6638 && GET_CODE (XEXP (x, 1)) == CONST_INT
6639 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6640 >= 0x10000 - extra)
6641 && !(SPE_VECTOR_MODE (mode)
6642 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6644 HOST_WIDE_INT high_int, low_int;
6645 rtx sum;
6646 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6647 if (low_int >= 0x8000 - extra)
6648 low_int = 0;
6649 high_int = INTVAL (XEXP (x, 1)) - low_int;
6650 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6651 GEN_INT (high_int)), 0);
6652 return plus_constant (Pmode, sum, low_int);
6654 else if (GET_CODE (x) == PLUS
6655 && GET_CODE (XEXP (x, 0)) == REG
6656 && GET_CODE (XEXP (x, 1)) != CONST_INT
6657 && GET_MODE_NUNITS (mode) == 1
6658 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6659 || (/* ??? Assume floating point reg based on mode? */
6660 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6661 && (mode == DFmode || mode == DDmode)))
6662 && !avoiding_indexed_address_p (mode))
6664 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6665 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6667 else if (SPE_VECTOR_MODE (mode)
6668 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6670 if (mode == DImode)
6671 return x;
6672 /* We accept [reg + reg] and [reg + OFFSET]. */
6674 if (GET_CODE (x) == PLUS)
6676 rtx op1 = XEXP (x, 0);
6677 rtx op2 = XEXP (x, 1);
6678 rtx y;
6680 op1 = force_reg (Pmode, op1);
6682 if (GET_CODE (op2) != REG
6683 && (GET_CODE (op2) != CONST_INT
6684 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6685 || (GET_MODE_SIZE (mode) > 8
6686 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6687 op2 = force_reg (Pmode, op2);
6689 /* We can't always do [reg + reg] for these, because [reg +
6690 reg + offset] is not a legitimate addressing mode. */
6691 y = gen_rtx_PLUS (Pmode, op1, op2);
6693 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6694 return force_reg (Pmode, y);
6695 else
6696 return y;
6699 return force_reg (Pmode, x);
6701 else if ((TARGET_ELF
6702 #if TARGET_MACHO
6703 || !MACHO_DYNAMIC_NO_PIC_P
6704 #endif
6706 && TARGET_32BIT
6707 && TARGET_NO_TOC
6708 && ! flag_pic
6709 && GET_CODE (x) != CONST_INT
6710 && GET_CODE (x) != CONST_WIDE_INT
6711 && GET_CODE (x) != CONST_DOUBLE
6712 && CONSTANT_P (x)
6713 && GET_MODE_NUNITS (mode) == 1
6714 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6715 || (/* ??? Assume floating point reg based on mode? */
6716 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6717 && (mode == DFmode || mode == DDmode))))
6719 rtx reg = gen_reg_rtx (Pmode);
6720 if (TARGET_ELF)
6721 emit_insn (gen_elf_high (reg, x));
6722 else
6723 emit_insn (gen_macho_high (reg, x));
6724 return gen_rtx_LO_SUM (Pmode, reg, x);
6726 else if (TARGET_TOC
6727 && GET_CODE (x) == SYMBOL_REF
6728 && constant_pool_expr_p (x)
6729 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6730 return create_TOC_reference (x, NULL_RTX);
6731 else
6732 return x;
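
/* A standalone sketch, not part of GCC, of the high/low split
   performed above for out-of-range constant offsets: HIGH is a
   multiple of 0x10000 after the signed fold (so it fits an addis),
   LOW rides in the D-field of the memory insn, and the two always
   sum back to the original value.  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  static const int64_t vals[] = { 0x12345, -0x9876, 0x7fff8, 0x8000 };
  unsigned int i;

  for (i = 0; i < sizeof vals / sizeof vals[0]; i++)
    {
      int64_t val = vals[i];
      int64_t low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      int64_t high = val - low;

      assert (high + low == val);
      assert ((high & 0xffff) == 0);        /* addis-representable */
      assert (low >= -0x8000 && low < 0x8000);
    }
  return 0;
}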
6735 /* Debug version of rs6000_legitimize_address. */
6736 static rtx
6737 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6739 rtx ret;
6740 rtx insns;
6742 start_sequence ();
6743 ret = rs6000_legitimize_address (x, oldx, mode);
6744 insns = get_insns ();
6745 end_sequence ();
6747 if (ret != x)
6749 fprintf (stderr,
6750 "\nrs6000_legitimize_address: mode %s, old code %s, "
6751 "new code %s, modified\n",
6752 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6753 GET_RTX_NAME (GET_CODE (ret)));
6755 fprintf (stderr, "Original address:\n");
6756 debug_rtx (x);
6758 fprintf (stderr, "oldx:\n");
6759 debug_rtx (oldx);
6761 fprintf (stderr, "New address:\n");
6762 debug_rtx (ret);
6764 if (insns)
6766 fprintf (stderr, "Insns added:\n");
6767 debug_rtx_list (insns, 20);
6770 else
6772 fprintf (stderr,
6773 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6774 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6776 debug_rtx (x);
6779 if (insns)
6780 emit_insn (insns);
6782 return ret;
6785 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6786 We need to emit DTP-relative relocations. */
6788 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6789 static void
6790 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6792 switch (size)
6794 case 4:
6795 fputs ("\t.long\t", file);
6796 break;
6797 case 8:
6798 fputs (DOUBLE_INT_ASM_OP, file);
6799 break;
6800 default:
6801 gcc_unreachable ();
6803 output_addr_const (file, x);
6804 fputs ("@dtprel+0x8000", file);
6807 /* Return true if X is a symbol that refers to real (rather than emulated)
6808 TLS. */
6810 static bool
6811 rs6000_real_tls_symbol_ref_p (rtx x)
6813 return (GET_CODE (x) == SYMBOL_REF
6814 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
6817 /* In the name of slightly smaller debug output, and to cater to
6818 general assembler lossage, recognize various UNSPEC sequences
6819 and turn them back into a direct symbol reference. */
6821 static rtx
6822 rs6000_delegitimize_address (rtx orig_x)
6824 rtx x, y, offset;
6826 orig_x = delegitimize_mem_from_attrs (orig_x);
6827 x = orig_x;
6828 if (MEM_P (x))
6829 x = XEXP (x, 0);
6831 y = x;
6832 if (TARGET_CMODEL != CMODEL_SMALL
6833 && GET_CODE (y) == LO_SUM)
6834 y = XEXP (y, 1);
6836 offset = NULL_RTX;
6837 if (GET_CODE (y) == PLUS
6838 && GET_MODE (y) == Pmode
6839 && CONST_INT_P (XEXP (y, 1)))
6841 offset = XEXP (y, 1);
6842 y = XEXP (y, 0);
6845 if (GET_CODE (y) == UNSPEC
6846 && XINT (y, 1) == UNSPEC_TOCREL)
6848 #ifdef ENABLE_CHECKING
6849 if (REG_P (XVECEXP (y, 0, 1))
6850 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6852 /* All good. */
6854 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6856 /* Weirdness alert. df_note_compute can replace r2 with a
6857 debug_expr when this unspec is in a debug_insn.
6858 Seen in gcc.dg/pr51957-1.c */
6860 else
6862 debug_rtx (orig_x);
6863 abort ();
6865 #endif
6866 y = XVECEXP (y, 0, 0);
6868 #ifdef HAVE_AS_TLS
6869 /* Do not associate thread-local symbols with the original
6870 constant pool symbol. */
6871 if (TARGET_XCOFF
6872 && GET_CODE (y) == SYMBOL_REF
6873 && CONSTANT_POOL_ADDRESS_P (y)
6874 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
6875 return orig_x;
6876 #endif
6878 if (offset != NULL_RTX)
6879 y = gen_rtx_PLUS (Pmode, y, offset);
6880 if (!MEM_P (orig_x))
6881 return y;
6882 else
6883 return replace_equiv_address_nv (orig_x, y);
6886 if (TARGET_MACHO
6887 && GET_CODE (orig_x) == LO_SUM
6888 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6890 y = XEXP (XEXP (orig_x, 1), 0);
6891 if (GET_CODE (y) == UNSPEC
6892 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6893 return XVECEXP (y, 0, 0);
6896 return orig_x;
6899 /* Return true if X shouldn't be emitted into the debug info.
6900 The linker doesn't like .toc section references from
6901 .debug_* sections, so reject .toc section symbols. */
6903 static bool
6904 rs6000_const_not_ok_for_debug_p (rtx x)
6906 if (GET_CODE (x) == SYMBOL_REF
6907 && CONSTANT_POOL_ADDRESS_P (x))
6909 rtx c = get_pool_constant (x);
6910 enum machine_mode cmode = get_pool_mode (x);
6911 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6912 return true;
6915 return false;
6918 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6920 static GTY(()) rtx rs6000_tls_symbol;
6921 static rtx
6922 rs6000_tls_get_addr (void)
6924 if (!rs6000_tls_symbol)
6925 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
6927 return rs6000_tls_symbol;
6930 /* Construct the SYMBOL_REF for TLS GOT references. */
6932 static GTY(()) rtx rs6000_got_symbol;
6933 static rtx
6934 rs6000_got_sym (void)
6936 if (!rs6000_got_symbol)
6938 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
6939 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
6940 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
6943 return rs6000_got_symbol;
6946 /* AIX Thread-Local Address support. */
6948 static rtx
6949 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
6951 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
6952 const char *name;
6953 char *tlsname;
6955 name = XSTR (addr, 0);
6956 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
6957 or will be placed in the TLS private data section. */
6958 if (name[strlen (name) - 1] != ']'
6959 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
6960 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
6962 tlsname = XALLOCAVEC (char, strlen (name) + 4);
6963 strcpy (tlsname, name);
6964 strcat (tlsname,
6965 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
6966 tlsaddr = copy_rtx (addr);
6967 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
6969 else
6970 tlsaddr = addr;
6972 /* Place addr into TOC constant pool. */
6973 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
6975 /* Output the TOC entry and create the MEM referencing the value. */
6976 if (constant_pool_expr_p (XEXP (sym, 0))
6977 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
6979 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
6980 mem = gen_const_mem (Pmode, tocref);
6981 set_mem_alias_set (mem, get_TOC_alias_set ());
6983 else
6984 return sym;
6986 /* Use global-dynamic for local-dynamic. */
6987 if (model == TLS_MODEL_GLOBAL_DYNAMIC
6988 || model == TLS_MODEL_LOCAL_DYNAMIC)
6990 /* Create new TOC reference for @m symbol. */
6991 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
6992 tlsname = XALLOCAVEC (char, strlen (name) + 1);
6993 strcpy (tlsname, "*LCM");
6994 strcat (tlsname, name + 3);
6995 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
6996 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
6997 tocref = create_TOC_reference (modaddr, NULL_RTX);
6998 rtx modmem = gen_const_mem (Pmode, tocref);
6999 set_mem_alias_set (modmem, get_TOC_alias_set ());
7001 rtx modreg = gen_reg_rtx (Pmode);
7002 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
7004 tmpreg = gen_reg_rtx (Pmode);
7005 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7007 dest = gen_reg_rtx (Pmode);
7008 if (TARGET_32BIT)
7009 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
7010 else
7011 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
7012 return dest;
7014 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
7015 else if (TARGET_32BIT)
7017 tlsreg = gen_reg_rtx (SImode);
7018 emit_insn (gen_tls_get_tpointer (tlsreg));
7020 else
7021 tlsreg = gen_rtx_REG (DImode, 13);
7023 /* Load the TOC value into temporary register. */
7024 tmpreg = gen_reg_rtx (Pmode);
7025 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7026 set_unique_reg_note (get_last_insn (), REG_EQUAL,
7027 gen_rtx_MINUS (Pmode, addr, tlsreg));
7029 /* Add TOC symbol value to TLS pointer. */
7030 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
7032 return dest;
7035 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7036 this (thread-local) address. */
7038 static rtx
7039 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
7041 rtx dest, insn;
7043 if (TARGET_XCOFF)
7044 return rs6000_legitimize_tls_address_aix (addr, model);
7046 dest = gen_reg_rtx (Pmode);
7047 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
7049 rtx tlsreg;
7051 if (TARGET_64BIT)
7053 tlsreg = gen_rtx_REG (Pmode, 13);
7054 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7056 else
7058 tlsreg = gen_rtx_REG (Pmode, 2);
7059 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7061 emit_insn (insn);
7063 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7065 rtx tlsreg, tmp;
7067 tmp = gen_reg_rtx (Pmode);
7068 if (TARGET_64BIT)
7070 tlsreg = gen_rtx_REG (Pmode, 13);
7071 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7073 else
7075 tlsreg = gen_rtx_REG (Pmode, 2);
7076 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7078 emit_insn (insn);
7079 if (TARGET_64BIT)
7080 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7081 else
7082 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7083 emit_insn (insn);
7085 else
7087 rtx r3, got, tga, tmp1, tmp2, call_insn;
7089 /* We currently use relocations like @got@tlsgd for tls, which
7090 means the linker will handle allocation of tls entries, placing
7091 them in the .got section. So use a pointer to the .got section,
7092 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7093 or to secondary GOT sections used by 32-bit -fPIC. */
7094 if (TARGET_64BIT)
7095 got = gen_rtx_REG (Pmode, 2);
7096 else
7098 if (flag_pic == 1)
7099 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7100 else
7102 rtx gsym = rs6000_got_sym ();
7103 got = gen_reg_rtx (Pmode);
7104 if (flag_pic == 0)
7105 rs6000_emit_move (got, gsym, Pmode);
7106 else
7108 rtx mem, lab, last;
7110 tmp1 = gen_reg_rtx (Pmode);
7111 tmp2 = gen_reg_rtx (Pmode);
7112 mem = gen_const_mem (Pmode, tmp1);
7113 lab = gen_label_rtx ();
7114 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7115 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7116 if (TARGET_LINK_STACK)
7117 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7118 emit_move_insn (tmp2, mem);
7119 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7120 set_unique_reg_note (last, REG_EQUAL, gsym);
7125 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7127 tga = rs6000_tls_get_addr ();
7128 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7129 1, const0_rtx, Pmode);
7131 r3 = gen_rtx_REG (Pmode, 3);
7132 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7134 if (TARGET_64BIT)
7135 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7136 else
7137 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7139 else if (DEFAULT_ABI == ABI_V4)
7140 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7141 else
7142 gcc_unreachable ();
7143 call_insn = last_call_insn ();
7144 PATTERN (call_insn) = insn;
7145 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7146 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7147 pic_offset_table_rtx);
7149 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7151 tga = rs6000_tls_get_addr ();
7152 tmp1 = gen_reg_rtx (Pmode);
7153 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7154 1, const0_rtx, Pmode);
7156 r3 = gen_rtx_REG (Pmode, 3);
7157 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7159 if (TARGET_64BIT)
7160 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7161 else
7162 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7164 else if (DEFAULT_ABI == ABI_V4)
7165 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7166 else
7167 gcc_unreachable ();
7168 call_insn = last_call_insn ();
7169 PATTERN (call_insn) = insn;
7170 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7171 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7172 pic_offset_table_rtx);
7174 if (rs6000_tls_size == 16)
7176 if (TARGET_64BIT)
7177 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7178 else
7179 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7181 else if (rs6000_tls_size == 32)
7183 tmp2 = gen_reg_rtx (Pmode);
7184 if (TARGET_64BIT)
7185 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7186 else
7187 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7188 emit_insn (insn);
7189 if (TARGET_64BIT)
7190 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7191 else
7192 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7194 else
7196 tmp2 = gen_reg_rtx (Pmode);
7197 if (TARGET_64BIT)
7198 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7199 else
7200 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7201 emit_insn (insn);
7202 insn = gen_rtx_SET (Pmode, dest,
7203 gen_rtx_PLUS (Pmode, tmp2, tmp1));
7205 emit_insn (insn);
7207 else
7209 /* IE, or 64-bit offset LE. */
7210 tmp2 = gen_reg_rtx (Pmode);
7211 if (TARGET_64BIT)
7212 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7213 else
7214 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7215 emit_insn (insn);
7216 if (TARGET_64BIT)
7217 insn = gen_tls_tls_64 (dest, tmp2, addr);
7218 else
7219 insn = gen_tls_tls_32 (dest, tmp2, addr);
7220 emit_insn (insn);
7224 return dest;
7227 /* Return 1 if X contains a thread-local symbol. */
7229 static bool
7230 rs6000_tls_referenced_p (rtx x)
7232 if (! TARGET_HAVE_TLS)
7233 return false;
7235 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
7238 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7240 static bool
7241 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7243 if (GET_CODE (x) == HIGH
7244 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7245 return true;
7247 /* A TLS symbol in the TOC cannot contain a sum. */
7248 if (GET_CODE (x) == CONST
7249 && GET_CODE (XEXP (x, 0)) == PLUS
7250 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7251 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7252 return true;
7254 /* Do not place an ELF TLS symbol in the constant pool. */
7255 return TARGET_ELF && rs6000_tls_referenced_p (x);
7258 /* Return 1 if *X is a thread-local symbol. This is the same as
7259 rs6000_tls_symbol_ref except for the type of the unused argument. */
7261 static int
7262 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
7264 return RS6000_SYMBOL_REF_TLS_P (*x);
7267 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7268 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7269 can be addressed relative to the toc pointer. */
7271 static bool
7272 use_toc_relative_ref (rtx sym)
7274 return ((constant_pool_expr_p (sym)
7275 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7276 get_pool_mode (sym)))
7277 || (TARGET_CMODEL == CMODEL_MEDIUM
7278 && SYMBOL_REF_LOCAL_P (sym)));
7281 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7282 replace the input X, or the original X if no replacement is called for.
7283 The output parameter *WIN is 1 if the calling macro should goto WIN,
7284 0 if it should not.
7286 For RS/6000, we wish to handle large displacements off a base
7287 register by splitting the addend across an addi/addis pair and the mem insn.
7288 This cuts the number of extra insns needed from 3 to 1.
7290 On Darwin, we use this to generate code for floating point constants.
7291 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7292 The Darwin code is inside #if TARGET_MACHO because only then are the
7293 machopic_* functions defined. */
7294 static rtx
7295 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
7296 int opnum, int type,
7297 int ind_levels ATTRIBUTE_UNUSED, int *win)
7299 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7301 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7302 DFmode/DImode MEM. */
7303 if (reg_offset_p
7304 && opnum == 1
7305 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7306 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7307 reg_offset_p = false;
7309 /* We must recognize output that we have already generated ourselves. */
7310 if (GET_CODE (x) == PLUS
7311 && GET_CODE (XEXP (x, 0)) == PLUS
7312 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7313 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7314 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7316 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7317 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7318 opnum, (enum reload_type) type);
7319 *win = 1;
7320 return x;
7323 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7324 if (GET_CODE (x) == LO_SUM
7325 && GET_CODE (XEXP (x, 0)) == HIGH)
7327 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7328 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7329 opnum, (enum reload_type) type);
7330 *win = 1;
7331 return x;
7334 #if TARGET_MACHO
7335 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7336 && GET_CODE (x) == LO_SUM
7337 && GET_CODE (XEXP (x, 0)) == PLUS
7338 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7339 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7340 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7341 && machopic_operand_p (XEXP (x, 1)))
7343 /* Result of previous invocation of this function on Darwin
7344 floating point constant. */
7345 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7346 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7347 opnum, (enum reload_type) type);
7348 *win = 1;
7349 return x;
7351 #endif
7353 if (TARGET_CMODEL != CMODEL_SMALL
7354 && reg_offset_p
7355 && small_toc_ref (x, VOIDmode))
7357 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7358 x = gen_rtx_LO_SUM (Pmode, hi, x);
7359 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7360 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7361 opnum, (enum reload_type) type);
7362 *win = 1;
7363 return x;
7366 if (GET_CODE (x) == PLUS
7367 && GET_CODE (XEXP (x, 0)) == REG
7368 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7369 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7370 && GET_CODE (XEXP (x, 1)) == CONST_INT
7371 && reg_offset_p
7372 && !SPE_VECTOR_MODE (mode)
7373 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7374 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7376 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7377 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7378 HOST_WIDE_INT high
7379 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
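/* LOW is VAL sign-extended from its bottom 16 bits; HIGH is the
remainder, sign-extended to 32 bits, so HIGH + LOW reconstructs
VAL whenever VAL fits in 32 bits. */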
7381 /* Check for 32-bit overflow. */
7382 if (high + low != val)
7384 *win = 0;
7385 return x;
7388 /* Reload the high part into a base reg; leave the low part
7389 in the mem directly. */
7391 x = gen_rtx_PLUS (GET_MODE (x),
7392 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7393 GEN_INT (high)),
7394 GEN_INT (low));
7396 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7397 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7398 opnum, (enum reload_type) type);
7399 *win = 1;
7400 return x;
7403 if (GET_CODE (x) == SYMBOL_REF
7404 && reg_offset_p
7405 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7406 && !SPE_VECTOR_MODE (mode)
7407 #if TARGET_MACHO
7408 && DEFAULT_ABI == ABI_DARWIN
7409 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7410 && machopic_symbol_defined_p (x)
7411 #else
7412 && DEFAULT_ABI == ABI_V4
7413 && !flag_pic
7414 #endif
7415 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7416 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7417 without fprs.
7418 ??? Assume floating point reg based on mode? This assumption is
7419 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7420 where reload ends up doing a DFmode load of a constant from
7421 mem using two gprs. Unfortunately, at this point reload
7422 hasn't yet selected regs so poking around in reload data
7423 won't help and even if we could figure out the regs reliably,
7424 we'd still want to allow this transformation when the mem is
7425 naturally aligned. Since we say the address is good here, we
7426 can't disable offsets from LO_SUMs in mem_operand_gpr.
7427 FIXME: Allow offset from lo_sum for other modes too, when
7428 mem is sufficiently aligned. */
7429 && mode != TFmode
7430 && mode != TDmode
7431 && (mode != TImode || !TARGET_VSX_TIMODE)
7432 && mode != PTImode
7433 && (mode != DImode || TARGET_POWERPC64)
7434 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7435 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7437 #if TARGET_MACHO
7438 if (flag_pic)
7440 rtx offset = machopic_gen_offset (x);
7441 x = gen_rtx_LO_SUM (GET_MODE (x),
7442 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7443 gen_rtx_HIGH (Pmode, offset)), offset);
7445 else
7446 #endif
7447 x = gen_rtx_LO_SUM (GET_MODE (x),
7448 gen_rtx_HIGH (Pmode, x), x);
7450 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7451 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7452 opnum, (enum reload_type) type);
7453 *win = 1;
7454 return x;
7457 /* Reload an offset address wrapped by an AND that represents the
7458 masking of the lower bits. Strip the outer AND and let reload
7459 convert the offset address into an indirect address. For VSX,
7460 force reload to create the address with an AND in a separate
7461 register, because we can't guarantee an altivec register will
7462 be used. */
7463 if (VECTOR_MEM_ALTIVEC_P (mode)
7464 && GET_CODE (x) == AND
7465 && GET_CODE (XEXP (x, 0)) == PLUS
7466 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7467 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7468 && GET_CODE (XEXP (x, 1)) == CONST_INT
7469 && INTVAL (XEXP (x, 1)) == -16)
7471 x = XEXP (x, 0);
7472 *win = 1;
7473 return x;
7476 if (TARGET_TOC
7477 && reg_offset_p
7478 && GET_CODE (x) == SYMBOL_REF
7479 && use_toc_relative_ref (x))
7481 x = create_TOC_reference (x, NULL_RTX);
7482 if (TARGET_CMODEL != CMODEL_SMALL)
7483 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7484 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7485 opnum, (enum reload_type) type);
7486 *win = 1;
7487 return x;
7489 *win = 0;
7490 return x;
7493 /* Debug version of rs6000_legitimize_reload_address. */
7494 static rtx
7495 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7496 int opnum, int type,
7497 int ind_levels, int *win)
7499 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7500 ind_levels, win);
7501 fprintf (stderr,
7502 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7503 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7504 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7505 debug_rtx (x);
7507 if (x == ret)
7508 fprintf (stderr, "Same address returned\n");
7509 else if (!ret)
7510 fprintf (stderr, "NULL returned\n");
7511 else
7513 fprintf (stderr, "New address:\n");
7514 debug_rtx (ret);
7517 return ret;
7520 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7521 that is a valid memory address for an instruction.
7522 The MODE argument is the machine mode for the MEM expression
7523 that wants to use this address.
7525 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7526 refers to a constant pool entry of an address (or the sum of it
7527 plus a constant), a short (16-bit signed) constant plus a register,
7528 the sum of two registers, or a register indirect, possibly with an
7529 auto-increment. For DFmode, DDmode and DImode with a constant plus
7530 register, we must ensure that both words are addressable, or that
7531 PowerPC64 is used with a word-aligned offset.
7533 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7534 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7535 because adjacent memory cells are accessed by adding word-sized offsets
7536 during assembly output. */
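/* Illustrative RTL for the accepted forms:
(reg), (plus (reg) (const_int 16)), (plus (reg) (reg)),
(lo_sum (reg) (symbol_ref)), and with TARGET_UPDATE e.g.
(pre_inc (reg)). */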
7537 static bool
7538 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7540 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7542 /* If this is an unaligned lvx/stvx type address, discard the outer AND. */
7543 if (VECTOR_MEM_ALTIVEC_P (mode)
7544 && GET_CODE (x) == AND
7545 && GET_CODE (XEXP (x, 1)) == CONST_INT
7546 && INTVAL (XEXP (x, 1)) == -16)
7547 x = XEXP (x, 0);
7549 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7550 return 0;
7551 if (legitimate_indirect_address_p (x, reg_ok_strict))
7552 return 1;
7553 if (TARGET_UPDATE
7554 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7555 && mode_supports_pre_incdec_p (mode)
7556 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7557 return 1;
7558 if (virtual_stack_registers_memory_p (x))
7559 return 1;
7560 if (reg_offset_p && legitimate_small_data_p (mode, x))
7561 return 1;
7562 if (reg_offset_p
7563 && legitimate_constant_pool_address_p (x, mode,
7564 reg_ok_strict || lra_in_progress))
7565 return 1;
7566 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7567 allow register indirect addresses. This will allow the values to go in
7568 either GPRs or VSX registers without reloading. The vector types would
7569 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7570 somewhat split, in that some uses are GPR based, and some VSX based. */
7571 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7572 return 0;
7573 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7574 if (! reg_ok_strict
7575 && reg_offset_p
7576 && GET_CODE (x) == PLUS
7577 && GET_CODE (XEXP (x, 0)) == REG
7578 && (XEXP (x, 0) == virtual_stack_vars_rtx
7579 || XEXP (x, 0) == arg_pointer_rtx)
7580 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7581 return 1;
7582 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7583 return 1;
7584 if (mode != TFmode
7585 && mode != TDmode
7586 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7587 || TARGET_POWERPC64
7588 || (mode != DFmode && mode != DDmode)
7589 || (TARGET_E500_DOUBLE && mode != DDmode))
7590 && (TARGET_POWERPC64 || mode != DImode)
7591 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7592 && mode != PTImode
7593 && !avoiding_indexed_address_p (mode)
7594 && legitimate_indexed_address_p (x, reg_ok_strict))
7595 return 1;
7596 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7597 && mode_supports_pre_modify_p (mode)
7598 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7599 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7600 reg_ok_strict, false)
7601 || (!avoiding_indexed_address_p (mode)
7602 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7603 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7604 return 1;
7605 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7606 return 1;
7607 return 0;
7610 /* Debug version of rs6000_legitimate_address_p. */
7611 static bool
7612 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7613 bool reg_ok_strict)
7615 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7616 fprintf (stderr,
7617 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7618 "strict = %d, reload = %s, code = %s\n",
7619 ret ? "true" : "false",
7620 GET_MODE_NAME (mode),
7621 reg_ok_strict,
7622 (reload_completed
7623 ? "after"
7624 : (reload_in_progress ? "progress" : "before")),
7625 GET_RTX_NAME (GET_CODE (x)));
7626 debug_rtx (x);
7628 return ret;
7631 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7633 static bool
7634 rs6000_mode_dependent_address_p (const_rtx addr,
7635 addr_space_t as ATTRIBUTE_UNUSED)
7637 return rs6000_mode_dependent_address_ptr (addr);
7640 /* Return true if ADDR (a legitimate address expression)
7641 has an effect that depends on the machine mode it is used for.
7643 On the RS/6000 this is true of all integral offsets (since AltiVec
7644 and VSX modes don't allow them) and of pre-increment and pre-decrement.
7646 ??? Except that due to conceptual problems in offsettable_address_p
7647 we can't really report the problems of integral offsets. So leave
7648 this assuming that the adjustable offset must be valid for the
7649 sub-words of a TFmode operand, which is what we had before. */
7651 static bool
7652 rs6000_mode_dependent_address (const_rtx addr)
7654 switch (GET_CODE (addr))
7656 case PLUS:
7657 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7658 is considered a legitimate address before reload, so there
7659 are no offset restrictions in that case. Note that this
7660 condition is safe in strict mode because any address involving
7661 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7662 been rejected as illegitimate. */
7663 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7664 && XEXP (addr, 0) != arg_pointer_rtx
7665 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7667 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
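/* The offset is mode-dependent unless both VAL and VAL plus the
largest sub-word offset of a multi-word access (8 on PowerPC64,
else 12, for the last word of a TFmode operand) fit in a signed
16-bit displacement. */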
7668 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7670 break;
7672 case LO_SUM:
7673 /* Anything in the constant pool is sufficiently aligned that
7674 all bytes have the same high part address. */
7675 return !legitimate_constant_pool_address_p (addr, QImode, false);
7677 /* Auto-increment cases are now treated generically in recog.c. */
7678 case PRE_MODIFY:
7679 return TARGET_UPDATE;
7681 /* AND is only allowed in AltiVec loads. */
7682 case AND:
7683 return true;
7685 default:
7686 break;
7689 return false;
7692 /* Debug version of rs6000_mode_dependent_address. */
7693 static bool
7694 rs6000_debug_mode_dependent_address (const_rtx addr)
7696 bool ret = rs6000_mode_dependent_address (addr);
7698 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7699 ret ? "true" : "false");
7700 debug_rtx (addr);
7702 return ret;
7705 /* Implement FIND_BASE_TERM. */
7707 rtx
7708 rs6000_find_base_term (rtx op)
7710 rtx base;
7712 base = op;
7713 if (GET_CODE (base) == CONST)
7714 base = XEXP (base, 0);
7715 if (GET_CODE (base) == PLUS)
7716 base = XEXP (base, 0);
7717 if (GET_CODE (base) == UNSPEC)
7718 switch (XINT (base, 1))
7720 case UNSPEC_TOCREL:
7721 case UNSPEC_MACHOPIC_OFFSET:
7722 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7723 for aliasing purposes. */
7724 return XVECEXP (base, 0, 0);
7727 return op;
7730 /* More elaborate version of recog's offsettable_memref_p predicate
7731 that works around the ??? note of rs6000_mode_dependent_address.
7732 In particular it accepts
7734 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7736 in 32-bit mode, which the recog predicate rejects. */
7738 static bool
7739 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7741 bool worst_case;
7743 if (!MEM_P (op))
7744 return false;
7746 /* First mimic offsettable_memref_p. */
7747 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7748 return true;
7750 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7751 the latter predicate knows nothing about the mode of the memory
7752 reference and, therefore, assumes that it is the largest supported
7753 mode (TFmode). As a consequence, legitimate offsettable memory
7754 references are rejected. rs6000_legitimate_offset_address_p contains
7755 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7756 at least with a little bit of help here given that we know the
7757 actual registers used. */
7758 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7759 || GET_MODE_SIZE (reg_mode) == 4);
7760 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7761 true, worst_case);
7764 /* Change register usage conditional on target flags. */
7765 static void
7766 rs6000_conditional_register_usage (void)
7768 int i;
7770 if (TARGET_DEBUG_TARGET)
7771 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7773 /* Set MQ register fixed (already call_used) so that it will not be
7774 allocated. */
7775 fixed_regs[64] = 1;
7777 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7778 if (TARGET_64BIT)
7779 fixed_regs[13] = call_used_regs[13]
7780 = call_really_used_regs[13] = 1;
7782 /* Conditionally disable FPRs. */
7783 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7784 for (i = 32; i < 64; i++)
7785 fixed_regs[i] = call_used_regs[i]
7786 = call_really_used_regs[i] = 1;
7788 /* The TOC register is not killed across calls in a way that is
7789 visible to the compiler. */
7790 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7791 call_really_used_regs[2] = 0;
7793 if (DEFAULT_ABI == ABI_V4
7794 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7795 && flag_pic == 2)
7796 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7798 if (DEFAULT_ABI == ABI_V4
7799 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7800 && flag_pic == 1)
7801 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7802 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7803 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7805 if (DEFAULT_ABI == ABI_DARWIN
7806 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7807 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7808 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7809 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7811 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7812 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7813 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7815 if (TARGET_SPE)
7817 global_regs[SPEFSCR_REGNO] = 1;
7818 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7819 registers in prologues and epilogues. We no longer use r14
7820 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7821 pool for link-compatibility with older versions of GCC. Once
7822 "old" code has died out, we can return r14 to the allocation
7823 pool. */
7824 fixed_regs[14]
7825 = call_used_regs[14]
7826 = call_really_used_regs[14] = 1;
7829 if (!TARGET_ALTIVEC && !TARGET_VSX)
7831 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7832 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7833 call_really_used_regs[VRSAVE_REGNO] = 1;
7836 if (TARGET_ALTIVEC || TARGET_VSX)
7837 global_regs[VSCR_REGNO] = 1;
7839 if (TARGET_ALTIVEC_ABI)
7841 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7842 call_used_regs[i] = call_really_used_regs[i] = 1;
7844 /* AIX reserves VR20:31 in non-extended ABI mode. */
7845 if (TARGET_XCOFF)
7846 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7847 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7852 /* Try to output insns to set TARGET equal to the constant C if it can
7853 be done in less than N insns. Do all computations in MODE.
7854 Returns the place where the output has been placed if it can be
7855 done and the insns have been emitted. If it would take more than N
7856 insns, zero is returned and no insns are emitted. */
7858 rtx
7859 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
7860 rtx source, int n ATTRIBUTE_UNUSED)
7862 rtx result, insn, set;
7863 HOST_WIDE_INT c0, c1;
7865 switch (mode)
7867 case QImode:
7868 case HImode:
7869 if (dest == NULL)
7870 dest = gen_reg_rtx (mode);
7871 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7872 return dest;
7874 case SImode:
7875 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7877 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
7878 GEN_INT (INTVAL (source)
7879 & (~ (HOST_WIDE_INT) 0xffff))));
7880 emit_insn (gen_rtx_SET (VOIDmode, dest,
7881 gen_rtx_IOR (SImode, copy_rtx (result),
7882 GEN_INT (INTVAL (source) & 0xffff))));
7883 result = dest;
7884 break;
7886 case DImode:
7887 switch (GET_CODE (source))
7889 case CONST_INT:
7890 c0 = INTVAL (source);
7891 c1 = -(c0 < 0);
7892 break;
7894 default:
7895 gcc_unreachable ();
7898 result = rs6000_emit_set_long_const (dest, c0, c1);
7899 break;
7901 default:
7902 gcc_unreachable ();
7905 insn = get_last_insn ();
7906 set = single_set (insn);
7907 if (! CONSTANT_P (SET_SRC (set)))
7908 set_unique_reg_note (insn, REG_EQUAL, source);
7910 return result;
7913 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
7914 fall back to a straightforward decomposition. We do this to avoid
7915 exponential run times encountered when looking for longer sequences
7916 with rs6000_emit_set_const. */
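/* As a sketch of the worst case (all four 16-bit groups nonzero),
e.g. for the illustrative constant 0x123456789abcdef0, the emitted
RTL corresponds to a five-instruction sequence along the lines of
lis rD, 0x1234
ori rD, rD, 0x5678
sldi rD, rD, 32
oris rD, rD, 0x9abc
ori rD, rD, 0xdef0 */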
7917 static rtx
7918 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
7920 if (!TARGET_POWERPC64)
7922 rtx operand1, operand2;
7924 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
7925 DImode);
7926 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
7927 DImode);
7928 emit_move_insn (operand1, GEN_INT (c1));
7929 emit_move_insn (operand2, GEN_INT (c2));
7931 else
7933 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7935 ud1 = c1 & 0xffff;
7936 ud2 = (c1 & 0xffff0000) >> 16;
7937 c2 = c1 >> 32;
7938 ud3 = c2 & 0xffff;
7939 ud4 = (c2 & 0xffff0000) >> 16;
7941 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7942 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7943 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7945 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7946 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7948 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7949 - 0x80000000));
7950 if (ud1 != 0)
7951 emit_move_insn (copy_rtx (dest),
7952 gen_rtx_IOR (DImode, copy_rtx (dest),
7953 GEN_INT (ud1)));
7955 else if (ud3 == 0 && ud4 == 0)
7957 gcc_assert (ud2 & 0x8000);
7958 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
7959 - 0x80000000));
7960 if (ud1 != 0)
7961 emit_move_insn (copy_rtx (dest),
7962 gen_rtx_IOR (DImode, copy_rtx (dest),
7963 GEN_INT (ud1)));
7964 emit_move_insn (copy_rtx (dest),
7965 gen_rtx_ZERO_EXTEND (DImode,
7966 gen_lowpart (SImode,
7967 copy_rtx (dest))));
7969 else if ((ud4 == 0xffff && (ud3 & 0x8000))
7970 || (ud4 == 0 && ! (ud3 & 0x8000)))
7972 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
7973 - 0x80000000));
7974 if (ud2 != 0)
7975 emit_move_insn (copy_rtx (dest),
7976 gen_rtx_IOR (DImode, copy_rtx (dest),
7977 GEN_INT (ud2)));
7978 emit_move_insn (copy_rtx (dest),
7979 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7980 GEN_INT (16)));
7981 if (ud1 != 0)
7982 emit_move_insn (copy_rtx (dest),
7983 gen_rtx_IOR (DImode, copy_rtx (dest),
7984 GEN_INT (ud1)));
7986 else
7988 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
7989 - 0x80000000));
7990 if (ud3 != 0)
7991 emit_move_insn (copy_rtx (dest),
7992 gen_rtx_IOR (DImode, copy_rtx (dest),
7993 GEN_INT (ud3)));
7995 emit_move_insn (copy_rtx (dest),
7996 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
7997 GEN_INT (32)));
7998 if (ud2 != 0)
7999 emit_move_insn (copy_rtx (dest),
8000 gen_rtx_IOR (DImode, copy_rtx (dest),
8001 GEN_INT (ud2 << 16)));
8002 if (ud1 != 0)
8003 emit_move_insn (copy_rtx (dest),
8004 gen_rtx_IOR (DImode, copy_rtx (dest),
8005 GEN_INT (ud1)));
8008 return dest;
8011 /* Helper for the following. Get rid of [r+r] memory refs
8012 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
8014 static void
8015 rs6000_eliminate_indexed_memrefs (rtx operands[2])
8017 if (reload_in_progress)
8018 return;
8020 if (GET_CODE (operands[0]) == MEM
8021 && GET_CODE (XEXP (operands[0], 0)) != REG
8022 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
8023 GET_MODE (operands[0]), false))
8024 operands[0]
8025 = replace_equiv_address (operands[0],
8026 copy_addr_to_reg (XEXP (operands[0], 0)));
8028 if (GET_CODE (operands[1]) == MEM
8029 && GET_CODE (XEXP (operands[1], 0)) != REG
8030 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
8031 GET_MODE (operands[1]), false))
8032 operands[1]
8033 = replace_equiv_address (operands[1],
8034 copy_addr_to_reg (XEXP (operands[1], 0)));
8037 /* Generate a vector of constants to permute MODE for a little-endian
8038 storage operation by swapping the two halves of a vector. */
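/* For example, V4SImode yields the element selection { 2, 3, 0, 1 }
and V2DImode yields { 1, 0 }; in both cases the two doubleword
halves of the vector trade places. */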
8039 static rtvec
8040 rs6000_const_vec (enum machine_mode mode)
8042 int i, subparts;
8043 rtvec v;
8045 switch (mode)
8047 case V1TImode:
8048 subparts = 1;
8049 break;
8050 case V2DFmode:
8051 case V2DImode:
8052 subparts = 2;
8053 break;
8054 case V4SFmode:
8055 case V4SImode:
8056 subparts = 4;
8057 break;
8058 case V8HImode:
8059 subparts = 8;
8060 break;
8061 case V16QImode:
8062 subparts = 16;
8063 break;
8064 default:
8065 gcc_unreachable();
8068 v = rtvec_alloc (subparts);
8070 for (i = 0; i < subparts / 2; ++i)
8071 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8072 for (i = subparts / 2; i < subparts; ++i)
8073 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8075 return v;
8078 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8079 for a VSX load or store operation. */
8080 rtx
8081 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
8083 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8084 return gen_rtx_VEC_SELECT (mode, source, par);
8087 /* Emit a little-endian load from vector memory location SOURCE to VSX
8088 register DEST in mode MODE. The load is done with two permuting
8089 insns that represent an lxvd2x and xxpermdi. */
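/* On a little-endian target the pair amounts to something like
lxvd2x vTMP, 0, rADDR
xxpermdi vDEST, vTMP, vTMP, 2
where selector 2 swaps the two doublewords back into element order
(register names here are purely illustrative). */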
8090 void
8091 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
8093 rtx tmp, permute_mem, permute_reg;
8095 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8096 V1TImode). */
8097 if (mode == TImode || mode == V1TImode)
8099 mode = V2DImode;
8100 dest = gen_lowpart (V2DImode, dest);
8101 source = adjust_address (source, V2DImode, 0);
8104 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8105 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8106 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8107 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
8108 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
8111 /* Emit a little-endian store to vector memory location DEST from VSX
8112 register SOURCE in mode MODE. The store is done with two permuting
8113 insns that represent an xxpermdi and an stxvd2x. */
8114 void
8115 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
8117 rtx tmp, permute_src, permute_tmp;
8119 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8120 V1TImode). */
8121 if (mode == TImode || mode == V1TImode)
8123 mode = V2DImode;
8124 dest = adjust_address (dest, V2DImode, 0);
8125 source = gen_lowpart (V2DImode, source);
8128 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8129 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8130 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8131 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
8132 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
8135 /* Emit a sequence representing a little-endian VSX load or store,
8136 moving data from SOURCE to DEST in mode MODE. This is done
8137 separately from rs6000_emit_move to ensure it is called only
8138 during expand. LE VSX loads and stores introduced later are
8139 handled with a split. The expand-time RTL generation allows
8140 us to optimize away redundant pairs of register-permutes. */
8141 void
8142 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
8144 gcc_assert (!BYTES_BIG_ENDIAN
8145 && VECTOR_MEM_VSX_P (mode)
8146 && !gpr_or_gpr_p (dest, source)
8147 && (MEM_P (source) ^ MEM_P (dest)));
8149 if (MEM_P (source))
8151 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8152 rs6000_emit_le_vsx_load (dest, source, mode);
8154 else
8156 if (!REG_P (source))
8157 source = force_reg (mode, source);
8158 rs6000_emit_le_vsx_store (dest, source, mode);
8162 /* Emit a move from SOURCE to DEST in mode MODE. */
8163 void
8164 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
8166 rtx operands[2];
8167 operands[0] = dest;
8168 operands[1] = source;
8170 if (TARGET_DEBUG_ADDR)
8172 fprintf (stderr,
8173 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8174 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8175 GET_MODE_NAME (mode),
8176 reload_in_progress,
8177 reload_completed,
8178 can_create_pseudo_p ());
8179 debug_rtx (dest);
8180 fprintf (stderr, "source:\n");
8181 debug_rtx (source);
8184 /* Sanity checks. Check that we get CONST_WIDE_INT only when we should. */
8185 if (CONST_WIDE_INT_P (operands[1])
8186 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8188 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8189 gcc_unreachable ();
8192 /* Check if GCC is setting up a block move that will end up using FP
8193 registers as temporaries. We must make sure this is acceptable. */
8194 if (GET_CODE (operands[0]) == MEM
8195 && GET_CODE (operands[1]) == MEM
8196 && mode == DImode
8197 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8198 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8199 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8200 ? 32 : MEM_ALIGN (operands[0])))
8201 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8202 ? 32
8203 : MEM_ALIGN (operands[1]))))
8204 && ! MEM_VOLATILE_P (operands [0])
8205 && ! MEM_VOLATILE_P (operands [1]))
8207 emit_move_insn (adjust_address (operands[0], SImode, 0),
8208 adjust_address (operands[1], SImode, 0));
8209 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8210 adjust_address (copy_rtx (operands[1]), SImode, 4));
8211 return;
8214 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8215 && !gpc_reg_operand (operands[1], mode))
8216 operands[1] = force_reg (mode, operands[1]);
8218 /* Recognize the case where operands[1] is a reference to thread-local
8219 data and load its address to a register. */
8220 if (rs6000_tls_referenced_p (operands[1]))
8222 enum tls_model model;
8223 rtx tmp = operands[1];
8224 rtx addend = NULL;
8226 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8228 addend = XEXP (XEXP (tmp, 0), 1);
8229 tmp = XEXP (XEXP (tmp, 0), 0);
8232 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8233 model = SYMBOL_REF_TLS_MODEL (tmp);
8234 gcc_assert (model != 0);
8236 tmp = rs6000_legitimize_tls_address (tmp, model);
8237 if (addend)
8239 tmp = gen_rtx_PLUS (mode, tmp, addend);
8240 tmp = force_operand (tmp, operands[0]);
8242 operands[1] = tmp;
8245 /* Handle the case where reload calls us with an invalid address. */
8246 if (reload_in_progress && mode == Pmode
8247 && (! general_operand (operands[1], mode)
8248 || ! nonimmediate_operand (operands[0], mode)))
8249 goto emit_set;
8251 /* 128-bit constant floating-point values on Darwin should really be
8252 loaded as two parts. */
8253 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8254 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8256 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8257 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8258 DFmode);
8259 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8260 GET_MODE_SIZE (DFmode)),
8261 simplify_gen_subreg (DFmode, operands[1], mode,
8262 GET_MODE_SIZE (DFmode)),
8263 DFmode);
8264 return;
8267 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8268 cfun->machine->sdmode_stack_slot =
8269 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8272 if (lra_in_progress
8273 && mode == SDmode
8274 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8275 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8276 && (REG_P (operands[1])
8277 || (GET_CODE (operands[1]) == SUBREG
8278 && REG_P (SUBREG_REG (operands[1])))))
8280 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8281 ? SUBREG_REG (operands[1]) : operands[1]);
8282 enum reg_class cl;
8284 if (regno >= FIRST_PSEUDO_REGISTER)
8286 cl = reg_preferred_class (regno);
8287 gcc_assert (cl != NO_REGS);
8288 regno = ira_class_hard_regs[cl][0];
8290 if (FP_REGNO_P (regno))
8292 if (GET_MODE (operands[0]) != DDmode)
8293 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8294 emit_insn (gen_movsd_store (operands[0], operands[1]));
8296 else if (INT_REGNO_P (regno))
8297 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8298 else
8299 gcc_unreachable();
8300 return;
8302 if (lra_in_progress
8303 && mode == SDmode
8304 && (REG_P (operands[0])
8305 || (GET_CODE (operands[0]) == SUBREG
8306 && REG_P (SUBREG_REG (operands[0]))))
8307 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8308 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8310 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8311 ? SUBREG_REG (operands[0]) : operands[0]);
8312 enum reg_class cl;
8314 if (regno >= FIRST_PSEUDO_REGISTER)
8316 cl = reg_preferred_class (regno);
8317 gcc_assert (cl != NO_REGS);
8318 regno = ira_class_hard_regs[cl][0];
8320 if (FP_REGNO_P (regno))
8322 if (GET_MODE (operands[1]) != DDmode)
8323 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8324 emit_insn (gen_movsd_load (operands[0], operands[1]));
8326 else if (INT_REGNO_P (regno))
8327 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8328 else
8329 gcc_unreachable();
8330 return;
8333 if (reload_in_progress
8334 && mode == SDmode
8335 && cfun->machine->sdmode_stack_slot != NULL_RTX
8336 && MEM_P (operands[0])
8337 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8338 && REG_P (operands[1]))
8340 if (FP_REGNO_P (REGNO (operands[1])))
8342 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8343 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8344 emit_insn (gen_movsd_store (mem, operands[1]));
8346 else if (INT_REGNO_P (REGNO (operands[1])))
8348 rtx mem = operands[0];
8349 if (BYTES_BIG_ENDIAN)
8350 mem = adjust_address_nv (mem, mode, 4);
8351 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8352 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8354 else
8355 gcc_unreachable();
8356 return;
8358 if (reload_in_progress
8359 && mode == SDmode
8360 && REG_P (operands[0])
8361 && MEM_P (operands[1])
8362 && cfun->machine->sdmode_stack_slot != NULL_RTX
8363 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8365 if (FP_REGNO_P (REGNO (operands[0])))
8367 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8368 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8369 emit_insn (gen_movsd_load (operands[0], mem));
8371 else if (INT_REGNO_P (REGNO (operands[0])))
8373 rtx mem = operands[1];
8374 if (BYTES_BIG_ENDIAN)
8375 mem = adjust_address_nv (mem, mode, 4);
8376 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8377 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8379 else
8380 gcc_unreachable();
8381 return;
8384 /* FIXME: In the long term, this switch statement should go away
8385 and be replaced by a sequence of tests based on things like
8386 mode == Pmode. */
8387 switch (mode)
8389 case HImode:
8390 case QImode:
8391 if (CONSTANT_P (operands[1])
8392 && GET_CODE (operands[1]) != CONST_INT)
8393 operands[1] = force_const_mem (mode, operands[1]);
8394 break;
8396 case TFmode:
8397 case TDmode:
8398 rs6000_eliminate_indexed_memrefs (operands);
8399 /* fall through */
8401 case DFmode:
8402 case DDmode:
8403 case SFmode:
8404 case SDmode:
8405 if (CONSTANT_P (operands[1])
8406 && ! easy_fp_constant (operands[1], mode))
8407 operands[1] = force_const_mem (mode, operands[1]);
8408 break;
8410 case V16QImode:
8411 case V8HImode:
8412 case V4SFmode:
8413 case V4SImode:
8414 case V4HImode:
8415 case V2SFmode:
8416 case V2SImode:
8417 case V1DImode:
8418 case V2DFmode:
8419 case V2DImode:
8420 case V1TImode:
8421 if (CONSTANT_P (operands[1])
8422 && !easy_vector_constant (operands[1], mode))
8423 operands[1] = force_const_mem (mode, operands[1]);
8424 break;
8426 case SImode:
8427 case DImode:
8428 /* Use the default pattern for the address of ELF small data. */
8429 if (TARGET_ELF
8430 && mode == Pmode
8431 && DEFAULT_ABI == ABI_V4
8432 && (GET_CODE (operands[1]) == SYMBOL_REF
8433 || GET_CODE (operands[1]) == CONST)
8434 && small_data_operand (operands[1], mode))
8436 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8437 return;
8440 if (DEFAULT_ABI == ABI_V4
8441 && mode == Pmode && mode == SImode
8442 && flag_pic == 1 && got_operand (operands[1], mode))
8444 emit_insn (gen_movsi_got (operands[0], operands[1]));
8445 return;
8448 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8449 && TARGET_NO_TOC
8450 && ! flag_pic
8451 && mode == Pmode
8452 && CONSTANT_P (operands[1])
8453 && GET_CODE (operands[1]) != HIGH
8454 && GET_CODE (operands[1]) != CONST_INT)
8456 rtx target = (!can_create_pseudo_p ()
8457 ? operands[0]
8458 : gen_reg_rtx (mode));
8460 /* If this is a function address on -mcall-aixdesc,
8461 convert it to the address of the descriptor. */
8462 if (DEFAULT_ABI == ABI_AIX
8463 && GET_CODE (operands[1]) == SYMBOL_REF
8464 && XSTR (operands[1], 0)[0] == '.')
8466 const char *name = XSTR (operands[1], 0);
8467 rtx new_ref;
8468 while (*name == '.')
8469 name++;
8470 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8471 CONSTANT_POOL_ADDRESS_P (new_ref)
8472 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8473 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8474 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8475 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8476 operands[1] = new_ref;
8479 if (DEFAULT_ABI == ABI_DARWIN)
8481 #if TARGET_MACHO
8482 if (MACHO_DYNAMIC_NO_PIC_P)
8484 /* Take care of any required data indirection. */
8485 operands[1] = rs6000_machopic_legitimize_pic_address (
8486 operands[1], mode, operands[0]);
8487 if (operands[0] != operands[1])
8488 emit_insn (gen_rtx_SET (VOIDmode,
8489 operands[0], operands[1]));
8490 return;
8492 #endif
8493 emit_insn (gen_macho_high (target, operands[1]));
8494 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8495 return;
8498 emit_insn (gen_elf_high (target, operands[1]));
8499 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8500 return;
8503 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8504 and we have put it in the TOC, we just need to make a TOC-relative
8505 reference to it. */
8506 if (TARGET_TOC
8507 && GET_CODE (operands[1]) == SYMBOL_REF
8508 && use_toc_relative_ref (operands[1]))
8509 operands[1] = create_TOC_reference (operands[1], operands[0]);
8510 else if (mode == Pmode
8511 && CONSTANT_P (operands[1])
8512 && GET_CODE (operands[1]) != HIGH
8513 && ((GET_CODE (operands[1]) != CONST_INT
8514 && ! easy_fp_constant (operands[1], mode))
8515 || (GET_CODE (operands[1]) == CONST_INT
8516 && (num_insns_constant (operands[1], mode)
8517 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8518 || (GET_CODE (operands[0]) == REG
8519 && FP_REGNO_P (REGNO (operands[0]))))
8520 && !toc_relative_expr_p (operands[1], false)
8521 && (TARGET_CMODEL == CMODEL_SMALL
8522 || can_create_pseudo_p ()
8523 || (REG_P (operands[0])
8524 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8527 #if TARGET_MACHO
8528 /* Darwin uses a special PIC legitimizer. */
8529 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8531 operands[1] =
8532 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8533 operands[0]);
8534 if (operands[0] != operands[1])
8535 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8536 return;
8538 #endif
8540 /* If we are to limit the number of things we put in the TOC and
8541 this is a symbol plus a constant we can add in one insn,
8542 just put the symbol in the TOC and add the constant. Don't do
8543 this if reload is in progress. */
8544 if (GET_CODE (operands[1]) == CONST
8545 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8546 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8547 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8548 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8549 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8550 && ! side_effects_p (operands[0]))
8552 rtx sym =
8553 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8554 rtx other = XEXP (XEXP (operands[1], 0), 1);
8556 sym = force_reg (mode, sym);
8557 emit_insn (gen_add3_insn (operands[0], sym, other));
8558 return;
8561 operands[1] = force_const_mem (mode, operands[1]);
8563 if (TARGET_TOC
8564 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8565 && constant_pool_expr_p (XEXP (operands[1], 0))
8566 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8567 get_pool_constant (XEXP (operands[1], 0)),
8568 get_pool_mode (XEXP (operands[1], 0))))
8570 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8571 operands[0]);
8572 operands[1] = gen_const_mem (mode, tocref);
8573 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8576 break;
8578 case TImode:
8579 if (!VECTOR_MEM_VSX_P (TImode))
8580 rs6000_eliminate_indexed_memrefs (operands);
8581 break;
8583 case PTImode:
8584 rs6000_eliminate_indexed_memrefs (operands);
8585 break;
8587 default:
8588 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8591 /* Above, we may have called force_const_mem which may have returned
8592 an invalid address. If we can, fix this up; otherwise, reload will
8593 have to deal with it. */
8594 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8595 operands[1] = validize_mem (operands[1]);
8597 emit_set:
8598 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8601 /* Return true if a structure, union or array containing FIELD should be
8602 accessed using `BLKmode'.
8604 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8605 entire thing in a DI and use subregs to access the internals.
8606 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8607 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8608 best thing to do is set structs to BLKmode and avoid Severe Tire
8609 Damage.
8611 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
8612 fit into 1, whereas DI still needs two. */
8614 static bool
8615 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8617 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8618 || (TARGET_E500_DOUBLE && mode == DFmode));
8621 /* Nonzero if we can use a floating-point register to pass this arg. */
8622 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8623 (SCALAR_FLOAT_MODE_P (MODE) \
8624 && (CUM)->fregno <= FP_ARG_MAX_REG \
8625 && TARGET_HARD_FLOAT && TARGET_FPRS)
8627 /* Nonzero if we can use an AltiVec register to pass this arg. */
8628 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8629 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8630 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8631 && TARGET_ALTIVEC_ABI \
8632 && (NAMED))
8634 /* Walk down the type tree of TYPE counting consecutive base elements.
8635 If *MODEP is VOIDmode, then set it to the first valid floating point
8636 or vector type. If a non-floating point or vector type is found, or
8637 if a floating point or vector type that doesn't match a non-VOIDmode
8638 *MODEP is found, then return -1, otherwise return the count in the
8639 sub-tree. */
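/* For instance, struct { double a, b; } yields a count of 2 with
*MODEP == DFmode, a _Complex double field likewise contributes 2
via the COMPLEX_TYPE case, and double a[4] contributes 4. */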
8641 static int
8642 rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
8644 enum machine_mode mode;
8645 HOST_WIDE_INT size;
8647 switch (TREE_CODE (type))
8649 case REAL_TYPE:
8650 mode = TYPE_MODE (type);
8651 if (!SCALAR_FLOAT_MODE_P (mode))
8652 return -1;
8654 if (*modep == VOIDmode)
8655 *modep = mode;
8657 if (*modep == mode)
8658 return 1;
8660 break;
8662 case COMPLEX_TYPE:
8663 mode = TYPE_MODE (TREE_TYPE (type));
8664 if (!SCALAR_FLOAT_MODE_P (mode))
8665 return -1;
8667 if (*modep == VOIDmode)
8668 *modep = mode;
8670 if (*modep == mode)
8671 return 2;
8673 break;
8675 case VECTOR_TYPE:
8676 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
8677 return -1;
8679 /* Use V4SImode as representative of all 128-bit vector types. */
8680 size = int_size_in_bytes (type);
8681 switch (size)
8683 case 16:
8684 mode = V4SImode;
8685 break;
8686 default:
8687 return -1;
8690 if (*modep == VOIDmode)
8691 *modep = mode;
8693 /* Vector modes are considered to be opaque: two vectors are
8694 equivalent for the purposes of being homogeneous aggregates
8695 if they are the same size. */
8696 if (*modep == mode)
8697 return 1;
8699 break;
8701 case ARRAY_TYPE:
8703 int count;
8704 tree index = TYPE_DOMAIN (type);
8706 /* Can't handle incomplete types or sizes that are not
8707 fixed. */
8708 if (!COMPLETE_TYPE_P (type)
8709 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8710 return -1;
8712 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
8713 if (count == -1
8714 || !index
8715 || !TYPE_MAX_VALUE (index)
8716 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
8717 || !TYPE_MIN_VALUE (index)
8718 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
8719 || count < 0)
8720 return -1;
8722 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
8723 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
8725 /* There must be no padding. */
8726 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8727 return -1;
8729 return count;
8732 case RECORD_TYPE:
8734 int count = 0;
8735 int sub_count;
8736 tree field;
8738 /* Can't handle incomplete types or sizes that are not
8739 fixed. */
8740 if (!COMPLETE_TYPE_P (type)
8741 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8742 return -1;
8744 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8746 if (TREE_CODE (field) != FIELD_DECL)
8747 continue;
8749 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8750 if (sub_count < 0)
8751 return -1;
8752 count += sub_count;
8755 /* There must be no padding. */
8756 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8757 return -1;
8759 return count;
8762 case UNION_TYPE:
8763 case QUAL_UNION_TYPE:
8765 /* These aren't very interesting except in a degenerate case. */
8766 int count = 0;
8767 int sub_count;
8768 tree field;
8770 /* Can't handle incomplete types or sizes that are not
8771 fixed. */
8772 if (!COMPLETE_TYPE_P (type)
8773 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8774 return -1;
8775 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8777 if (TREE_CODE (field) != FIELD_DECL)
8778 continue;
8780 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8781 if (sub_count < 0)
8782 return -1;
8783 count = count > sub_count ? count : sub_count;
8786 /* There must be no padding. */
8787 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8788 return -1;
8790 return count;
8793 default:
8794 break;
8797 return -1;
8800 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
8801 float or vector aggregate that shall be passed in FP/vector registers
8802 according to the ELFv2 ABI, return the homogeneous element mode in
8803 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
8805 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
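/* E.g. under ELFv2, struct { double x, y, z; } is discovered as a
homogeneous aggregate with *ELT_MODE == DFmode and *N_ELTS == 3,
so it can be passed or returned in three FPRs when enough
registers remain. */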
8807 static bool
8808 rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
8809 enum machine_mode *elt_mode,
8810 int *n_elts)
8812 /* Note that we do not accept complex types at the top level as
8813 homogeneous aggregates; these types are handled via the
8814 targetm.calls.split_complex_arg mechanism. Complex types
8815 can be elements of homogeneous aggregates, however. */
8816 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
8818 enum machine_mode field_mode = VOIDmode;
8819 int field_count = rs6000_aggregate_candidate (type, &field_mode);
8821 if (field_count > 0)
8823 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode)?
8824 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
8826 /* The ELFv2 ABI allows homogeneous aggregates to occupy
8827 up to AGGR_ARG_NUM_REG registers. */
8828 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
8830 if (elt_mode)
8831 *elt_mode = field_mode;
8832 if (n_elts)
8833 *n_elts = field_count;
8834 return true;
8839 if (elt_mode)
8840 *elt_mode = mode;
8841 if (n_elts)
8842 *n_elts = 1;
8843 return false;
8846 /* Return a nonzero value to say to return the function value in
8847 memory, just as large structures are always returned. TYPE will be
8848 the data type of the value, and FNTYPE will be the type of the
8849 function doing the returning, or @code{NULL} for libcalls.
8851 The AIX ABI for the RS/6000 specifies that all structures are
8852 returned in memory. The Darwin ABI does the same.
8854 For the Darwin 64 Bit ABI, a function result can be returned in
8855 registers or in memory, depending on the size of the return data
8856 type. If it is returned in registers, the value occupies the same
8857 registers as it would if it were the first and only function
8858 argument. Otherwise, the function places its result in memory at
8859 the location pointed to by GPR3.
8861 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8862 but a draft put them in memory, and GCC used to implement the draft
8863 instead of the final standard. Therefore, aix_struct_return
8864 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8865 compatibility can change DRAFT_V4_STRUCT_RET to override the
8866 default, and -m switches get the final word. See
8867 rs6000_option_override_internal for more details.
8869 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8870 long double support is enabled. These values are returned in memory.
8872 int_size_in_bytes returns -1 for variable size objects, which go in
8873 memory always. The cast to unsigned makes -1 > 8. */
8875 static bool
8876 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8878 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8879 if (TARGET_MACHO
8880 && rs6000_darwin64_abi
8881 && TREE_CODE (type) == RECORD_TYPE
8882 && int_size_in_bytes (type) > 0)
8884 CUMULATIVE_ARGS valcum;
8885 rtx valret;
8887 valcum.words = 0;
8888 valcum.fregno = FP_ARG_MIN_REG;
8889 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8890 /* Do a trial code generation as if this were going to be passed
8891 as an argument; if any part goes in memory, we return NULL. */
8892 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8893 if (valret)
8894 return false;
8895 /* Otherwise fall through to more conventional ABI rules. */
8898 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
8899 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
8900 NULL, NULL))
8901 return false;
8903 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
8904 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
8905 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
8906 return false;
8908 if (AGGREGATE_TYPE_P (type)
8909 && (aix_struct_return
8910 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
8911 return true;
8913 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8914 modes only exist for GCC vector types if -maltivec. */
8915 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
8916 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
8917 return false;
8919 /* Return synthetic vectors in memory. */
8920 if (TREE_CODE (type) == VECTOR_TYPE
8921 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8923 static bool warned_for_return_big_vectors = false;
8924 if (!warned_for_return_big_vectors)
8926 warning (0, "GCC vector returned by reference: "
8927 "non-standard ABI extension with no compatibility guarantee");
8928 warned_for_return_big_vectors = true;
8930 return true;
8933 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
8934 return true;
8936 return false;
8939 /* Specify whether values returned in registers should be at the most
8940 significant end of a register. We want aggregates returned by
8941 value to match the way aggregates are passed to functions. */
8943 static bool
8944 rs6000_return_in_msb (const_tree valtype)
8946 return (DEFAULT_ABI == ABI_ELFv2
8947 && BYTES_BIG_ENDIAN
8948 && AGGREGATE_TYPE_P (valtype)
8949 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
8952 #ifdef HAVE_AS_GNU_ATTRIBUTE
8953 /* Return TRUE if a call to function FNDECL may be one that
8954 potentially affects the function calling ABI of the object file. */
8956 static bool
8957 call_ABI_of_interest (tree fndecl)
8959 if (cgraph_state == CGRAPH_STATE_EXPANSION)
8961 struct cgraph_node *c_node;
8963 /* Libcalls are always interesting. */
8964 if (fndecl == NULL_TREE)
8965 return true;
8967 /* Any call to an external function is interesting. */
8968 if (DECL_EXTERNAL (fndecl))
8969 return true;
8971 /* Interesting functions that we are emitting in this object file. */
8972 c_node = cgraph_get_node (fndecl);
8973 c_node = cgraph_function_or_thunk_node (c_node, NULL);
8974 return !cgraph_only_called_directly_p (c_node);
8976 return false;
8978 #endif
8980 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8981 for a call to a function whose data type is FNTYPE.
8982 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
8984 For incoming args we set the number of arguments in the prototype large
8985 so we never return a PARALLEL. */
8987 void
8988 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
8989 rtx libname ATTRIBUTE_UNUSED, int incoming,
8990 int libcall, int n_named_args,
8991 tree fndecl ATTRIBUTE_UNUSED,
8992 enum machine_mode return_mode ATTRIBUTE_UNUSED)
8994 static CUMULATIVE_ARGS zero_cumulative;
8996 *cum = zero_cumulative;
8997 cum->words = 0;
8998 cum->fregno = FP_ARG_MIN_REG;
8999 cum->vregno = ALTIVEC_ARG_MIN_REG;
9000 cum->prototype = (fntype && prototype_p (fntype));
9001 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
9002 ? CALL_LIBCALL : CALL_NORMAL);
9003 cum->sysv_gregno = GP_ARG_MIN_REG;
9004 cum->stdarg = stdarg_p (fntype);
9006 cum->nargs_prototype = 0;
9007 if (incoming || cum->prototype)
9008 cum->nargs_prototype = n_named_args;
9010 /* Check for a longcall attribute. */
9011 if ((!fntype && rs6000_default_long_calls)
9012 || (fntype
9013 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
9014 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
9015 cum->call_cookie |= CALL_LONG;
9017 if (TARGET_DEBUG_ARG)
9019 fprintf (stderr, "\ninit_cumulative_args:");
9020 if (fntype)
9022 tree ret_type = TREE_TYPE (fntype);
9023 fprintf (stderr, " ret code = %s,",
9024 get_tree_code_name (TREE_CODE (ret_type)));
9027 if (cum->call_cookie & CALL_LONG)
9028 fprintf (stderr, " longcall,");
9030 fprintf (stderr, " proto = %d, nargs = %d\n",
9031 cum->prototype, cum->nargs_prototype);
9034 #ifdef HAVE_AS_GNU_ATTRIBUTE
9035 if (DEFAULT_ABI == ABI_V4)
9037 cum->escapes = call_ABI_of_interest (fndecl);
9038 if (cum->escapes)
9040 tree return_type;
9042 if (fntype)
9044 return_type = TREE_TYPE (fntype);
9045 return_mode = TYPE_MODE (return_type);
9047 else
9048 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9050 if (return_type != NULL)
9052 if (TREE_CODE (return_type) == RECORD_TYPE
9053 && TYPE_TRANSPARENT_AGGR (return_type))
9055 return_type = TREE_TYPE (first_field (return_type));
9056 return_mode = TYPE_MODE (return_type);
9058 if (AGGREGATE_TYPE_P (return_type)
9059 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9060 <= 8))
9061 rs6000_returns_struct = true;
9063 if (SCALAR_FLOAT_MODE_P (return_mode))
9064 rs6000_passes_float = true;
9065 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9066 || SPE_VECTOR_MODE (return_mode))
9067 rs6000_passes_vector = true;
9070 #endif
9072 if (fntype
9073 && !TARGET_ALTIVEC
9074 && TARGET_ALTIVEC_ABI
9075 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9077 error ("cannot return value in vector register because"
9078 " altivec instructions are disabled, use -maltivec"
9079 " to enable them");
9083 /* Return true if TYPE must be passed on the stack and not in registers. */
9085 static bool
9086 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
9088 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9089 return must_pass_in_stack_var_size (mode, type);
9090 else
9091 return must_pass_in_stack_var_size_or_pad (mode, type);
9094 /* If defined, a C expression which determines whether, and in which
9095 direction, to pad out an argument with extra space. The value
9096 should be of type `enum direction': either `upward' to pad above
9097 the argument, `downward' to pad below, or `none' to inhibit
9098 padding.
9100 For the AIX ABI structs are always stored left shifted in their
9101 argument slot. */
9103 enum direction
9104 function_arg_padding (enum machine_mode mode, const_tree type)
9106 #ifndef AGGREGATE_PADDING_FIXED
9107 #define AGGREGATE_PADDING_FIXED 0
9108 #endif
9109 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9110 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9111 #endif
9113 if (!AGGREGATE_PADDING_FIXED)
9115 /* GCC used to pass structures of the same size as integer types as
9116 if they were in fact integers, ignoring FUNCTION_ARG_PADDING;
9117 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9118 passed padded downward, except that -mstrict-align further
9119 muddied the water in that multi-component structures of 2 and 4
9120 bytes in size were passed padded upward.
9122 The following arranges for best compatibility with previous
9123 versions of gcc, but removes the -mstrict-align dependency. */
9124 if (BYTES_BIG_ENDIAN)
9126 HOST_WIDE_INT size = 0;
9128 if (mode == BLKmode)
9130 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9131 size = int_size_in_bytes (type);
9133 else
9134 size = GET_MODE_SIZE (mode);
9136 if (size == 1 || size == 2 || size == 4)
9137 return downward;
9139 return upward;
9142 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9144 if (type != 0 && AGGREGATE_TYPE_P (type))
9145 return upward;
9148 /* Fall back to the default. */
9149 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
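/* Editor's illustration (not part of the original source): a minimal
   standalone model of the big-endian size rule above. The helper name
   and the driver are hypothetical. */
#if 0
#include <stdio.h>

enum direction { none, upward, downward };

/* Arguments of size 1, 2 or 4 bytes pad downward on big-endian
   targets; everything else pads upward. */
static enum direction
be_padding_for_size (long size)
{
  return (size == 1 || size == 2 || size == 4) ? downward : upward;
}

int
main (void)
{
  /* A 2-byte struct sits at the high end of its slot (downward);
     a 3-byte packed struct pads upward. */
  printf ("%d %d\n", (int) be_padding_for_size (2),
	  (int) be_padding_for_size (3));
  return 0;
}
#endif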
9152 /* If defined, a C expression that gives the alignment boundary, in bits,
9153 of an argument with the specified mode and type. If it is not defined,
9154 PARM_BOUNDARY is used for all arguments.
9156 V.4 wants long longs and doubles to be double word aligned. Just
9157 testing the mode size is a boneheaded way to do this as it means
9158 that other types such as complex int are also double word aligned.
9159 However, we're stuck with this because changing the ABI might break
9160 existing library interfaces.
9162 Doubleword align SPE vectors.
9163 Quadword align Altivec/VSX vectors.
9164 Quadword align large synthetic vector types. */
9166 static unsigned int
9167 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
9169 enum machine_mode elt_mode;
9170 int n_elts;
9172 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9174 if (DEFAULT_ABI == ABI_V4
9175 && (GET_MODE_SIZE (mode) == 8
9176 || (TARGET_HARD_FLOAT
9177 && TARGET_FPRS
9178 && (mode == TFmode || mode == TDmode))))
9179 return 64;
9180 else if (SPE_VECTOR_MODE (mode)
9181 || (type && TREE_CODE (type) == VECTOR_TYPE
9182 && int_size_in_bytes (type) >= 8
9183 && int_size_in_bytes (type) < 16))
9184 return 64;
9185 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9186 || (type && TREE_CODE (type) == VECTOR_TYPE
9187 && int_size_in_bytes (type) >= 16))
9188 return 128;
9189 else if (((TARGET_MACHO && rs6000_darwin64_abi)
9190 || DEFAULT_ABI == ABI_ELFv2
9191 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
9192 && mode == BLKmode
9193 && type && TYPE_ALIGN (type) > 64)
9194 return 128;
9195 else
9196 return PARM_BOUNDARY;
9199 /* The offset in words to the start of the parameter save area. */
9201 static unsigned int
9202 rs6000_parm_offset (void)
9204 return (DEFAULT_ABI == ABI_V4 ? 2
9205 : DEFAULT_ABI == ABI_ELFv2 ? 4
9206 : 6);
9209 /* For a function parm of MODE and TYPE, return the starting word in
9210 the parameter area. NWORDS of the parameter area are already used. */
9212 static unsigned int
9213 rs6000_parm_start (enum machine_mode mode, const_tree type,
9214 unsigned int nwords)
9216 unsigned int align;
9218 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9219 return nwords + (-(rs6000_parm_offset () + nwords) & align);
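/* Editor's illustration (not part of the original source): the rounding
   above redone in plain ints. The OFFSET and BOUNDARY_WORDS values below
   are examples, not the target macros. */
#if 0
#include <stdio.h>

static unsigned int
parm_start (unsigned int offset, unsigned int boundary_words,
	    unsigned int nwords)
{
  unsigned int align = boundary_words - 1;	/* boundary is a power of 2 */
  return nwords + (-(offset + nwords) & align);
}

int
main (void)
{
  /* With an AIX-style offset of 6 words and a 2-word (16-byte)
     boundary, an argument following 1 used word starts at word 2,
     not word 1. */
  printf ("%u\n", parm_start (6, 2, 1));	/* prints 2 */
  return 0;
}
#endif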
9222 /* Compute the size (in words) of a function argument. */
9224 static unsigned long
9225 rs6000_arg_size (enum machine_mode mode, const_tree type)
9227 unsigned long size;
9229 if (mode != BLKmode)
9230 size = GET_MODE_SIZE (mode);
9231 else
9232 size = int_size_in_bytes (type);
9234 if (TARGET_32BIT)
9235 return (size + 3) >> 2;
9236 else
9237 return (size + 7) >> 3;
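/* Editor's worked example (not part of the original source): a 13-byte
   BLKmode struct occupies (13 + 3) >> 2 = 4 words on 32-bit targets and
   (13 + 7) >> 3 = 2 words on 64-bit targets. */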
9240 /* Use this to flush pending int fields. */
9242 static void
9243 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9244 HOST_WIDE_INT bitpos, int final)
9246 unsigned int startbit, endbit;
9247 int intregs, intoffset;
9248 enum machine_mode mode;
9250 /* Handle the situations where a float is taking up the first half
9251 of the GPR, and the other half is empty (typically due to
9252 alignment restrictions). We can detect this by an 8-byte-aligned
9253 int field, or by seeing that this is the final flush for this
9254 argument. Count the word and continue on. */
9255 if (cum->floats_in_gpr == 1
9256 && (cum->intoffset % 64 == 0
9257 || (cum->intoffset == -1 && final)))
9259 cum->words++;
9260 cum->floats_in_gpr = 0;
9263 if (cum->intoffset == -1)
9264 return;
9266 intoffset = cum->intoffset;
9267 cum->intoffset = -1;
9268 cum->floats_in_gpr = 0;
9270 if (intoffset % BITS_PER_WORD != 0)
9272 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9273 MODE_INT, 0);
9274 if (mode == BLKmode)
9276 /* We couldn't find an appropriate mode, which happens,
9277 e.g., in packed structs when there are 3 bytes to load.
9278 Back intoffset back to the beginning of the word in this
9279 case. */
9280 intoffset = intoffset & -BITS_PER_WORD;
9284 startbit = intoffset & -BITS_PER_WORD;
9285 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9286 intregs = (endbit - startbit) / BITS_PER_WORD;
9287 cum->words += intregs;
9288 /* words should be unsigned. */
9289 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
9291 int pad = (endbit/BITS_PER_WORD) - cum->words;
9292 cum->words += pad;
9296 /* The darwin64 ABI calls for us to recurse down through structs,
9297 looking for elements passed in registers. Unfortunately, we have
9298 to track int register count here also because of misalignments
9299 in powerpc alignment mode. */
9301 static void
9302 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9303 const_tree type,
9304 HOST_WIDE_INT startbitpos)
9306 tree f;
9308 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9309 if (TREE_CODE (f) == FIELD_DECL)
9311 HOST_WIDE_INT bitpos = startbitpos;
9312 tree ftype = TREE_TYPE (f);
9313 enum machine_mode mode;
9314 if (ftype == error_mark_node)
9315 continue;
9316 mode = TYPE_MODE (ftype);
9318 if (DECL_SIZE (f) != 0
9319 && tree_fits_uhwi_p (bit_position (f)))
9320 bitpos += int_bit_position (f);
9322 /* ??? FIXME: else assume zero offset. */
9324 if (TREE_CODE (ftype) == RECORD_TYPE)
9325 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9326 else if (USE_FP_FOR_ARG_P (cum, mode))
9328 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9329 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9330 cum->fregno += n_fpregs;
9331 /* Single-precision floats present a special problem for
9332 us, because they are smaller than an 8-byte GPR, and so
9333 the structure-packing rules combined with the standard
9334 varargs behavior mean that we want to pack float/float
9335 and float/int combinations into a single register's
9336 space. This is complicated by the arg advance flushing,
9337 which works on arbitrarily large groups of int-type
9338 fields. */
9339 if (mode == SFmode)
9341 if (cum->floats_in_gpr == 1)
9343 /* Two floats in a word; count the word and reset
9344 the float count. */
9345 cum->words++;
9346 cum->floats_in_gpr = 0;
9348 else if (bitpos % 64 == 0)
9350 /* A float at the beginning of an 8-byte word;
9351 count it and put off adjusting cum->words until
9352 we see if an arg advance flush is going to do it
9353 for us. */
9354 cum->floats_in_gpr++;
9356 else
9358 /* The float is at the end of a word, preceded
9359 by integer fields, so the arg advance flush
9360 just above has already set cum->words and
9361 everything is taken care of. */
9364 else
9365 cum->words += n_fpregs;
9367 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9369 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9370 cum->vregno++;
9371 cum->words += 2;
9373 else if (cum->intoffset == -1)
9374 cum->intoffset = bitpos;
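/* Editor's worked example (not part of the original source): for a
   Darwin64 struct { float a; float b; }, field A sets floats_in_gpr to
   1, and field B then takes the "two floats in a word" branch above, so
   the pair consumes two FPRs but only a single GPR word. */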
9378 /* Check for an item that needs to be considered specially under the darwin
9379 64-bit ABI. These are record types where the mode is BLKmode or the
9380 structure is 8 bytes in size. */
9381 static int
9382 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
9384 return rs6000_darwin64_abi
9385 && ((mode == BLKmode
9386 && TREE_CODE (type) == RECORD_TYPE
9387 && int_size_in_bytes (type) > 0)
9388 || (type && TREE_CODE (type) == RECORD_TYPE
9389 && int_size_in_bytes (type) == 8)) ? 1 : 0;
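/* Editor's examples (not part of the original source): under the Darwin
   64-bit ABI, struct { char c[9]; } is BLKmode and checks true, and
   struct { double d; } checks true by virtue of being 8 bytes in size;
   a plain int parameter checks false. */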
9392 /* Update the data in CUM to advance over an argument
9393 of mode MODE and data type TYPE.
9394 (TYPE is null for libcalls where that information may not be available.)
9396 Note that for args passed by reference, function_arg will be called
9397 with MODE and TYPE set to that of the pointer to the arg, not the arg
9398 itself. */
9400 static void
9401 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9402 const_tree type, bool named, int depth)
9404 enum machine_mode elt_mode;
9405 int n_elts;
9407 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9409 /* Only tick off an argument if we're not recursing. */
9410 if (depth == 0)
9411 cum->nargs_prototype--;
9413 #ifdef HAVE_AS_GNU_ATTRIBUTE
9414 if (DEFAULT_ABI == ABI_V4
9415 && cum->escapes)
9417 if (SCALAR_FLOAT_MODE_P (mode))
9418 rs6000_passes_float = true;
9419 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9420 rs6000_passes_vector = true;
9421 else if (SPE_VECTOR_MODE (mode)
9422 && !cum->stdarg
9423 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9424 rs6000_passes_vector = true;
9426 #endif
9428 if (TARGET_ALTIVEC_ABI
9429 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9430 || (type && TREE_CODE (type) == VECTOR_TYPE
9431 && int_size_in_bytes (type) == 16)))
9433 bool stack = false;
9435 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9437 cum->vregno += n_elts;
9439 if (!TARGET_ALTIVEC)
9440 error ("cannot pass argument in vector register because"
9441 " altivec instructions are disabled, use -maltivec"
9442 " to enable them");
9444 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9445 even if it is going to be passed in a vector register.
9446 Darwin does the same for variable-argument functions. */
9447 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9448 && TARGET_64BIT)
9449 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9450 stack = true;
9452 else
9453 stack = true;
9455 if (stack)
9457 int align;
9459 /* Vector parameters must be 16-byte aligned. In 32-bit
9460 mode this means we need to take into account the offset
9461 to the parameter save area. In 64-bit mode, they just
9462 have to start on an even word, since the parameter save
9463 area is 16-byte aligned. */
9464 if (TARGET_32BIT)
9465 align = -(rs6000_parm_offset () + cum->words) & 3;
9466 else
9467 align = cum->words & 1;
9468 cum->words += align + rs6000_arg_size (mode, type);
9470 if (TARGET_DEBUG_ARG)
9472 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9473 cum->words, align);
9474 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9475 cum->nargs_prototype, cum->prototype,
9476 GET_MODE_NAME (mode));
9480 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9481 && !cum->stdarg
9482 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9483 cum->sysv_gregno++;
9485 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9487 int size = int_size_in_bytes (type);
9488 /* Variable sized types have size == -1 and are
9489 treated as if consisting entirely of ints.
9490 Pad to 16 byte boundary if needed. */
9491 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9492 && (cum->words % 2) != 0)
9493 cum->words++;
9494 /* For varargs, we can just go up by the size of the struct. */
9495 if (!named)
9496 cum->words += (size + 7) / 8;
9497 else
9499 /* It is tempting to say int register count just goes up by
9500 sizeof(type)/8, but this is wrong in a case such as
9501 { int; double; int; } [powerpc alignment]. We have to
9502 grovel through the fields for these too. */
9503 cum->intoffset = 0;
9504 cum->floats_in_gpr = 0;
9505 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9506 rs6000_darwin64_record_arg_advance_flush (cum,
9507 size * BITS_PER_UNIT, 1);
9509 if (TARGET_DEBUG_ARG)
9511 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
9512 cum->words, TYPE_ALIGN (type), size);
9513 fprintf (stderr,
9514 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9515 cum->nargs_prototype, cum->prototype,
9516 GET_MODE_NAME (mode));
9519 else if (DEFAULT_ABI == ABI_V4)
9521 if (TARGET_HARD_FLOAT && TARGET_FPRS
9522 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9523 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9524 || (mode == TFmode && !TARGET_IEEEQUAD)
9525 || mode == SDmode || mode == DDmode || mode == TDmode))
9527 /* _Decimal128 must use an even/odd register pair. This assumes
9528 that the register number is odd when fregno is odd. */
9529 if (mode == TDmode && (cum->fregno % 2) == 1)
9530 cum->fregno++;
9532 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9533 <= FP_ARG_V4_MAX_REG)
9534 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9535 else
9537 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9538 if (mode == DFmode || mode == TFmode
9539 || mode == DDmode || mode == TDmode)
9540 cum->words += cum->words & 1;
9541 cum->words += rs6000_arg_size (mode, type);
9544 else
9546 int n_words = rs6000_arg_size (mode, type);
9547 int gregno = cum->sysv_gregno;
9549 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9550 (r7,r8) or (r9,r10). As does any other 2 word item such
9551 as complex int due to a historical mistake. */
9552 if (n_words == 2)
9553 gregno += (1 - gregno) & 1;
9555 /* Multi-reg args are not split between registers and stack. */
9556 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9558 /* Long long and SPE vectors are aligned on the stack.
9559 So are other 2 word items such as complex int due to
9560 a historical mistake. */
9561 if (n_words == 2)
9562 cum->words += cum->words & 1;
9563 cum->words += n_words;
9566 /* Note: we continue to accumulate gregno even after we've started
9567 spilling to the stack; this lets expand_builtin_saveregs see that
9568 spilling has begun. */
9569 cum->sysv_gregno = gregno + n_words;
9572 if (TARGET_DEBUG_ARG)
9574 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9575 cum->words, cum->fregno);
9576 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9577 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9578 fprintf (stderr, "mode = %4s, named = %d\n",
9579 GET_MODE_NAME (mode), named);
9582 else
9584 int n_words = rs6000_arg_size (mode, type);
9585 int start_words = cum->words;
9586 int align_words = rs6000_parm_start (mode, type, start_words);
9588 cum->words = align_words + n_words;
9590 if (SCALAR_FLOAT_MODE_P (elt_mode)
9591 && TARGET_HARD_FLOAT && TARGET_FPRS)
9593 /* _Decimal128 must be passed in an even/odd float register pair.
9594 This assumes that the register number is odd when fregno is
9595 odd. */
9596 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9597 cum->fregno++;
9598 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9601 if (TARGET_DEBUG_ARG)
9603 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9604 cum->words, cum->fregno);
9605 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9606 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9607 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9608 named, align_words - start_words, depth);
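/* Editor's worked example (not part of the original source): on a
   64-bit AIX-style ABI, advancing over f (double, int, vector int)
   consumes one FPR and one parameter word for the double, one more word
   for the int, then, for the 16-byte vector, aligns cum->words to an
   even word (a no-op here) and adds two further words, also consuming
   a VR. */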
9613 static void
9614 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
9615 const_tree type, bool named)
9617 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
9621 static rtx
9622 spe_build_register_parallel (enum machine_mode mode, int gregno)
9624 rtx r1, r3, r5, r7;
9626 switch (mode)
9628 case DFmode:
9629 r1 = gen_rtx_REG (DImode, gregno);
9630 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9631 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
9633 case DCmode:
9634 case TFmode:
9635 r1 = gen_rtx_REG (DImode, gregno);
9636 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9637 r3 = gen_rtx_REG (DImode, gregno + 2);
9638 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9639 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
9641 case TCmode:
9642 r1 = gen_rtx_REG (DImode, gregno);
9643 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9644 r3 = gen_rtx_REG (DImode, gregno + 2);
9645 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9646 r5 = gen_rtx_REG (DImode, gregno + 4);
9647 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
9648 r7 = gen_rtx_REG (DImode, gregno + 6);
9649 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
9650 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
9652 default:
9653 gcc_unreachable ();
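/* Editor's note (not part of the original source): for DCmode/TFmode
   with gregno == 5, the PARALLEL built above is
   (parallel [(expr_list (reg:DI 5) (const_int 0))
	      (expr_list (reg:DI 7) (const_int 8))])
   i.e. two doubleword registers at byte offsets 0 and 8. */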
9657 /* Determine where to put a SIMD argument on the SPE. */
9658 static rtx
9659 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
9660 const_tree type)
9662 int gregno = cum->sysv_gregno;
9664 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9665 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
9666 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
9667 || mode == DCmode || mode == TCmode))
9669 int n_words = rs6000_arg_size (mode, type);
9671 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9672 if (mode == DFmode)
9673 gregno += (1 - gregno) & 1;
9675 /* Multi-reg args are not split between registers and stack. */
9676 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9677 return NULL_RTX;
9679 return spe_build_register_parallel (mode, gregno);
9681 if (cum->stdarg)
9683 int n_words = rs6000_arg_size (mode, type);
9685 /* SPE vectors are put in odd registers. */
9686 if (n_words == 2 && (gregno & 1) == 0)
9687 gregno += 1;
9689 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
9691 rtx r1, r2;
9692 enum machine_mode m = SImode;
9694 r1 = gen_rtx_REG (m, gregno);
9695 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
9696 r2 = gen_rtx_REG (m, gregno + 1);
9697 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
9698 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
9700 else
9701 return NULL_RTX;
9703 else
9705 if (gregno <= GP_ARG_MAX_REG)
9706 return gen_rtx_REG (mode, gregno);
9707 else
9708 return NULL_RTX;
9712 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9713 structure between cum->intoffset and bitpos to integer registers. */
9715 static void
9716 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9717 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9719 enum machine_mode mode;
9720 unsigned int regno;
9721 unsigned int startbit, endbit;
9722 int this_regno, intregs, intoffset;
9723 rtx reg;
9725 if (cum->intoffset == -1)
9726 return;
9728 intoffset = cum->intoffset;
9729 cum->intoffset = -1;
9731 /* If this is the trailing part of a word, try to only load that
9732 much into the register. Otherwise load the whole register. Note
9733 that in the latter case we may pick up unwanted bits. It's not a
9734 problem at the moment, but we may wish to revisit it. */
9736 if (intoffset % BITS_PER_WORD != 0)
9738 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9739 MODE_INT, 0);
9740 if (mode == BLKmode)
9742 /* We couldn't find an appropriate mode, which happens,
9743 e.g., in packed structs when there are 3 bytes to load.
9744 Back intoffset back to the beginning of the word in this
9745 case. */
9746 intoffset = intoffset & -BITS_PER_WORD;
9747 mode = word_mode;
9750 else
9751 mode = word_mode;
9753 startbit = intoffset & -BITS_PER_WORD;
9754 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9755 intregs = (endbit - startbit) / BITS_PER_WORD;
9756 this_regno = cum->words + intoffset / BITS_PER_WORD;
9758 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9759 cum->use_stack = 1;
9761 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9762 if (intregs <= 0)
9763 return;
9765 intoffset /= BITS_PER_UNIT;
9768 regno = GP_ARG_MIN_REG + this_regno;
9769 reg = gen_rtx_REG (mode, regno);
9770 rvec[(*k)++] =
9771 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9773 this_regno += 1;
9774 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9775 mode = word_mode;
9776 intregs -= 1;
9778 while (intregs > 0);
9781 /* Recursive workhorse for the following. */
9783 static void
9784 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9785 HOST_WIDE_INT startbitpos, rtx rvec[],
9786 int *k)
9788 tree f;
9790 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9791 if (TREE_CODE (f) == FIELD_DECL)
9793 HOST_WIDE_INT bitpos = startbitpos;
9794 tree ftype = TREE_TYPE (f);
9795 enum machine_mode mode;
9796 if (ftype == error_mark_node)
9797 continue;
9798 mode = TYPE_MODE (ftype);
9800 if (DECL_SIZE (f) != 0
9801 && tree_fits_uhwi_p (bit_position (f)))
9802 bitpos += int_bit_position (f);
9804 /* ??? FIXME: else assume zero offset. */
9806 if (TREE_CODE (ftype) == RECORD_TYPE)
9807 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9808 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
9810 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9811 #if 0
9812 switch (mode)
9814 case SCmode: mode = SFmode; break;
9815 case DCmode: mode = DFmode; break;
9816 case TCmode: mode = TFmode; break;
9817 default: break;
9819 #endif
9820 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9821 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9823 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9824 && (mode == TFmode || mode == TDmode));
9825 /* Long double or _Decimal128 split over regs and memory. */
9826 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9827 cum->use_stack = 1;
9829 rvec[(*k)++]
9830 = gen_rtx_EXPR_LIST (VOIDmode,
9831 gen_rtx_REG (mode, cum->fregno++),
9832 GEN_INT (bitpos / BITS_PER_UNIT));
9833 if (mode == TFmode || mode == TDmode)
9834 cum->fregno++;
9836 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9838 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9839 rvec[(*k)++]
9840 = gen_rtx_EXPR_LIST (VOIDmode,
9841 gen_rtx_REG (mode, cum->vregno++),
9842 GEN_INT (bitpos / BITS_PER_UNIT));
9844 else if (cum->intoffset == -1)
9845 cum->intoffset = bitpos;
9849 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9850 the register(s) to be used for each field and subfield of a struct
9851 being passed by value, along with the offset of where the
9852 register's value may be found in the block. FP fields go in FP
9853 registers, vector fields go in vector registers, and everything
9854 else goes in int registers, packed as in memory.
9856 This code is also used for function return values. RETVAL indicates
9857 whether this is the case.
9859 Much of this is taken from the SPARC V9 port, which has a similar
9860 calling convention. */
9862 static rtx
9863 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
9864 bool named, bool retval)
9866 rtx rvec[FIRST_PSEUDO_REGISTER];
9867 int k = 1, kbase = 1;
9868 HOST_WIDE_INT typesize = int_size_in_bytes (type);
9869 /* This is a copy; modifications are not visible to our caller. */
9870 CUMULATIVE_ARGS copy_cum = *orig_cum;
9871 CUMULATIVE_ARGS *cum = &copy_cum;
9873 /* Pad to 16 byte boundary if needed. */
9874 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9875 && (cum->words % 2) != 0)
9876 cum->words++;
9878 cum->intoffset = 0;
9879 cum->use_stack = 0;
9880 cum->named = named;
9882 /* Put entries into rvec[] for individual FP and vector fields, and
9883 for the chunks of memory that go in int regs. Note we start at
9884 element 1; 0 is reserved for an indication of using memory, and
9885 may or may not be filled in below. */
9886 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
9887 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
9889 /* If any part of the struct went on the stack put all of it there.
9890 This hack is because the generic code for
9891 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
9892 parts of the struct are not at the beginning. */
9893 if (cum->use_stack)
9895 if (retval)
9896 return NULL_RTX; /* doesn't go in registers at all */
9897 kbase = 0;
9898 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9900 if (k > 1 || cum->use_stack)
9901 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
9902 else
9903 return NULL_RTX;
9906 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9908 static rtx
9909 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
9910 int align_words)
9912 int n_units;
9913 int i, k;
9914 rtx rvec[GP_ARG_NUM_REG + 1];
9916 if (align_words >= GP_ARG_NUM_REG)
9917 return NULL_RTX;
9919 n_units = rs6000_arg_size (mode, type);
9921 /* Optimize the simple case where the arg fits in one gpr, except in
9922 the case of BLKmode due to assign_parms assuming that registers are
9923 BITS_PER_WORD wide. */
9924 if (n_units == 0
9925 || (n_units == 1 && mode != BLKmode))
9926 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9928 k = 0;
9929 if (align_words + n_units > GP_ARG_NUM_REG)
9930 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9931 using a magic NULL_RTX component.
9932 This is not strictly correct. Only some of the arg belongs in
9933 memory, not all of it. However, the normal scheme using
9934 function_arg_partial_nregs can result in unusual subregs, eg.
9935 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9936 store the whole arg to memory is often more efficient than code
9937 to store pieces, and we know that space is available in the right
9938 place for the whole arg. */
9939 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9941 i = 0;
9944 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
9945 rtx off = GEN_INT (i++ * 4);
9946 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9948 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
9950 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
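/* Editor's worked example (not part of the original source): a DFmode
   argument reaching rs6000_mixed_function_arg with align_words == 7
   (the last GPR word) yields a PARALLEL of the NULL_RTX memory marker
   plus (reg:SI r10) at offset 0: half the value travels in r10, the
   other half in memory. */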
9953 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
9954 but must also be copied into the parameter save area starting at
9955 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
9956 to the GPRs and/or memory. Return the number of elements used. */
9958 static int
9959 rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
9960 int align_words, rtx *rvec)
9962 int k = 0;
9964 if (align_words < GP_ARG_NUM_REG)
9966 int n_words = rs6000_arg_size (mode, type);
9968 if (align_words + n_words > GP_ARG_NUM_REG
9969 || mode == BLKmode
9970 || (TARGET_32BIT && TARGET_POWERPC64))
9972 /* If this is partially on the stack, then we only
9973 include the portion actually in registers here. */
9974 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
9975 int i = 0;
9977 if (align_words + n_words > GP_ARG_NUM_REG)
9979 /* Not all of the arg fits in gprs. Say that it goes in memory
9980 too, using a magic NULL_RTX component. Also see comment in
9981 rs6000_mixed_function_arg for why the normal
9982 function_arg_partial_nregs scheme doesn't work in this case. */
9983 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
9988 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
9989 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
9990 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
9992 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
9994 else
9996 /* The whole arg fits in gprs. */
9997 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
9998 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
10001 else
10003 /* It's entirely in memory. */
10004 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10007 return k;
10010 /* RVEC is a vector of K components of an argument of mode MODE.
10011 Construct the final function_arg return value from it. */
10013 static rtx
10014 rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
10016 gcc_assert (k >= 1);
10018 /* Avoid returning a PARALLEL in the trivial cases. */
10019 if (k == 1)
10021 if (XEXP (rvec[0], 0) == NULL_RTX)
10022 return NULL_RTX;
10024 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
10025 return XEXP (rvec[0], 0);
10028 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10031 /* Determine where to put an argument to a function.
10032 Value is zero to push the argument on the stack,
10033 or a hard register in which to store the argument.
10035 MODE is the argument's machine mode.
10036 TYPE is the data type of the argument (as a tree).
10037 This is null for libcalls where that information may
10038 not be available.
10039 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10040 the preceding args and about the function being called. It is
10041 not modified in this routine.
10042 NAMED is nonzero if this argument is a named parameter
10043 (otherwise it is an extra parameter matching an ellipsis).
10045 On RS/6000 the first eight words of non-FP are normally in registers
10046 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10047 Under V.4, the first 8 FP args are in registers.
10049 If this is floating-point and no prototype is specified, we use
10050 both an FP and integer register (or possibly FP reg and stack). Library
10051 functions (when CALL_LIBCALL is set) always have the proper types for args,
10052 so we can pass the FP value just in one register. emit_library_function
10053 doesn't support PARALLEL anyway.
10055 Note that for args passed by reference, function_arg will be called
10056 with MODE and TYPE set to that of the pointer to the arg, not the arg
10057 itself. */
10059 static rtx
10060 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
10061 const_tree type, bool named)
10063 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10064 enum rs6000_abi abi = DEFAULT_ABI;
10065 enum machine_mode elt_mode;
10066 int n_elts;
10068 /* Return a marker to indicate whether CR1 needs to set or clear the
10069 bit that V.4 uses to say fp args were passed in registers.
10070 Assume that we don't need the marker for software floating point,
10071 or compiler generated library calls. */
10072 if (mode == VOIDmode)
10074 if (abi == ABI_V4
10075 && (cum->call_cookie & CALL_LIBCALL) == 0
10076 && (cum->stdarg
10077 || (cum->nargs_prototype < 0
10078 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10080 /* For the SPE, we need to crxor CR6 always. */
10081 if (TARGET_SPE_ABI)
10082 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10083 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10084 return GEN_INT (cum->call_cookie
10085 | ((cum->fregno == FP_ARG_MIN_REG)
10086 ? CALL_V4_SET_FP_ARGS
10087 : CALL_V4_CLEAR_FP_ARGS));
10090 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10093 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10095 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10097 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
10098 if (rslt != NULL_RTX)
10099 return rslt;
10100 /* Else fall through to usual handling. */
10103 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10105 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10106 rtx r, off;
10107 int i, k = 0;
10109 /* Do we also need to pass this argument in the parameter
10110 save area? */
10111 if (TARGET_64BIT && ! cum->prototype)
10113 int align_words = (cum->words + 1) & ~1;
10114 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10117 /* Describe where this argument goes in the vector registers. */
10118 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10120 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10121 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10122 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10125 return rs6000_finish_function_arg (mode, rvec, k);
10127 else if (TARGET_ALTIVEC_ABI
10128 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10129 || (type && TREE_CODE (type) == VECTOR_TYPE
10130 && int_size_in_bytes (type) == 16)))
10132 if (named || abi == ABI_V4)
10133 return NULL_RTX;
10134 else
10136 /* Vector parameters to varargs functions under AIX or Darwin
10137 get passed in memory and possibly also in GPRs. */
10138 int align, align_words, n_words;
10139 enum machine_mode part_mode;
10141 /* Vector parameters must be 16-byte aligned. In 32-bit
10142 mode this means we need to take into account the offset
10143 to the parameter save area. In 64-bit mode, they just
10144 have to start on an even word, since the parameter save
10145 area is 16-byte aligned. */
10146 if (TARGET_32BIT)
10147 align = -(rs6000_parm_offset () + cum->words) & 3;
10148 else
10149 align = cum->words & 1;
10150 align_words = cum->words + align;
10152 /* Out of registers? Memory, then. */
10153 if (align_words >= GP_ARG_NUM_REG)
10154 return NULL_RTX;
10156 if (TARGET_32BIT && TARGET_POWERPC64)
10157 return rs6000_mixed_function_arg (mode, type, align_words);
10159 /* The vector value goes in GPRs. Only the part of the
10160 value in GPRs is reported here. */
10161 part_mode = mode;
10162 n_words = rs6000_arg_size (mode, type);
10163 if (align_words + n_words > GP_ARG_NUM_REG)
10164 /* Fortunately, there are only two possibilities, the value
10165 is either wholly in GPRs or half in GPRs and half not. */
10166 part_mode = DImode;
10168 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10171 else if (TARGET_SPE_ABI && TARGET_SPE
10172 && (SPE_VECTOR_MODE (mode)
10173 || (TARGET_E500_DOUBLE && (mode == DFmode
10174 || mode == DCmode
10175 || mode == TFmode
10176 || mode == TCmode))))
10177 return rs6000_spe_function_arg (cum, mode, type);
10179 else if (abi == ABI_V4)
10181 if (TARGET_HARD_FLOAT && TARGET_FPRS
10182 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10183 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10184 || (mode == TFmode && !TARGET_IEEEQUAD)
10185 || mode == SDmode || mode == DDmode || mode == TDmode))
10187 /* _Decimal128 must use an even/odd register pair. This assumes
10188 that the register number is odd when fregno is odd. */
10189 if (mode == TDmode && (cum->fregno % 2) == 1)
10190 cum->fregno++;
10192 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10193 <= FP_ARG_V4_MAX_REG)
10194 return gen_rtx_REG (mode, cum->fregno);
10195 else
10196 return NULL_RTX;
10198 else
10200 int n_words = rs6000_arg_size (mode, type);
10201 int gregno = cum->sysv_gregno;
10203 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10204 (r7,r8) or (r9,r10). As does any other 2 word item such
10205 as complex int due to a historical mistake. */
10206 if (n_words == 2)
10207 gregno += (1 - gregno) & 1;
10209 /* Multi-reg args are not split between registers and stack. */
10210 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10211 return NULL_RTX;
10213 if (TARGET_32BIT && TARGET_POWERPC64)
10214 return rs6000_mixed_function_arg (mode, type,
10215 gregno - GP_ARG_MIN_REG);
10216 return gen_rtx_REG (mode, gregno);
10219 else
10221 int align_words = rs6000_parm_start (mode, type, cum->words);
10223 /* _Decimal128 must be passed in an even/odd float register pair.
10224 This assumes that the register number is odd when fregno is odd. */
10225 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10226 cum->fregno++;
10228 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10230 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10231 rtx r, off;
10232 int i, k = 0;
10233 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10235 /* Do we also need to pass this argument in the parameter
10236 save area? */
10237 if (type && (cum->nargs_prototype <= 0
10238 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10239 && TARGET_XL_COMPAT
10240 && align_words >= GP_ARG_NUM_REG)))
10241 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10243 /* Describe where this argument goes in the fprs. */
10244 for (i = 0; i < n_elts
10245 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10247 /* Check if the argument is split over registers and memory.
10248 This can only ever happen for long double or _Decimal128;
10249 complex types are handled via split_complex_arg. */
10250 enum machine_mode fmode = elt_mode;
10251 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10253 gcc_assert (fmode == TFmode || fmode == TDmode);
10254 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10257 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10258 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10259 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10262 return rs6000_finish_function_arg (mode, rvec, k);
10264 else if (align_words < GP_ARG_NUM_REG)
10266 if (TARGET_32BIT && TARGET_POWERPC64)
10267 return rs6000_mixed_function_arg (mode, type, align_words);
10269 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10271 else
10272 return NULL_RTX;
10276 /* For an arg passed partly in registers and partly in memory, this is
10277 the number of bytes passed in registers. For args passed entirely in
10278 registers or entirely in memory, zero. When an arg is described by a
10279 PARALLEL, perhaps using more than one register type, this function
10280 returns the number of bytes used by the first element of the PARALLEL. */
10282 static int
10283 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
10284 tree type, bool named)
10286 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10287 bool passed_in_gprs = true;
10288 int ret = 0;
10289 int align_words;
10290 enum machine_mode elt_mode;
10291 int n_elts;
10293 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10295 if (DEFAULT_ABI == ABI_V4)
10296 return 0;
10298 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10300 /* If we are passing this arg in the fixed parameter save area
10301 (gprs or memory) as well as VRs, we do not use the partial
10302 bytes mechanism; instead, rs6000_function_arg will return a
10303 PARALLEL including a memory element as necessary. */
10304 if (TARGET_64BIT && ! cum->prototype)
10305 return 0;
10307 /* Otherwise, we pass in VRs only. Check for partial copies. */
10308 passed_in_gprs = false;
10309 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10310 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10313 /* In this complicated case we just disable the partial_nregs code. */
10314 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10315 return 0;
10317 align_words = rs6000_parm_start (mode, type, cum->words);
10319 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10321 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10323 /* If we are passing this arg in the fixed parameter save area
10324 (gprs or memory) as well as FPRs, we do not use the partial
10325 bytes mechanism; instead, rs6000_function_arg will return a
10326 PARALLEL including a memory element as necessary. */
10327 if (type
10328 && (cum->nargs_prototype <= 0
10329 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10330 && TARGET_XL_COMPAT
10331 && align_words >= GP_ARG_NUM_REG)))
10332 return 0;
10334 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10335 passed_in_gprs = false;
10336 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10337 ret = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10338 * MIN (8, GET_MODE_SIZE (elt_mode)));
10341 if (passed_in_gprs
10342 && align_words < GP_ARG_NUM_REG
10343 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10344 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10346 if (ret != 0 && TARGET_DEBUG_ARG)
10347 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10349 return ret;
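/* Editor's worked example (not part of the original source): on a
   64-bit AIX-style ABI, a TFmode long double arriving with
   cum->fregno == FP_ARG_MAX_REG needs two FPRs but only one remains,
   so ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8 = 8 bytes passed in
   registers; the remaining 8 bytes go in memory. */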
10352 /* A C expression that indicates when an argument must be passed by
10353 reference. If nonzero for an argument, a copy of that argument is
10354 made in memory and a pointer to the argument is passed instead of
10355 the argument itself. The pointer is passed in whatever way is
10356 appropriate for passing a pointer to that type.
10358 Under V.4, aggregates and long double are passed by reference.
10360 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10361 reference unless the AltiVec vector extension ABI is in force.
10363 As an extension to all ABIs, variable sized types are passed by
10364 reference. */
10366 static bool
10367 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10368 enum machine_mode mode, const_tree type,
10369 bool named ATTRIBUTE_UNUSED)
10371 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10373 if (TARGET_DEBUG_ARG)
10374 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10375 return 1;
10378 if (!type)
10379 return 0;
10381 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10383 if (TARGET_DEBUG_ARG)
10384 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10385 return 1;
10388 if (int_size_in_bytes (type) < 0)
10390 if (TARGET_DEBUG_ARG)
10391 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10392 return 1;
10395 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10396 modes only exist for GCC vector types if -maltivec. */
10397 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10399 if (TARGET_DEBUG_ARG)
10400 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10401 return 1;
10404 /* Pass synthetic vectors in memory. */
10405 if (TREE_CODE (type) == VECTOR_TYPE
10406 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10408 static bool warned_for_pass_big_vectors = false;
10409 if (TARGET_DEBUG_ARG)
10410 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10411 if (!warned_for_pass_big_vectors)
10413 warning (0, "GCC vector passed by reference: "
10414 "non-standard ABI extension with no compatibility guarantee");
10415 warned_for_pass_big_vectors = true;
10417 return 1;
10420 return 0;
10423 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
10424 already processed. Return true if the parameter must be passed
10425 (fully or partially) on the stack. */
10427 static bool
10428 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10430 enum machine_mode mode;
10431 int unsignedp;
10432 rtx entry_parm;
10434 /* Catch errors. */
10435 if (type == NULL || type == error_mark_node)
10436 return true;
10438 /* Handle types with no storage requirement. */
10439 if (TYPE_MODE (type) == VOIDmode)
10440 return false;
10442 /* Handle complex types. */
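/* Editor's note: the two identical-looking recursive calls below appear
   deliberate; each half of a complex argument is passed separately, and
   each call also advances args_so_far past one part. */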
10443 if (TREE_CODE (type) == COMPLEX_TYPE)
10444 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10445 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10447 /* Handle transparent aggregates. */
10448 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10449 && TYPE_TRANSPARENT_AGGR (type))
10450 type = TREE_TYPE (first_field (type));
10452 /* See if this arg was passed by invisible reference. */
10453 if (pass_by_reference (get_cumulative_args (args_so_far),
10454 TYPE_MODE (type), type, true))
10455 type = build_pointer_type (type);
10457 /* Find mode as it is passed by the ABI. */
10458 unsignedp = TYPE_UNSIGNED (type);
10459 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10461 /* If we must pass in stack, we need a stack. */
10462 if (rs6000_must_pass_in_stack (mode, type))
10463 return true;
10465 /* If there is no incoming register, we need a stack. */
10466 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10467 if (entry_parm == NULL)
10468 return true;
10470 /* Likewise if we need to pass both in registers and on the stack. */
10471 if (GET_CODE (entry_parm) == PARALLEL
10472 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10473 return true;
10475 /* Also true if we're partially in registers and partially not. */
10476 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10477 return true;
10479 /* Update info on where next arg arrives in registers. */
10480 rs6000_function_arg_advance (args_so_far, mode, type, true);
10481 return false;
10484 /* Return true if FUN has no prototype, has a variable argument
10485 list, or passes any parameter in memory. */
10487 static bool
10488 rs6000_function_parms_need_stack (tree fun)
10490 function_args_iterator args_iter;
10491 tree arg_type;
10492 CUMULATIVE_ARGS args_so_far_v;
10493 cumulative_args_t args_so_far;
10495 if (!fun)
10496 /* Must be a libcall, all of which only use reg parms. */
10497 return false;
10498 if (!TYPE_P (fun))
10499 fun = TREE_TYPE (fun);
10501 /* Varargs functions need the parameter save area. */
10502 if (!prototype_p (fun) || stdarg_p (fun))
10503 return true;
10505 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fun, NULL_RTX);
10506 args_so_far = pack_cumulative_args (&args_so_far_v);
10508 if (aggregate_value_p (TREE_TYPE (fun), fun))
10510 tree type = build_pointer_type (TREE_TYPE (fun));
10511 rs6000_parm_needs_stack (args_so_far, type);
10514 FOREACH_FUNCTION_ARGS (fun, arg_type, args_iter)
10515 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10516 return true;
10518 return false;
10521 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10522 usually a constant depending on the ABI. However, in the ELFv2 ABI
10523 the register parameter area is optional when calling a function that
10524 has a prototype in scope, has no variable argument list, and passes
10525 all parameters in registers. */
10528 rs6000_reg_parm_stack_space (tree fun)
10530 int reg_parm_stack_space;
10532 switch (DEFAULT_ABI)
10534 default:
10535 reg_parm_stack_space = 0;
10536 break;
10538 case ABI_AIX:
10539 case ABI_DARWIN:
10540 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10541 break;
10543 case ABI_ELFv2:
10544 /* ??? Recomputing this every time is a bit expensive. Is there
10545 a place to cache this information? */
10546 if (rs6000_function_parms_need_stack (fun))
10547 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10548 else
10549 reg_parm_stack_space = 0;
10550 break;
10553 return reg_parm_stack_space;
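/* Editor's example (not part of the original source): under ELFv2,
   calling int f (int, int) with a prototype in scope needs no parameter
   save area, so this returns 0; a varargs callee gets the full 64-byte
   (64-bit) area. */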
10556 static void
10557 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
10559 int i;
10560 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
10562 if (nregs == 0)
10563 return;
10565 for (i = 0; i < nregs; i++)
10567 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
10568 if (reload_completed)
10570 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
10571 tem = NULL_RTX;
10572 else
10573 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
10574 i * GET_MODE_SIZE (reg_mode));
10576 else
10577 tem = replace_equiv_address (tem, XEXP (tem, 0));
10579 gcc_assert (tem);
10581 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
10585 /* Perform any actions needed for a function that is receiving a
10586 variable number of arguments.
10588 CUM is as above.
10590 MODE and TYPE are the mode and type of the current parameter.
10592 PRETEND_SIZE is a variable that should be set to the amount of stack
10593 that must be pushed by the prolog to pretend that our caller pushed
10594 it.
10596 Normally, this macro will push all remaining incoming registers on the
10597 stack and set PRETEND_SIZE to the length of the registers pushed. */
10599 static void
10600 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
10601 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10602 int no_rtl)
10604 CUMULATIVE_ARGS next_cum;
10605 int reg_size = TARGET_32BIT ? 4 : 8;
10606 rtx save_area = NULL_RTX, mem;
10607 int first_reg_offset;
10608 alias_set_type set;
10610 /* Skip the last named argument. */
10611 next_cum = *get_cumulative_args (cum);
10612 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
10614 if (DEFAULT_ABI == ABI_V4)
10616 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
10618 if (! no_rtl)
10620 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
10621 HOST_WIDE_INT offset = 0;
10623 /* Try to optimize the size of the varargs save area.
10624 The ABI requires that ap.reg_save_area is doubleword
10625 aligned, but we don't need to allocate space for all
10626 the bytes, only those to which we actually will save
10627 anything. */
10628 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
10629 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
10630 if (TARGET_HARD_FLOAT && TARGET_FPRS
10631 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10632 && cfun->va_list_fpr_size)
10634 if (gpr_reg_num)
10635 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
10636 * UNITS_PER_FP_WORD;
10637 if (cfun->va_list_fpr_size
10638 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10639 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
10640 else
10641 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10642 * UNITS_PER_FP_WORD;
10644 if (gpr_reg_num)
10646 offset = -((first_reg_offset * reg_size) & ~7);
10647 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
10649 gpr_reg_num = cfun->va_list_gpr_size;
10650 if (reg_size == 4 && (first_reg_offset & 1))
10651 gpr_reg_num++;
10653 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
10655 else if (fpr_size)
10656 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
10657 * UNITS_PER_FP_WORD
10658 - (int) (GP_ARG_NUM_REG * reg_size);
10660 if (gpr_size + fpr_size)
10662 rtx reg_save_area
10663 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
10664 gcc_assert (GET_CODE (reg_save_area) == MEM);
10665 reg_save_area = XEXP (reg_save_area, 0);
10666 if (GET_CODE (reg_save_area) == PLUS)
10668 gcc_assert (XEXP (reg_save_area, 0)
10669 == virtual_stack_vars_rtx);
10670 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
10671 offset += INTVAL (XEXP (reg_save_area, 1));
10673 else
10674 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
10677 cfun->machine->varargs_save_offset = offset;
10678 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
10681 else
10683 first_reg_offset = next_cum.words;
10684 save_area = virtual_incoming_args_rtx;
10686 if (targetm.calls.must_pass_in_stack (mode, type))
10687 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
10690 set = get_varargs_alias_set ();
10691 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
10692 && cfun->va_list_gpr_size)
10694 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
10696 if (va_list_gpr_counter_field)
10697 /* V4 va_list_gpr_size counts number of registers needed. */
10698 n_gpr = cfun->va_list_gpr_size;
10699 else
10700 /* char * va_list instead counts number of bytes needed. */
10701 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
10703 if (nregs > n_gpr)
10704 nregs = n_gpr;
10706 mem = gen_rtx_MEM (BLKmode,
10707 plus_constant (Pmode, save_area,
10708 first_reg_offset * reg_size));
10709 MEM_NOTRAP_P (mem) = 1;
10710 set_mem_alias_set (mem, set);
10711 set_mem_align (mem, BITS_PER_WORD);
10713 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
10714 nregs);
10717 /* Save FP registers if needed. */
10718 if (DEFAULT_ABI == ABI_V4
10719 && TARGET_HARD_FLOAT && TARGET_FPRS
10720 && ! no_rtl
10721 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10722 && cfun->va_list_fpr_size)
10724 int fregno = next_cum.fregno, nregs;
10725 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
10726 rtx lab = gen_label_rtx ();
10727 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
10728 * UNITS_PER_FP_WORD);
10730 emit_jump_insn
10731 (gen_rtx_SET (VOIDmode,
10732 pc_rtx,
10733 gen_rtx_IF_THEN_ELSE (VOIDmode,
10734 gen_rtx_NE (VOIDmode, cr1,
10735 const0_rtx),
10736 gen_rtx_LABEL_REF (VOIDmode, lab),
10737 pc_rtx)));
10739 for (nregs = 0;
10740 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
10741 fregno++, off += UNITS_PER_FP_WORD, nregs++)
10743 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10744 ? DFmode : SFmode,
10745 plus_constant (Pmode, save_area, off));
10746 MEM_NOTRAP_P (mem) = 1;
10747 set_mem_alias_set (mem, set);
10748 set_mem_align (mem, GET_MODE_ALIGNMENT (
10749 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10750 ? DFmode : SFmode));
10751 emit_move_insn (mem, gen_rtx_REG (
10752 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10753 ? DFmode : SFmode, fregno));
10756 emit_label (lab);
10760 /* Create the va_list data type. */
10762 static tree
10763 rs6000_build_builtin_va_list (void)
10765 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
10767 /* For AIX, prefer 'char *' because that's what the system
10768 header files like. */
10769 if (DEFAULT_ABI != ABI_V4)
10770 return build_pointer_type (char_type_node);
10772 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
10773 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
10774 get_identifier ("__va_list_tag"), record);
10776 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
10777 unsigned_char_type_node);
10778 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
10779 unsigned_char_type_node);
10780 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
10781 every user file. */
10782 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10783 get_identifier ("reserved"), short_unsigned_type_node);
10784 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10785 get_identifier ("overflow_arg_area"),
10786 ptr_type_node);
10787 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
10788 get_identifier ("reg_save_area"),
10789 ptr_type_node);
10791 va_list_gpr_counter_field = f_gpr;
10792 va_list_fpr_counter_field = f_fpr;
10794 DECL_FIELD_CONTEXT (f_gpr) = record;
10795 DECL_FIELD_CONTEXT (f_fpr) = record;
10796 DECL_FIELD_CONTEXT (f_res) = record;
10797 DECL_FIELD_CONTEXT (f_ovf) = record;
10798 DECL_FIELD_CONTEXT (f_sav) = record;
10800 TYPE_STUB_DECL (record) = type_decl;
10801 TYPE_NAME (record) = type_decl;
10802 TYPE_FIELDS (record) = f_gpr;
10803 DECL_CHAIN (f_gpr) = f_fpr;
10804 DECL_CHAIN (f_fpr) = f_res;
10805 DECL_CHAIN (f_res) = f_ovf;
10806 DECL_CHAIN (f_ovf) = f_sav;
10808 layout_type (record);
10810 /* The correct type is an array type of one element. */
10811 return build_array_type (record, build_index_type (size_zero_node));
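/* Editor's sketch (not part of the original source): the V4 va_list
   built above is equivalent to this C declaration. */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;		/* GP argument registers consumed so far */
  unsigned char fpr;		/* FP argument registers consumed so far */
  unsigned short reserved;	/* named padding, keeps -Wpadded quiet */
  void *overflow_arg_area;	/* arguments passed on the stack */
  void *reg_save_area;		/* the register save area */
} __gnuc_va_list[1];		/* an array type of one element */
#endif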
10814 /* Implement va_start. */
10816 static void
10817 rs6000_va_start (tree valist, rtx nextarg)
10819 HOST_WIDE_INT words, n_gpr, n_fpr;
10820 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10821 tree gpr, fpr, ovf, sav, t;
10823 /* Only SVR4 needs something special. */
10824 if (DEFAULT_ABI != ABI_V4)
10826 std_expand_builtin_va_start (valist, nextarg);
10827 return;
10830 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10831 f_fpr = DECL_CHAIN (f_gpr);
10832 f_res = DECL_CHAIN (f_fpr);
10833 f_ovf = DECL_CHAIN (f_res);
10834 f_sav = DECL_CHAIN (f_ovf);
10836 valist = build_simple_mem_ref (valist);
10837 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
10838 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
10839 f_fpr, NULL_TREE);
10840 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
10841 f_ovf, NULL_TREE);
10842 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
10843 f_sav, NULL_TREE);
10845 /* Count number of gp and fp argument registers used. */
10846 words = crtl->args.info.words;
10847 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
10848 GP_ARG_NUM_REG);
10849 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
10850 FP_ARG_NUM_REG);
10852 if (TARGET_DEBUG_ARG)
10853 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
10854 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
10855 words, n_gpr, n_fpr);
10857 if (cfun->va_list_gpr_size)
10859 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
10860 build_int_cst (NULL_TREE, n_gpr));
10861 TREE_SIDE_EFFECTS (t) = 1;
10862 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10865 if (cfun->va_list_fpr_size)
10867 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
10868 build_int_cst (NULL_TREE, n_fpr));
10869 TREE_SIDE_EFFECTS (t) = 1;
10870 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10872 #ifdef HAVE_AS_GNU_ATTRIBUTE
10873 if (call_ABI_of_interest (cfun->decl))
10874 rs6000_passes_float = true;
10875 #endif
10878 /* Find the overflow area. */
10879 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
10880 if (words != 0)
10881 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
10882 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
10883 TREE_SIDE_EFFECTS (t) = 1;
10884 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10886 /* If there were no va_arg invocations, don't set up the register
10887 save area. */
10888 if (!cfun->va_list_gpr_size
10889 && !cfun->va_list_fpr_size
10890 && n_gpr < GP_ARG_NUM_REG
10891 && n_fpr < FP_ARG_V4_MAX_REG)
10892 return;
10894 /* Find the register save area. */
10895 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
10896 if (cfun->machine->varargs_save_offset)
10897 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
10898 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
10899 TREE_SIDE_EFFECTS (t) = 1;
10900 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
10903 /* Implement va_arg. */
10905 static tree
10906 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
10907 gimple_seq *post_p)
10909 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
10910 tree gpr, fpr, ovf, sav, reg, t, u;
10911 int size, rsize, n_reg, sav_ofs, sav_scale;
10912 tree lab_false, lab_over, addr;
10913 int align;
10914 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
10915 int regalign = 0;
10916 gimple stmt;
10918 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
10920 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
10921 return build_va_arg_indirect_ref (t);
10924 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
10925 earlier version of gcc, with the property that it always applied alignment
10926 adjustments to the va-args (even for zero-sized types). The cheapest way
10927 to deal with this is to replicate the effect of the part of
10928 std_gimplify_va_arg_expr that carries out the align adjust, for the case
10929 of relevance.
10930 We don't need to check for pass-by-reference because of the test above.
10931 We can return a simplified answer, since we know there's no offset to add. */
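/* Editor's illustration (not in the original source): the GIMPLE built
   below implements the usual round-up-to-boundary idiom, roughly

     valist = (char *) (((uintptr_t) valist + boundary - 1) & -boundary);

   with BOUNDARY in bytes, as computed just below from
   rs6000_function_arg_boundary. */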
10933 if (((TARGET_MACHO
10934 && rs6000_darwin64_abi)
10935 || DEFAULT_ABI == ABI_ELFv2
10936 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
10937 && integer_zerop (TYPE_SIZE (type)))
10939 unsigned HOST_WIDE_INT align, boundary;
10940 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
10941 align = PARM_BOUNDARY / BITS_PER_UNIT;
10942 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
10943 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
10944 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
10945 boundary /= BITS_PER_UNIT;
10946 if (boundary > align)
10948 tree t;
10949 /* This updates arg ptr by the amount that would be necessary
10950 to align the zero-sized (but not zero-alignment) item. */
10951 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10952 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
10953 gimplify_and_add (t, pre_p);
10955 t = fold_convert (sizetype, valist_tmp);
10956 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
10957 fold_convert (TREE_TYPE (valist),
10958 fold_build2 (BIT_AND_EXPR, sizetype, t,
10959 size_int (-boundary))));
10960 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
10961 gimplify_and_add (t, pre_p);
10963 /* Since it is zero-sized there's no increment for the item itself. */
10964 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
10965 return build_va_arg_indirect_ref (valist_tmp);
10968 if (DEFAULT_ABI != ABI_V4)
10970 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
10972 tree elem_type = TREE_TYPE (type);
10973 enum machine_mode elem_mode = TYPE_MODE (elem_type);
10974 int elem_size = GET_MODE_SIZE (elem_mode);
10976 if (elem_size < UNITS_PER_WORD)
10978 tree real_part, imag_part;
10979 gimple_seq post = NULL;
10981 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10982 &post);
10983 /* Copy the value into a temporary, lest the formal temporary
10984 be reused out from under us. */
10985 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
10986 gimple_seq_add_seq (pre_p, post);
10988 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
10989 post_p);
10991 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
10995 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
10998 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
10999 f_fpr = DECL_CHAIN (f_gpr);
11000 f_res = DECL_CHAIN (f_fpr);
11001 f_ovf = DECL_CHAIN (f_res);
11002 f_sav = DECL_CHAIN (f_ovf);
11004 valist = build_va_arg_indirect_ref (valist);
11005 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11006 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11007 f_fpr, NULL_TREE);
11008 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11009 f_ovf, NULL_TREE);
11010 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11011 f_sav, NULL_TREE);
11013 size = int_size_in_bytes (type);
11014 rsize = (size + 3) / 4;
11015 align = 1;
11017 if (TARGET_HARD_FLOAT && TARGET_FPRS
11018 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
11019 || (TARGET_DOUBLE_FLOAT
11020 && (TYPE_MODE (type) == DFmode
11021 || TYPE_MODE (type) == TFmode
11022 || TYPE_MODE (type) == SDmode
11023 || TYPE_MODE (type) == DDmode
11024 || TYPE_MODE (type) == TDmode))))
11026 /* FP args go in FP registers, if present. */
11027 reg = fpr;
11028 n_reg = (size + 7) / 8;
11029 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
11030 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
11031 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
11032 align = 8;
11034 else
11036 /* Otherwise into GP registers. */
11037 reg = gpr;
11038 n_reg = rsize;
11039 sav_ofs = 0;
11040 sav_scale = 4;
11041 if (n_reg == 2)
11042 align = 8;
11045 /* Pull the value out of the saved registers.... */
11047 lab_over = NULL;
11048 addr = create_tmp_var (ptr_type_node, "addr");
11050 /* AltiVec vectors never go in registers when -mabi=altivec. */
11051 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11052 align = 16;
11053 else
11055 lab_false = create_artificial_label (input_location);
11056 lab_over = create_artificial_label (input_location);
11058 /* Long long and SPE vectors are aligned in the registers.
11059 As is any other 2-gpr item, such as complex int, due to a
11060 historical mistake. */
11061 u = reg;
11062 if (n_reg == 2 && reg == gpr)
11064 regalign = 1;
11065 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11066 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11067 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11068 unshare_expr (reg), u);
11070 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11071 reg number is 0 for f1, so we want to make it odd. */
11072 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11074 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11075 build_int_cst (TREE_TYPE (reg), 1));
11076 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11079 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11080 t = build2 (GE_EXPR, boolean_type_node, u, t);
11081 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11082 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11083 gimplify_and_add (t, pre_p);
11085 t = sav;
11086 if (sav_ofs)
11087 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11089 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11090 build_int_cst (TREE_TYPE (reg), n_reg));
11091 u = fold_convert (sizetype, u);
11092 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11093 t = fold_build_pointer_plus (t, u);
11095 /* _Decimal32 varargs are located in the second word of the 64-bit
11096 FP register for 32-bit binaries. */
11097 if (!TARGET_POWERPC64
11098 && TARGET_HARD_FLOAT && TARGET_FPRS
11099 && TYPE_MODE (type) == SDmode)
11100 t = fold_build_pointer_plus_hwi (t, size);
11102 gimplify_assign (addr, t, pre_p);
11104 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11106 stmt = gimple_build_label (lab_false);
11107 gimple_seq_add_stmt (pre_p, stmt);
11109 if ((n_reg == 2 && !regalign) || n_reg > 2)
11111 /* Ensure that we don't find any more args in regs.
11112 Alignment has been taken care of for the special cases. */
11113 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11117 /* ... otherwise out of the overflow area. */
11119 /* Care for on-stack alignment if needed. */
11120 t = ovf;
11121 if (align != 1)
11123 t = fold_build_pointer_plus_hwi (t, align - 1);
11124 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11125 build_int_cst (TREE_TYPE (t), -align));
11127 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11129 gimplify_assign (unshare_expr (addr), t, pre_p);
11131 t = fold_build_pointer_plus_hwi (t, size);
11132 gimplify_assign (unshare_expr (ovf), t, pre_p);
11134 if (lab_over)
11136 stmt = gimple_build_label (lab_over);
11137 gimple_seq_add_stmt (pre_p, stmt);
11140 if (STRICT_ALIGNMENT
11141 && (TYPE_ALIGN (type)
11142 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11144 /* The value (of type complex double, for example) may not be
11145 aligned in memory in the saved registers, so copy via a
11146 temporary. (This is the same code as used for SPARC.) */
11147 tree tmp = create_tmp_var (type, "va_arg_tmp");
11148 tree dest_addr = build_fold_addr_expr (tmp);
11150 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11151 3, dest_addr, addr, size_int (rsize * 4));
11153 gimplify_and_add (copy, pre_p);
11154 addr = dest_addr;
11157 addr = fold_convert (ptrtype, addr);
11158 return build_va_arg_indirect_ref (addr);
11161 /* Builtins. */
11163 static void
11164 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11166 tree t;
11167 unsigned classify = rs6000_builtin_info[(int)code].attr;
11168 const char *attr_string = "";
11170 gcc_assert (name != NULL);
11171 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11173 if (rs6000_builtin_decls[(int)code])
11174 fatal_error ("internal error: builtin function %s already processed", name);
11176 rs6000_builtin_decls[(int)code] = t =
11177 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11179 /* Set any special attributes. */
11180 if ((classify & RS6000_BTC_CONST) != 0)
11182 /* const function, function only depends on the inputs. */
11183 TREE_READONLY (t) = 1;
11184 TREE_NOTHROW (t) = 1;
11185 attr_string = ", const";
11187 else if ((classify & RS6000_BTC_PURE) != 0)
11189 /* pure function, function can read global memory, but does not set any
11190 external state. */
11191 DECL_PURE_P (t) = 1;
11192 TREE_NOTHROW (t) = 1;
11193 attr_string = ", pure";
11195 else if ((classify & RS6000_BTC_FP) != 0)
11197 /* Function is a math function. If rounding mode is on, then treat the
11198 function as not reading global memory, but it can have arbitrary side
11199 effects. If it is off, then assume the function is a const function.
11200 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11201 builtin-attribute.def that is used for the math functions. */
11202 TREE_NOTHROW (t) = 1;
11203 if (flag_rounding_math)
11205 DECL_PURE_P (t) = 1;
11206 DECL_IS_NOVOPS (t) = 1;
11207 attr_string = ", fp, pure";
11209 else
11211 TREE_READONLY (t) = 1;
11212 attr_string = ", fp, const";
11215 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11216 gcc_unreachable ();
11218 if (TARGET_DEBUG_BUILTIN)
11219 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11220 (int)code, name, attr_string);
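/* Usage sketch (editor's illustration): def_builtin is driven by the
   bdesc_* tables below rather than called with literal arguments; the
   rs6000_*_init_builtins routines register each table entry roughly as

     def_builtin (d->name, type, (enum rs6000_builtins) d->code);

   where TYPE is a function type derived from the entry's insn operands. */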
11223 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11225 #undef RS6000_BUILTIN_1
11226 #undef RS6000_BUILTIN_2
11227 #undef RS6000_BUILTIN_3
11228 #undef RS6000_BUILTIN_A
11229 #undef RS6000_BUILTIN_D
11230 #undef RS6000_BUILTIN_E
11231 #undef RS6000_BUILTIN_H
11232 #undef RS6000_BUILTIN_P
11233 #undef RS6000_BUILTIN_Q
11234 #undef RS6000_BUILTIN_S
11235 #undef RS6000_BUILTIN_X
11237 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11238 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11239 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11240 { MASK, ICODE, NAME, ENUM },
11242 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11243 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11244 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11245 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11246 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11247 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11248 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11249 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11251 static const struct builtin_description bdesc_3arg[] =
11253 #include "rs6000-builtin.def"
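/* Editor's illustration (names assumed; see rs6000-builtin.def for the
   real entries): with the definitions above, a ternary entry such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   contributes { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
   "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP } to bdesc_3arg,
   while entries of every other arity expand to nothing here. */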
11256 /* DST operations: void foo (void *, const int, const char). */
11258 #undef RS6000_BUILTIN_1
11259 #undef RS6000_BUILTIN_2
11260 #undef RS6000_BUILTIN_3
11261 #undef RS6000_BUILTIN_A
11262 #undef RS6000_BUILTIN_D
11263 #undef RS6000_BUILTIN_E
11264 #undef RS6000_BUILTIN_H
11265 #undef RS6000_BUILTIN_P
11266 #undef RS6000_BUILTIN_Q
11267 #undef RS6000_BUILTIN_S
11268 #undef RS6000_BUILTIN_X
11270 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11271 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11272 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11273 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11274 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11275 { MASK, ICODE, NAME, ENUM },
11277 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11278 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11279 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11280 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11281 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11282 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11284 static const struct builtin_description bdesc_dst[] =
11286 #include "rs6000-builtin.def"
11289 /* Simple binary operations: VECc = foo (VECa, VECb). */
11291 #undef RS6000_BUILTIN_1
11292 #undef RS6000_BUILTIN_2
11293 #undef RS6000_BUILTIN_3
11294 #undef RS6000_BUILTIN_A
11295 #undef RS6000_BUILTIN_D
11296 #undef RS6000_BUILTIN_E
11297 #undef RS6000_BUILTIN_H
11298 #undef RS6000_BUILTIN_P
11299 #undef RS6000_BUILTIN_Q
11300 #undef RS6000_BUILTIN_S
11301 #undef RS6000_BUILTIN_X
11303 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11304 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11305 { MASK, ICODE, NAME, ENUM },
11307 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11308 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11309 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11310 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11311 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11312 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11313 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11314 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11315 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11317 static const struct builtin_description bdesc_2arg[] =
11319 #include "rs6000-builtin.def"
11322 #undef RS6000_BUILTIN_1
11323 #undef RS6000_BUILTIN_2
11324 #undef RS6000_BUILTIN_3
11325 #undef RS6000_BUILTIN_A
11326 #undef RS6000_BUILTIN_D
11327 #undef RS6000_BUILTIN_E
11328 #undef RS6000_BUILTIN_H
11329 #undef RS6000_BUILTIN_P
11330 #undef RS6000_BUILTIN_Q
11331 #undef RS6000_BUILTIN_S
11332 #undef RS6000_BUILTIN_X
11334 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11335 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11336 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11337 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11338 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11339 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11340 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11341 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11342 { MASK, ICODE, NAME, ENUM },
11344 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11345 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11346 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11348 /* AltiVec predicates. */
11350 static const struct builtin_description bdesc_altivec_preds[] =
11352 #include "rs6000-builtin.def"
11355 /* SPE predicates. */
11356 #undef RS6000_BUILTIN_1
11357 #undef RS6000_BUILTIN_2
11358 #undef RS6000_BUILTIN_3
11359 #undef RS6000_BUILTIN_A
11360 #undef RS6000_BUILTIN_D
11361 #undef RS6000_BUILTIN_E
11362 #undef RS6000_BUILTIN_H
11363 #undef RS6000_BUILTIN_P
11364 #undef RS6000_BUILTIN_Q
11365 #undef RS6000_BUILTIN_S
11366 #undef RS6000_BUILTIN_X
11368 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11369 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11370 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11371 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11372 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11373 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11374 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11375 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11376 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11377 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11378 { MASK, ICODE, NAME, ENUM },
11380 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11382 static const struct builtin_description bdesc_spe_predicates[] =
11384 #include "rs6000-builtin.def"
11387 /* SPE evsel predicates. */
11388 #undef RS6000_BUILTIN_1
11389 #undef RS6000_BUILTIN_2
11390 #undef RS6000_BUILTIN_3
11391 #undef RS6000_BUILTIN_A
11392 #undef RS6000_BUILTIN_D
11393 #undef RS6000_BUILTIN_E
11394 #undef RS6000_BUILTIN_H
11395 #undef RS6000_BUILTIN_P
11396 #undef RS6000_BUILTIN_Q
11397 #undef RS6000_BUILTIN_S
11398 #undef RS6000_BUILTIN_X
11400 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11401 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11402 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11403 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11404 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11405 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11406 { MASK, ICODE, NAME, ENUM },
11408 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11409 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11410 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11411 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11412 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11414 static const struct builtin_description bdesc_spe_evsel[] =
11416 #include "rs6000-builtin.def"
11419 /* PAIRED predicates. */
11420 #undef RS6000_BUILTIN_1
11421 #undef RS6000_BUILTIN_2
11422 #undef RS6000_BUILTIN_3
11423 #undef RS6000_BUILTIN_A
11424 #undef RS6000_BUILTIN_D
11425 #undef RS6000_BUILTIN_E
11426 #undef RS6000_BUILTIN_H
11427 #undef RS6000_BUILTIN_P
11428 #undef RS6000_BUILTIN_Q
11429 #undef RS6000_BUILTIN_S
11430 #undef RS6000_BUILTIN_X
11432 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11433 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11434 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11435 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11436 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11437 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11438 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11439 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11440 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11441 { MASK, ICODE, NAME, ENUM },
11443 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11444 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11446 static const struct builtin_description bdesc_paired_preds[] =
11448 #include "rs6000-builtin.def"
11451 /* ABS* operations. */
11453 #undef RS6000_BUILTIN_1
11454 #undef RS6000_BUILTIN_2
11455 #undef RS6000_BUILTIN_3
11456 #undef RS6000_BUILTIN_A
11457 #undef RS6000_BUILTIN_D
11458 #undef RS6000_BUILTIN_E
11459 #undef RS6000_BUILTIN_H
11460 #undef RS6000_BUILTIN_P
11461 #undef RS6000_BUILTIN_Q
11462 #undef RS6000_BUILTIN_S
11463 #undef RS6000_BUILTIN_X
11465 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11466 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11467 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11468 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11469 { MASK, ICODE, NAME, ENUM },
11471 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11472 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11473 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11474 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11475 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11476 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11477 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11479 static const struct builtin_description bdesc_abs[] =
11481 #include "rs6000-builtin.def"
11484 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11485 foo (VECa). */
11487 #undef RS6000_BUILTIN_1
11488 #undef RS6000_BUILTIN_2
11489 #undef RS6000_BUILTIN_3
11490 #undef RS6000_BUILTIN_A
11491 #undef RS6000_BUILTIN_D
11492 #undef RS6000_BUILTIN_E
11493 #undef RS6000_BUILTIN_H
11494 #undef RS6000_BUILTIN_P
11495 #undef RS6000_BUILTIN_Q
11496 #undef RS6000_BUILTIN_S
11497 #undef RS6000_BUILTIN_X
11499 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11500 { MASK, ICODE, NAME, ENUM },
11502 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11503 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11504 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11505 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11506 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11507 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11508 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11509 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11510 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11511 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11513 static const struct builtin_description bdesc_1arg[] =
11515 #include "rs6000-builtin.def"
11518 /* HTM builtins. */
11519 #undef RS6000_BUILTIN_1
11520 #undef RS6000_BUILTIN_2
11521 #undef RS6000_BUILTIN_3
11522 #undef RS6000_BUILTIN_A
11523 #undef RS6000_BUILTIN_D
11524 #undef RS6000_BUILTIN_E
11525 #undef RS6000_BUILTIN_H
11526 #undef RS6000_BUILTIN_P
11527 #undef RS6000_BUILTIN_Q
11528 #undef RS6000_BUILTIN_S
11529 #undef RS6000_BUILTIN_X
11531 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11532 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11533 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11534 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11535 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11536 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11537 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11538 { MASK, ICODE, NAME, ENUM },
11540 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11541 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11542 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11543 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11545 static const struct builtin_description bdesc_htm[] =
11547 #include "rs6000-builtin.def"
11550 #undef RS6000_BUILTIN_1
11551 #undef RS6000_BUILTIN_2
11552 #undef RS6000_BUILTIN_3
11553 #undef RS6000_BUILTIN_A
11554 #undef RS6000_BUILTIN_D
11555 #undef RS6000_BUILTIN_E
11556 #undef RS6000_BUILTIN_H
11557 #undef RS6000_BUILTIN_P
11558 #undef RS6000_BUILTIN_Q
11559 #undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
11561 /* Return true if a builtin function is overloaded. */
11562 bool
11563 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
11565 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
11568 /* Expand an expression EXP that calls a builtin without arguments. */
11569 static rtx
11570 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
11572 rtx pat;
11573 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11575 if (icode == CODE_FOR_nothing)
11576 /* Builtin not supported on this processor. */
11577 return 0;
11579 if (target == 0
11580 || GET_MODE (target) != tmode
11581 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11582 target = gen_reg_rtx (tmode);
11584 pat = GEN_FCN (icode) (target);
11585 if (! pat)
11586 return 0;
11587 emit_insn (pat);
11589 return target;
11593 static rtx
11594 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
11596 rtx pat;
11597 tree arg0 = CALL_EXPR_ARG (exp, 0);
11598 tree arg1 = CALL_EXPR_ARG (exp, 1);
11599 rtx op0 = expand_normal (arg0);
11600 rtx op1 = expand_normal (arg1);
11601 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11602 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11604 if (icode == CODE_FOR_nothing)
11605 /* Builtin not supported on this processor. */
11606 return 0;
11608 /* If we got invalid arguments bail out before generating bad rtl. */
11609 if (arg0 == error_mark_node || arg1 == error_mark_node)
11610 return const0_rtx;
11612 if (GET_CODE (op0) != CONST_INT
11613 || INTVAL (op0) > 255
11614 || INTVAL (op0) < 0)
11616 error ("argument 1 must be an 8-bit field value");
11617 return const0_rtx;
11620 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11621 op0 = copy_to_mode_reg (mode0, op0);
11623 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11624 op1 = copy_to_mode_reg (mode1, op1);
11626 pat = GEN_FCN (icode) (op0, op1);
11627 if (! pat)
11628 return const0_rtx;
11629 emit_insn (pat);
11631 return NULL_RTX;
11635 static rtx
11636 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
11638 rtx pat;
11639 tree arg0 = CALL_EXPR_ARG (exp, 0);
11640 rtx op0 = expand_normal (arg0);
11641 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11642 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11644 if (icode == CODE_FOR_nothing)
11645 /* Builtin not supported on this processor. */
11646 return 0;
11648 /* If we got invalid arguments bail out before generating bad rtl. */
11649 if (arg0 == error_mark_node)
11650 return const0_rtx;
11652 if (icode == CODE_FOR_altivec_vspltisb
11653 || icode == CODE_FOR_altivec_vspltish
11654 || icode == CODE_FOR_altivec_vspltisw
11655 || icode == CODE_FOR_spe_evsplatfi
11656 || icode == CODE_FOR_spe_evsplati)
11658 /* Only allow 5-bit *signed* literals. */
11659 if (GET_CODE (op0) != CONST_INT
11660 || INTVAL (op0) > 15
11661 || INTVAL (op0) < -16)
11663 error ("argument 1 must be a 5-bit signed literal");
11664 return const0_rtx;
11668 if (target == 0
11669 || GET_MODE (target) != tmode
11670 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11671 target = gen_reg_rtx (tmode);
11673 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11674 op0 = copy_to_mode_reg (mode0, op0);
11676 pat = GEN_FCN (icode) (target, op0);
11677 if (! pat)
11678 return 0;
11679 emit_insn (pat);
11681 return target;
11684 static rtx
11685 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
11687 rtx pat, scratch1, scratch2;
11688 tree arg0 = CALL_EXPR_ARG (exp, 0);
11689 rtx op0 = expand_normal (arg0);
11690 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11691 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11693 /* If we have invalid arguments, bail out before generating bad rtl. */
11694 if (arg0 == error_mark_node)
11695 return const0_rtx;
11697 if (target == 0
11698 || GET_MODE (target) != tmode
11699 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11700 target = gen_reg_rtx (tmode);
11702 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11703 op0 = copy_to_mode_reg (mode0, op0);
11705 scratch1 = gen_reg_rtx (mode0);
11706 scratch2 = gen_reg_rtx (mode0);
11708 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
11709 if (! pat)
11710 return 0;
11711 emit_insn (pat);
11713 return target;
11716 static rtx
11717 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
11719 rtx pat;
11720 tree arg0 = CALL_EXPR_ARG (exp, 0);
11721 tree arg1 = CALL_EXPR_ARG (exp, 1);
11722 rtx op0 = expand_normal (arg0);
11723 rtx op1 = expand_normal (arg1);
11724 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11725 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11726 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11728 if (icode == CODE_FOR_nothing)
11729 /* Builtin not supported on this processor. */
11730 return 0;
11732 /* If we got invalid arguments bail out before generating bad rtl. */
11733 if (arg0 == error_mark_node || arg1 == error_mark_node)
11734 return const0_rtx;
11736 if (icode == CODE_FOR_altivec_vcfux
11737 || icode == CODE_FOR_altivec_vcfsx
11738 || icode == CODE_FOR_altivec_vctsxs
11739 || icode == CODE_FOR_altivec_vctuxs
11740 || icode == CODE_FOR_altivec_vspltb
11741 || icode == CODE_FOR_altivec_vsplth
11742 || icode == CODE_FOR_altivec_vspltw
11743 || icode == CODE_FOR_spe_evaddiw
11744 || icode == CODE_FOR_spe_evldd
11745 || icode == CODE_FOR_spe_evldh
11746 || icode == CODE_FOR_spe_evldw
11747 || icode == CODE_FOR_spe_evlhhesplat
11748 || icode == CODE_FOR_spe_evlhhossplat
11749 || icode == CODE_FOR_spe_evlhhousplat
11750 || icode == CODE_FOR_spe_evlwhe
11751 || icode == CODE_FOR_spe_evlwhos
11752 || icode == CODE_FOR_spe_evlwhou
11753 || icode == CODE_FOR_spe_evlwhsplat
11754 || icode == CODE_FOR_spe_evlwwsplat
11755 || icode == CODE_FOR_spe_evrlwi
11756 || icode == CODE_FOR_spe_evslwi
11757 || icode == CODE_FOR_spe_evsrwis
11758 || icode == CODE_FOR_spe_evsubifw
11759 || icode == CODE_FOR_spe_evsrwiu)
11761 /* Only allow 5-bit unsigned literals. */
11762 STRIP_NOPS (arg1);
11763 if (TREE_CODE (arg1) != INTEGER_CST
11764 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11766 error ("argument 2 must be a 5-bit unsigned literal");
11767 return const0_rtx;
11771 if (target == 0
11772 || GET_MODE (target) != tmode
11773 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11774 target = gen_reg_rtx (tmode);
11776 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11777 op0 = copy_to_mode_reg (mode0, op0);
11778 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11779 op1 = copy_to_mode_reg (mode1, op1);
11781 pat = GEN_FCN (icode) (target, op0, op1);
11782 if (! pat)
11783 return 0;
11784 emit_insn (pat);
11786 return target;
11789 static rtx
11790 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
11792 rtx pat, scratch;
11793 tree cr6_form = CALL_EXPR_ARG (exp, 0);
11794 tree arg0 = CALL_EXPR_ARG (exp, 1);
11795 tree arg1 = CALL_EXPR_ARG (exp, 2);
11796 rtx op0 = expand_normal (arg0);
11797 rtx op1 = expand_normal (arg1);
11798 enum machine_mode tmode = SImode;
11799 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11800 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11801 int cr6_form_int;
11803 if (TREE_CODE (cr6_form) != INTEGER_CST)
11805 error ("argument 1 of __builtin_altivec_predicate must be a constant");
11806 return const0_rtx;
11808 else
11809 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
11811 gcc_assert (mode0 == mode1);
11813 /* If we have invalid arguments, bail out before generating bad rtl. */
11814 if (arg0 == error_mark_node || arg1 == error_mark_node)
11815 return const0_rtx;
11817 if (target == 0
11818 || GET_MODE (target) != tmode
11819 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11820 target = gen_reg_rtx (tmode);
11822 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11823 op0 = copy_to_mode_reg (mode0, op0);
11824 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11825 op1 = copy_to_mode_reg (mode1, op1);
11827 scratch = gen_reg_rtx (mode0);
11829 pat = GEN_FCN (icode) (scratch, op0, op1);
11830 if (! pat)
11831 return 0;
11832 emit_insn (pat);
11834 /* The vec_any* and vec_all* predicates use the same opcodes for two
11835 different operations, but the bits in CR6 will be different
11836 depending on what information we want. So we have to play tricks
11837 with CR6 to get the right bits out.
11839 If you think this is disgusting, look at the specs for the
11840 AltiVec predicates. */
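/* The accepted constants correspond to __CR6_EQ (0), __CR6_EQ_REV (1),
   __CR6_LT (2) and __CR6_LT_REV (3) from altivec.h; each case below
   extracts the matching CR6 bit via a gen_cr6_test_* pattern. */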
11842 switch (cr6_form_int)
11844 case 0:
11845 emit_insn (gen_cr6_test_for_zero (target));
11846 break;
11847 case 1:
11848 emit_insn (gen_cr6_test_for_zero_reverse (target));
11849 break;
11850 case 2:
11851 emit_insn (gen_cr6_test_for_lt (target));
11852 break;
11853 case 3:
11854 emit_insn (gen_cr6_test_for_lt_reverse (target));
11855 break;
11856 default:
11857 error ("argument 1 of __builtin_altivec_predicate is out of range");
11858 break;
11861 return target;
11864 static rtx
11865 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
11867 rtx pat, addr;
11868 tree arg0 = CALL_EXPR_ARG (exp, 0);
11869 tree arg1 = CALL_EXPR_ARG (exp, 1);
11870 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11871 enum machine_mode mode0 = Pmode;
11872 enum machine_mode mode1 = Pmode;
11873 rtx op0 = expand_normal (arg0);
11874 rtx op1 = expand_normal (arg1);
11876 if (icode == CODE_FOR_nothing)
11877 /* Builtin not supported on this processor. */
11878 return 0;
11880 /* If we got invalid arguments bail out before generating bad rtl. */
11881 if (arg0 == error_mark_node || arg1 == error_mark_node)
11882 return const0_rtx;
11884 if (target == 0
11885 || GET_MODE (target) != tmode
11886 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11887 target = gen_reg_rtx (tmode);
11889 op1 = copy_to_mode_reg (mode1, op1);
11891 if (op0 == const0_rtx)
11893 addr = gen_rtx_MEM (tmode, op1);
11895 else
11897 op0 = copy_to_mode_reg (mode0, op0);
11898 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
11901 pat = GEN_FCN (icode) (target, addr);
11903 if (! pat)
11904 return 0;
11905 emit_insn (pat);
11907 return target;
11910 /* Return a constant vector for use as a little-endian permute control vector
11911 to reverse the order of elements of the given vector mode. */
11912 static rtx
11913 swap_selector_for_mode (enum machine_mode mode)
11915 /* These are little endian vectors, so their elements are reversed
11916 from what you would normally expect for a permute control vector. */
11917 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
11918 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
11919 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
11920 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
11921 unsigned int *swaparray, i;
11922 rtx perm[16];
11924 switch (mode)
11926 case V2DFmode:
11927 case V2DImode:
11928 swaparray = swap2;
11929 break;
11930 case V4SFmode:
11931 case V4SImode:
11932 swaparray = swap4;
11933 break;
11934 case V8HImode:
11935 swaparray = swap8;
11936 break;
11937 case V16QImode:
11938 swaparray = swap16;
11939 break;
11940 default:
11941 gcc_unreachable ();
11944 for (i = 0; i < 16; ++i)
11945 perm[i] = GEN_INT (swaparray[i]);
11947 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
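/* Example: for V4SImode the selector built above is the byte vector
   {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; used as a vperm control
   with both inputs equal, it reverses the four 32-bit elements. */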
11950 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
11951 with -maltivec=be specified. Issue the load followed by an element-reversing
11952 permute. */
11953 void
11954 altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11956 rtx tmp = gen_reg_rtx (mode);
11957 rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
11958 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
11959 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
11960 rtx sel = swap_selector_for_mode (mode);
11961 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
11963 gcc_assert (REG_P (op0));
11964 emit_insn (par);
11965 emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
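/* Sketch of the emitted sequence, in rough assembly terms:

     lvx   tmp, addr           # raw load in little-endian element order
     vperm op0, tmp, tmp, sel  # reverse elements for -maltivec=be  */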
11968 /* Generate code for a "stvx" or "stvxl" built-in for a little endian target
11969 with -maltivec=be specified. Issue the store preceded by an element-reversing
11970 permute. */
11971 void
11972 altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11974 rtx tmp = gen_reg_rtx (mode);
11975 rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
11976 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
11977 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
11978 rtx sel = swap_selector_for_mode (mode);
11979 rtx vperm;
11981 gcc_assert (REG_P (op1));
11982 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
11983 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
11984 emit_insn (par);
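/* Mirror image of altivec_expand_lvx_be, sketched:

     vperm tmp, op1, op1, sel  # reverse elements first
     stvx  tmp, addr           # then issue the store  */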
11987 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
11988 specified. Issue the store preceded by an element-reversing permute. */
11989 void
11990 altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
11992 enum machine_mode inner_mode = GET_MODE_INNER (mode);
11993 rtx tmp = gen_reg_rtx (mode);
11994 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
11995 rtx sel = swap_selector_for_mode (mode);
11996 rtx vperm;
11998 gcc_assert (REG_P (op1));
11999 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12000 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12001 emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
12004 static rtx
12005 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
12007 rtx pat, addr;
12008 tree arg0 = CALL_EXPR_ARG (exp, 0);
12009 tree arg1 = CALL_EXPR_ARG (exp, 1);
12010 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12011 enum machine_mode mode0 = Pmode;
12012 enum machine_mode mode1 = Pmode;
12013 rtx op0 = expand_normal (arg0);
12014 rtx op1 = expand_normal (arg1);
12016 if (icode == CODE_FOR_nothing)
12017 /* Builtin not supported on this processor. */
12018 return 0;
12020 /* If we got invalid arguments bail out before generating bad rtl. */
12021 if (arg0 == error_mark_node || arg1 == error_mark_node)
12022 return const0_rtx;
12024 if (target == 0
12025 || GET_MODE (target) != tmode
12026 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12027 target = gen_reg_rtx (tmode);
12029 op1 = copy_to_mode_reg (mode1, op1);
12031 if (op0 == const0_rtx)
12033 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
12035 else
12037 op0 = copy_to_mode_reg (mode0, op0);
12038 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
12041 pat = GEN_FCN (icode) (target, addr);
12043 if (! pat)
12044 return 0;
12045 emit_insn (pat);
12047 return target;
12050 static rtx
12051 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12053 tree arg0 = CALL_EXPR_ARG (exp, 0);
12054 tree arg1 = CALL_EXPR_ARG (exp, 1);
12055 tree arg2 = CALL_EXPR_ARG (exp, 2);
12056 rtx op0 = expand_normal (arg0);
12057 rtx op1 = expand_normal (arg1);
12058 rtx op2 = expand_normal (arg2);
12059 rtx pat;
12060 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12061 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12062 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
12064 /* Invalid arguments. Bail before doing anything stupid! */
12065 if (arg0 == error_mark_node
12066 || arg1 == error_mark_node
12067 || arg2 == error_mark_node)
12068 return const0_rtx;
12070 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12071 op0 = copy_to_mode_reg (mode2, op0);
12072 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12073 op1 = copy_to_mode_reg (mode0, op1);
12074 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12075 op2 = copy_to_mode_reg (mode1, op2);
12077 pat = GEN_FCN (icode) (op1, op2, op0);
12078 if (pat)
12079 emit_insn (pat);
12080 return NULL_RTX;
12083 static rtx
12084 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12086 tree arg0 = CALL_EXPR_ARG (exp, 0);
12087 tree arg1 = CALL_EXPR_ARG (exp, 1);
12088 tree arg2 = CALL_EXPR_ARG (exp, 2);
12089 rtx op0 = expand_normal (arg0);
12090 rtx op1 = expand_normal (arg1);
12091 rtx op2 = expand_normal (arg2);
12092 rtx pat, addr;
12093 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12094 enum machine_mode mode1 = Pmode;
12095 enum machine_mode mode2 = Pmode;
12097 /* Invalid arguments. Bail before doing anything stupid! */
12098 if (arg0 == error_mark_node
12099 || arg1 == error_mark_node
12100 || arg2 == error_mark_node)
12101 return const0_rtx;
12103 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12104 op0 = copy_to_mode_reg (tmode, op0);
12106 op2 = copy_to_mode_reg (mode2, op2);
12108 if (op1 == const0_rtx)
12110 addr = gen_rtx_MEM (tmode, op2);
12112 else
12114 op1 = copy_to_mode_reg (mode1, op1);
12115 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12118 pat = GEN_FCN (icode) (addr, op0);
12119 if (pat)
12120 emit_insn (pat);
12121 return NULL_RTX;
12124 static rtx
12125 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12127 tree arg0 = CALL_EXPR_ARG (exp, 0);
12128 tree arg1 = CALL_EXPR_ARG (exp, 1);
12129 tree arg2 = CALL_EXPR_ARG (exp, 2);
12130 rtx op0 = expand_normal (arg0);
12131 rtx op1 = expand_normal (arg1);
12132 rtx op2 = expand_normal (arg2);
12133 rtx pat, addr;
12134 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12135 enum machine_mode smode = insn_data[icode].operand[1].mode;
12136 enum machine_mode mode1 = Pmode;
12137 enum machine_mode mode2 = Pmode;
12139 /* Invalid arguments. Bail before doing anything stupid! */
12140 if (arg0 == error_mark_node
12141 || arg1 == error_mark_node
12142 || arg2 == error_mark_node)
12143 return const0_rtx;
12145 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12146 op0 = copy_to_mode_reg (smode, op0);
12148 op2 = copy_to_mode_reg (mode2, op2);
12150 if (op1 == const0_rtx)
12152 addr = gen_rtx_MEM (tmode, op2);
12154 else
12156 op1 = copy_to_mode_reg (mode1, op1);
12157 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12160 pat = GEN_FCN (icode) (addr, op0);
12161 if (pat)
12162 emit_insn (pat);
12163 return NULL_RTX;
12166 /* Return the appropriate SPR number associated with the given builtin. */
12167 static inline HOST_WIDE_INT
12168 htm_spr_num (enum rs6000_builtins code)
12170 if (code == HTM_BUILTIN_GET_TFHAR
12171 || code == HTM_BUILTIN_SET_TFHAR)
12172 return TFHAR_SPR;
12173 else if (code == HTM_BUILTIN_GET_TFIAR
12174 || code == HTM_BUILTIN_SET_TFIAR)
12175 return TFIAR_SPR;
12176 else if (code == HTM_BUILTIN_GET_TEXASR
12177 || code == HTM_BUILTIN_SET_TEXASR)
12178 return TEXASR_SPR;
12179 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12180 || code == HTM_BUILTIN_SET_TEXASRU);
12181 return TEXASRU_SPR;
12184 /* Return the appropriate SPR regno associated with the given builtin. */
12185 static inline HOST_WIDE_INT
12186 htm_spr_regno (enum rs6000_builtins code)
12188 if (code == HTM_BUILTIN_GET_TFHAR
12189 || code == HTM_BUILTIN_SET_TFHAR)
12190 return TFHAR_REGNO;
12191 else if (code == HTM_BUILTIN_GET_TFIAR
12192 || code == HTM_BUILTIN_SET_TFIAR)
12193 return TFIAR_REGNO;
12194 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12195 || code == HTM_BUILTIN_SET_TEXASR
12196 || code == HTM_BUILTIN_GET_TEXASRU
12197 || code == HTM_BUILTIN_SET_TEXASRU);
12198 return TEXASR_REGNO;
12201 /* Return the correct ICODE value depending on whether we are
12202 setting or reading the HTM SPRs. */
12203 static inline enum insn_code
12204 rs6000_htm_spr_icode (bool nonvoid)
12206 if (nonvoid)
12207 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12208 else
12209 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
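/* For example (editor's sketch): the nonvoid __builtin_get_texasr
   expands on a 64-bit target through CODE_FOR_htm_mfspr_di, i.e. an
   mfspr from TEXASR_SPR into the result register, while the void
   __builtin_set_texasr takes the mtspr path instead. */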
12212 /* Expand the HTM builtin in EXP and store the result in TARGET.
12213 Store true in *EXPANDEDP if we found a builtin to expand. */
12214 static rtx
12215 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12217 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12218 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12219 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12220 const struct builtin_description *d;
12221 size_t i;
12223 *expandedp = false;
12225 /* Expand the HTM builtins. */
12226 d = bdesc_htm;
12227 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12228 if (d->code == fcode)
12230 rtx op[MAX_HTM_OPERANDS], pat;
12231 int nopnds = 0;
12232 tree arg;
12233 call_expr_arg_iterator iter;
12234 unsigned attr = rs6000_builtin_info[fcode].attr;
12235 enum insn_code icode = d->icode;
12237 if (attr & RS6000_BTC_SPR)
12238 icode = rs6000_htm_spr_icode (nonvoid);
12240 if (nonvoid)
12242 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12243 if (!target
12244 || GET_MODE (target) != tmode
12245 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
12246 target = gen_reg_rtx (tmode);
12247 op[nopnds++] = target;
12250 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12252 const struct insn_operand_data *insn_op;
12254 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12255 return NULL_RTX;
12257 insn_op = &insn_data[icode].operand[nopnds];
12259 op[nopnds] = expand_normal (arg);
12261 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12263 if (!strcmp (insn_op->constraint, "n"))
12265 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12266 if (!CONST_INT_P (op[nopnds]))
12267 error ("argument %d must be an unsigned literal", arg_num);
12268 else
12269 error ("argument %d is an unsigned literal that is "
12270 "out of range", arg_num);
12271 return const0_rtx;
12273 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12276 nopnds++;
12279 /* Handle the builtins for extended mnemonics. These accept
12280 no arguments, but map to builtins that take arguments. */
12281 switch (fcode)
12283 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12284 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12285 op[nopnds++] = GEN_INT (1);
12286 #ifdef ENABLE_CHECKING
12287 attr |= RS6000_BTC_UNARY;
12288 #endif
12289 break;
12290 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12291 op[nopnds++] = GEN_INT (0);
12292 #ifdef ENABLE_CHECKING
12293 attr |= RS6000_BTC_UNARY;
12294 #endif
12295 break;
12296 default:
12297 break;
12300 /* If this builtin accesses SPRs, then pass in the appropriate
12301 SPR number and SPR regno as the last two operands. */
12302 if (attr & RS6000_BTC_SPR)
12304 op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
12305 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
12308 #ifdef ENABLE_CHECKING
12309 int expected_nopnds = 0;
12310 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12311 expected_nopnds = 1;
12312 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12313 expected_nopnds = 2;
12314 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12315 expected_nopnds = 3;
12316 if (!(attr & RS6000_BTC_VOID))
12317 expected_nopnds += 1;
12318 if (attr & RS6000_BTC_SPR)
12319 expected_nopnds += 2;
12321 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12322 #endif
12324 switch (nopnds)
12326 case 1:
12327 pat = GEN_FCN (icode) (op[0]);
12328 break;
12329 case 2:
12330 pat = GEN_FCN (icode) (op[0], op[1]);
12331 break;
12332 case 3:
12333 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12334 break;
12335 case 4:
12336 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12337 break;
12338 default:
12339 gcc_unreachable ();
12341 if (!pat)
12342 return NULL_RTX;
12343 emit_insn (pat);
12345 *expandedp = true;
12346 if (nonvoid)
12347 return target;
12348 return const0_rtx;
12351 return NULL_RTX;
12354 static rtx
12355 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12357 rtx pat;
12358 tree arg0 = CALL_EXPR_ARG (exp, 0);
12359 tree arg1 = CALL_EXPR_ARG (exp, 1);
12360 tree arg2 = CALL_EXPR_ARG (exp, 2);
12361 rtx op0 = expand_normal (arg0);
12362 rtx op1 = expand_normal (arg1);
12363 rtx op2 = expand_normal (arg2);
12364 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12365 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12366 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12367 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
12369 if (icode == CODE_FOR_nothing)
12370 /* Builtin not supported on this processor. */
12371 return 0;
12373 /* If we got invalid arguments bail out before generating bad rtl. */
12374 if (arg0 == error_mark_node
12375 || arg1 == error_mark_node
12376 || arg2 == error_mark_node)
12377 return const0_rtx;
12379 /* Check and prepare argument depending on the instruction code.
12381 Note that a switch statement instead of the sequence of tests
12382 would be incorrect as many of the CODE_FOR values could be
12383 CODE_FOR_nothing and that would yield multiple alternatives
12384 with identical values. We'd never reach here at runtime in
12385 this case. */
12386 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12387 || icode == CODE_FOR_altivec_vsldoi_v4si
12388 || icode == CODE_FOR_altivec_vsldoi_v8hi
12389 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12391 /* Only allow 4-bit unsigned literals. */
12392 STRIP_NOPS (arg2);
12393 if (TREE_CODE (arg2) != INTEGER_CST
12394 || TREE_INT_CST_LOW (arg2) & ~0xf)
12396 error ("argument 3 must be a 4-bit unsigned literal");
12397 return const0_rtx;
12400 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12401 || icode == CODE_FOR_vsx_xxpermdi_v2di
12402 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12403 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12404 || icode == CODE_FOR_vsx_xxsldwi_v4si
12405 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12406 || icode == CODE_FOR_vsx_xxsldwi_v2di
12407 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12409 /* Only allow 2-bit unsigned literals. */
12410 STRIP_NOPS (arg2);
12411 if (TREE_CODE (arg2) != INTEGER_CST
12412 || TREE_INT_CST_LOW (arg2) & ~0x3)
12414 error ("argument 3 must be a 2-bit unsigned literal");
12415 return const0_rtx;
12418 else if (icode == CODE_FOR_vsx_set_v2df
12419 || icode == CODE_FOR_vsx_set_v2di
12420 || icode == CODE_FOR_bcdadd
12421 || icode == CODE_FOR_bcdadd_lt
12422 || icode == CODE_FOR_bcdadd_eq
12423 || icode == CODE_FOR_bcdadd_gt
12424 || icode == CODE_FOR_bcdsub
12425 || icode == CODE_FOR_bcdsub_lt
12426 || icode == CODE_FOR_bcdsub_eq
12427 || icode == CODE_FOR_bcdsub_gt)
12429 /* Only allow 1-bit unsigned literals. */
12430 STRIP_NOPS (arg2);
12431 if (TREE_CODE (arg2) != INTEGER_CST
12432 || TREE_INT_CST_LOW (arg2) & ~0x1)
12434 error ("argument 3 must be a 1-bit unsigned literal");
12435 return const0_rtx;
12438 else if (icode == CODE_FOR_dfp_ddedpd_dd
12439 || icode == CODE_FOR_dfp_ddedpd_td)
12441 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
12442 STRIP_NOPS (arg0);
12443 if (TREE_CODE (arg0) != INTEGER_CST
12444 || TREE_INT_CST_LOW (arg0) & ~0x3)
12446 error ("argument 1 must be 0 or 2");
12447 return const0_rtx;
12450 else if (icode == CODE_FOR_dfp_denbcd_dd
12451 || icode == CODE_FOR_dfp_denbcd_td)
12453 /* Only allow 1-bit unsigned literals. */
12454 STRIP_NOPS (arg0);
12455 if (TREE_CODE (arg0) != INTEGER_CST
12456 || TREE_INT_CST_LOW (arg0) & ~0x1)
12458 error ("argument 1 must be a 1-bit unsigned literal");
12459 return const0_rtx;
12462 else if (icode == CODE_FOR_dfp_dscli_dd
12463 || icode == CODE_FOR_dfp_dscli_td
12464 || icode == CODE_FOR_dfp_dscri_dd
12465 || icode == CODE_FOR_dfp_dscri_td)
12467 /* Only allow 6-bit unsigned literals. */
12468 STRIP_NOPS (arg1);
12469 if (TREE_CODE (arg1) != INTEGER_CST
12470 || TREE_INT_CST_LOW (arg1) & ~0x3f)
12472 error ("argument 2 must be a 6-bit unsigned literal");
12473 return const0_rtx;
12476 else if (icode == CODE_FOR_crypto_vshasigmaw
12477 || icode == CODE_FOR_crypto_vshasigmad)
12479 /* Check that the 2nd and 3rd arguments are integer constants in
12480 range, and prepare the arguments. */
12481 STRIP_NOPS (arg1);
12482 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
12484 error ("argument 2 must be 0 or 1");
12485 return const0_rtx;
12488 STRIP_NOPS (arg2);
12489 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
12491 error ("argument 3 must be in the range 0..15");
12492 return const0_rtx;
12496 if (target == 0
12497 || GET_MODE (target) != tmode
12498 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12499 target = gen_reg_rtx (tmode);
12501 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12502 op0 = copy_to_mode_reg (mode0, op0);
12503 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12504 op1 = copy_to_mode_reg (mode1, op1);
12505 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12506 op2 = copy_to_mode_reg (mode2, op2);
12508 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
12509 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
12510 else
12511 pat = GEN_FCN (icode) (target, op0, op1, op2);
12512 if (! pat)
12513 return 0;
12514 emit_insn (pat);
12516 return target;
12519 /* Expand the lvx builtins. */
12520 static rtx
12521 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
12523 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12524 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12525 tree arg0;
12526 enum machine_mode tmode, mode0;
12527 rtx pat, op0;
12528 enum insn_code icode;
12530 switch (fcode)
12532 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
12533 icode = CODE_FOR_vector_altivec_load_v16qi;
12534 break;
12535 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
12536 icode = CODE_FOR_vector_altivec_load_v8hi;
12537 break;
12538 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
12539 icode = CODE_FOR_vector_altivec_load_v4si;
12540 break;
12541 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
12542 icode = CODE_FOR_vector_altivec_load_v4sf;
12543 break;
12544 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
12545 icode = CODE_FOR_vector_altivec_load_v2df;
12546 break;
12547 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
12548 icode = CODE_FOR_vector_altivec_load_v2di;
break;
12549 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
12550 icode = CODE_FOR_vector_altivec_load_v1ti;
12551 break;
12552 default:
12553 *expandedp = false;
12554 return NULL_RTX;
12557 *expandedp = true;
12559 arg0 = CALL_EXPR_ARG (exp, 0);
12560 op0 = expand_normal (arg0);
12561 tmode = insn_data[icode].operand[0].mode;
12562 mode0 = insn_data[icode].operand[1].mode;
12564 if (target == 0
12565 || GET_MODE (target) != tmode
12566 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12567 target = gen_reg_rtx (tmode);
12569 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12570 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12572 pat = GEN_FCN (icode) (target, op0);
12573 if (! pat)
12574 return 0;
12575 emit_insn (pat);
12576 return target;
12579 /* Expand the stvx builtins. */
12580 static rtx
12581 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12582 bool *expandedp)
12584 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12585 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12586 tree arg0, arg1;
12587 enum machine_mode mode0, mode1;
12588 rtx pat, op0, op1;
12589 enum insn_code icode;
12591 switch (fcode)
12593 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
12594 icode = CODE_FOR_vector_altivec_store_v16qi;
12595 break;
12596 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
12597 icode = CODE_FOR_vector_altivec_store_v8hi;
12598 break;
12599 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
12600 icode = CODE_FOR_vector_altivec_store_v4si;
12601 break;
12602 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
12603 icode = CODE_FOR_vector_altivec_store_v4sf;
12604 break;
12605 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
12606 icode = CODE_FOR_vector_altivec_store_v2df;
12607 break;
12608 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
12609 icode = CODE_FOR_vector_altivec_store_v2di;
break;
12610 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
12611 icode = CODE_FOR_vector_altivec_store_v1ti;
12612 break;
12613 default:
12614 *expandedp = false;
12615 return NULL_RTX;
12618 arg0 = CALL_EXPR_ARG (exp, 0);
12619 arg1 = CALL_EXPR_ARG (exp, 1);
12620 op0 = expand_normal (arg0);
12621 op1 = expand_normal (arg1);
12622 mode0 = insn_data[icode].operand[0].mode;
12623 mode1 = insn_data[icode].operand[1].mode;
12625 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12626 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12627 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12628 op1 = copy_to_mode_reg (mode1, op1);
12630 pat = GEN_FCN (icode) (op0, op1);
12631 if (pat)
12632 emit_insn (pat);
12634 *expandedp = true;
12635 return NULL_RTX;
12638 /* Expand the dst builtins. */
12639 static rtx
12640 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12641 bool *expandedp)
12643 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12644 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12645 tree arg0, arg1, arg2;
12646 enum machine_mode mode0, mode1;
12647 rtx pat, op0, op1, op2;
12648 const struct builtin_description *d;
12649 size_t i;
12651 *expandedp = false;
12653 /* Handle DST variants. */
12654 d = bdesc_dst;
12655 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12656 if (d->code == fcode)
12658 arg0 = CALL_EXPR_ARG (exp, 0);
12659 arg1 = CALL_EXPR_ARG (exp, 1);
12660 arg2 = CALL_EXPR_ARG (exp, 2);
12661 op0 = expand_normal (arg0);
12662 op1 = expand_normal (arg1);
12663 op2 = expand_normal (arg2);
12664 mode0 = insn_data[d->icode].operand[0].mode;
12665 mode1 = insn_data[d->icode].operand[1].mode;
12667 /* Invalid arguments, bail out before generating bad rtl. */
12668 if (arg0 == error_mark_node
12669 || arg1 == error_mark_node
12670 || arg2 == error_mark_node)
12671 return const0_rtx;
12673 *expandedp = true;
12674 STRIP_NOPS (arg2);
12675 if (TREE_CODE (arg2) != INTEGER_CST
12676 || TREE_INT_CST_LOW (arg2) & ~0x3)
12678 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
12679 return const0_rtx;
12682 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
12683 op0 = copy_to_mode_reg (Pmode, op0);
12684 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
12685 op1 = copy_to_mode_reg (mode1, op1);
12687 pat = GEN_FCN (d->icode) (op0, op1, op2);
12688 if (pat != 0)
12689 emit_insn (pat);
12691 return NULL_RTX;
12694 return NULL_RTX;
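/* For illustration, the user-level entry point for the DST variants is
   the data-stream prefetch family from <altivec.h>, e.g.

     vec_dst (addr, ctl, 0);

   where the trailing literal selects one of the four stream channels;
   the 2-bit check above is what rejects non-constant or out-of-range
   channel numbers.  */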
12697 /* Expand vec_init builtin. */
12698 static rtx
12699 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
12701 enum machine_mode tmode = TYPE_MODE (type);
12702 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
12703 int i, n_elt = GET_MODE_NUNITS (tmode);
12705 gcc_assert (VECTOR_MODE_P (tmode));
12706 gcc_assert (n_elt == call_expr_nargs (exp));
12708 if (!target || !register_operand (target, tmode))
12709 target = gen_reg_rtx (tmode);
12711 /* If we have a vector composed of a single element, such as V1TImode, do
12712 the initialization directly. */
12713 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
12715 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
12716 emit_move_insn (target, gen_lowpart (tmode, x));
12718 else
12720 rtvec v = rtvec_alloc (n_elt);
12722 for (i = 0; i < n_elt; ++i)
12724 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
12725 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
12728 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
12731 return target;
12734 /* Return the integer constant in ARG. Constrain it to be in the range
12735 of the subparts of VEC_TYPE; issue an error if not. */
12737 static int
12738 get_element_number (tree vec_type, tree arg)
12740 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
12742 if (!tree_fits_uhwi_p (arg)
12743 || (elt = tree_to_uhwi (arg), elt > max))
12745 error ("selector must be an integer constant in the range 0..%wi", max);
12746 return 0;
12749 return elt;
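/* For example, a V4SI vector has TYPE_VECTOR_SUBPARTS == 4, so a
   selector passed to vec_extract or vec_insert must lie in 0..3;
   anything else triggers the error above and element 0 is used as a
   fallback.  */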
12752 /* Expand vec_set builtin. */
12753 static rtx
12754 altivec_expand_vec_set_builtin (tree exp)
12756 enum machine_mode tmode, mode1;
12757 tree arg0, arg1, arg2;
12758 int elt;
12759 rtx op0, op1;
12761 arg0 = CALL_EXPR_ARG (exp, 0);
12762 arg1 = CALL_EXPR_ARG (exp, 1);
12763 arg2 = CALL_EXPR_ARG (exp, 2);
12765 tmode = TYPE_MODE (TREE_TYPE (arg0));
12766 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12767 gcc_assert (VECTOR_MODE_P (tmode));
12769 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
12770 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
12771 elt = get_element_number (TREE_TYPE (arg0), arg2);
12773 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
12774 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
12776 op0 = force_reg (tmode, op0);
12777 op1 = force_reg (mode1, op1);
12779 rs6000_expand_vector_set (op0, op1, elt);
12781 return op0;
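/* A user-level sketch (via the <altivec.h> interface):

     v = vec_insert (42, v, 1);

   replaces element 1 of V and is expanded through
   rs6000_expand_vector_set on the copied-in operands above.  */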
12784 /* Expand vec_ext builtin. */
12785 static rtx
12786 altivec_expand_vec_ext_builtin (tree exp, rtx target)
12788 enum machine_mode tmode, mode0;
12789 tree arg0, arg1;
12790 int elt;
12791 rtx op0;
12793 arg0 = CALL_EXPR_ARG (exp, 0);
12794 arg1 = CALL_EXPR_ARG (exp, 1);
12796 op0 = expand_normal (arg0);
12797 elt = get_element_number (TREE_TYPE (arg0), arg1);
12799 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12800 mode0 = TYPE_MODE (TREE_TYPE (arg0));
12801 gcc_assert (VECTOR_MODE_P (mode0));
12803 op0 = force_reg (mode0, op0);
12805 if (optimize || !target || !register_operand (target, tmode))
12806 target = gen_reg_rtx (tmode);
12808 rs6000_expand_vector_extract (target, op0, elt);
12810 return target;
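/* Correspondingly, on the extract side:

     int x = vec_extract (v, 2);

   lands here with ELT == 2 and is expanded through
   rs6000_expand_vector_extract into the target register.  */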
12813 /* Expand the builtin in EXP and store the result in TARGET. Store
12814 true in *EXPANDEDP if we found a builtin to expand. */
12815 static rtx
12816 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
12818 const struct builtin_description *d;
12819 size_t i;
12820 enum insn_code icode;
12821 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12822 tree arg0;
12823 rtx op0, pat;
12824 enum machine_mode tmode, mode0;
12825 enum rs6000_builtins fcode
12826 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12828 if (rs6000_overloaded_builtin_p (fcode))
12830 *expandedp = true;
12831 error ("unresolved overload for Altivec builtin %qF", fndecl);
12833 /* Given it is invalid, just generate a normal call. */
12834 return expand_call (exp, target, false);
12837 target = altivec_expand_ld_builtin (exp, target, expandedp);
12838 if (*expandedp)
12839 return target;
12841 target = altivec_expand_st_builtin (exp, target, expandedp);
12842 if (*expandedp)
12843 return target;
12845 target = altivec_expand_dst_builtin (exp, target, expandedp);
12846 if (*expandedp)
12847 return target;
12849 *expandedp = true;
12851 switch (fcode)
12853 case ALTIVEC_BUILTIN_STVX_V2DF:
12854 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
12855 case ALTIVEC_BUILTIN_STVX_V2DI:
12856 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
12857 case ALTIVEC_BUILTIN_STVX_V4SF:
12858 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
12859 case ALTIVEC_BUILTIN_STVX:
12860 case ALTIVEC_BUILTIN_STVX_V4SI:
12861 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
12862 case ALTIVEC_BUILTIN_STVX_V8HI:
12863 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
12864 case ALTIVEC_BUILTIN_STVX_V16QI:
12865 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
12866 case ALTIVEC_BUILTIN_STVEBX:
12867 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
12868 case ALTIVEC_BUILTIN_STVEHX:
12869 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
12870 case ALTIVEC_BUILTIN_STVEWX:
12871 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
12872 case ALTIVEC_BUILTIN_STVXL_V2DF:
12873 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
12874 case ALTIVEC_BUILTIN_STVXL_V2DI:
12875 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
12876 case ALTIVEC_BUILTIN_STVXL_V4SF:
12877 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
12878 case ALTIVEC_BUILTIN_STVXL:
12879 case ALTIVEC_BUILTIN_STVXL_V4SI:
12880 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
12881 case ALTIVEC_BUILTIN_STVXL_V8HI:
12882 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
12883 case ALTIVEC_BUILTIN_STVXL_V16QI:
12884 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
12886 case ALTIVEC_BUILTIN_STVLX:
12887 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
12888 case ALTIVEC_BUILTIN_STVLXL:
12889 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
12890 case ALTIVEC_BUILTIN_STVRX:
12891 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
12892 case ALTIVEC_BUILTIN_STVRXL:
12893 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
12895 case VSX_BUILTIN_STXVD2X_V1TI:
12896 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
12897 case VSX_BUILTIN_STXVD2X_V2DF:
12898 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
12899 case VSX_BUILTIN_STXVD2X_V2DI:
12900 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
12901 case VSX_BUILTIN_STXVW4X_V4SF:
12902 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
12903 case VSX_BUILTIN_STXVW4X_V4SI:
12904 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
12905 case VSX_BUILTIN_STXVW4X_V8HI:
12906 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
12907 case VSX_BUILTIN_STXVW4X_V16QI:
12908 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
12910 case ALTIVEC_BUILTIN_MFVSCR:
12911 icode = CODE_FOR_altivec_mfvscr;
12912 tmode = insn_data[icode].operand[0].mode;
12914 if (target == 0
12915 || GET_MODE (target) != tmode
12916 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12917 target = gen_reg_rtx (tmode);
12919 pat = GEN_FCN (icode) (target);
12920 if (! pat)
12921 return 0;
12922 emit_insn (pat);
12923 return target;
12925 case ALTIVEC_BUILTIN_MTVSCR:
12926 icode = CODE_FOR_altivec_mtvscr;
12927 arg0 = CALL_EXPR_ARG (exp, 0);
12928 op0 = expand_normal (arg0);
12929 mode0 = insn_data[icode].operand[0].mode;
12931 /* If we got invalid arguments, bail out before generating bad rtl. */
12932 if (arg0 == error_mark_node)
12933 return const0_rtx;
12935 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12936 op0 = copy_to_mode_reg (mode0, op0);
12938 pat = GEN_FCN (icode) (op0);
12939 if (pat)
12940 emit_insn (pat);
12941 return NULL_RTX;
12943 case ALTIVEC_BUILTIN_DSSALL:
12944 emit_insn (gen_altivec_dssall ());
12945 return NULL_RTX;
12947 case ALTIVEC_BUILTIN_DSS:
12948 icode = CODE_FOR_altivec_dss;
12949 arg0 = CALL_EXPR_ARG (exp, 0);
12950 STRIP_NOPS (arg0);
12951 op0 = expand_normal (arg0);
12952 mode0 = insn_data[icode].operand[0].mode;
12954 /* If we got invalid arguments, bail out before generating bad rtl. */
12955 if (arg0 == error_mark_node)
12956 return const0_rtx;
12958 if (TREE_CODE (arg0) != INTEGER_CST
12959 || TREE_INT_CST_LOW (arg0) & ~0x3)
12961 error ("argument to dss must be a 2-bit unsigned literal");
12962 return const0_rtx;
12965 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12966 op0 = copy_to_mode_reg (mode0, op0);
12968 emit_insn (gen_altivec_dss (op0));
12969 return NULL_RTX;
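/* Usage sketch (via <altivec.h>): vec_dss (0) stops data-stream channel
   0, with the 2-bit literal check above rejecting non-constant or
   out-of-range channels, while vec_dssall () stops all four channels at
   once via the DSSALL case just before this one.  */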
12971 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
12972 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
12973 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
12974 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
12975 case VSX_BUILTIN_VEC_INIT_V2DF:
12976 case VSX_BUILTIN_VEC_INIT_V2DI:
12977 case VSX_BUILTIN_VEC_INIT_V1TI:
12978 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
12980 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
12981 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
12982 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
12983 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
12984 case VSX_BUILTIN_VEC_SET_V2DF:
12985 case VSX_BUILTIN_VEC_SET_V2DI:
12986 case VSX_BUILTIN_VEC_SET_V1TI:
12987 return altivec_expand_vec_set_builtin (exp);
12989 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
12990 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
12991 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
12992 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
12993 case VSX_BUILTIN_VEC_EXT_V2DF:
12994 case VSX_BUILTIN_VEC_EXT_V2DI:
12995 case VSX_BUILTIN_VEC_EXT_V1TI:
12996 return altivec_expand_vec_ext_builtin (exp, target);
12998 default:
12999 break;
13003 /* Expand abs* operations. */
13004 d = bdesc_abs;
13005 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13006 if (d->code == fcode)
13007 return altivec_expand_abs_builtin (d->icode, exp, target);
13009 /* Expand the AltiVec predicates. */
13010 d = bdesc_altivec_preds;
13011 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13012 if (d->code == fcode)
13013 return altivec_expand_predicate_builtin (d->icode, exp, target);
13015 /* LV* are funky. We initialized them differently. */
13016 switch (fcode)
13018 case ALTIVEC_BUILTIN_LVSL:
13019 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
13020 exp, target, false);
13021 case ALTIVEC_BUILTIN_LVSR:
13022 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
13023 exp, target, false);
13024 case ALTIVEC_BUILTIN_LVEBX:
13025 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
13026 exp, target, false);
13027 case ALTIVEC_BUILTIN_LVEHX:
13028 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
13029 exp, target, false);
13030 case ALTIVEC_BUILTIN_LVEWX:
13031 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
13032 exp, target, false);
13033 case ALTIVEC_BUILTIN_LVXL_V2DF:
13034 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
13035 exp, target, false);
13036 case ALTIVEC_BUILTIN_LVXL_V2DI:
13037 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
13038 exp, target, false);
13039 case ALTIVEC_BUILTIN_LVXL_V4SF:
13040 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
13041 exp, target, false);
13042 case ALTIVEC_BUILTIN_LVXL:
13043 case ALTIVEC_BUILTIN_LVXL_V4SI:
13044 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
13045 exp, target, false);
13046 case ALTIVEC_BUILTIN_LVXL_V8HI:
13047 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
13048 exp, target, false);
13049 case ALTIVEC_BUILTIN_LVXL_V16QI:
13050 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
13051 exp, target, false);
13052 case ALTIVEC_BUILTIN_LVX_V2DF:
13053 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
13054 exp, target, false);
13055 case ALTIVEC_BUILTIN_LVX_V2DI:
13056 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
13057 exp, target, false);
13058 case ALTIVEC_BUILTIN_LVX_V4SF:
13059 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
13060 exp, target, false);
13061 case ALTIVEC_BUILTIN_LVX:
13062 case ALTIVEC_BUILTIN_LVX_V4SI:
13063 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
13064 exp, target, false);
13065 case ALTIVEC_BUILTIN_LVX_V8HI:
13066 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
13067 exp, target, false);
13068 case ALTIVEC_BUILTIN_LVX_V16QI:
13069 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
13070 exp, target, false);
13071 case ALTIVEC_BUILTIN_LVLX:
13072 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
13073 exp, target, true);
13074 case ALTIVEC_BUILTIN_LVLXL:
13075 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
13076 exp, target, true);
13077 case ALTIVEC_BUILTIN_LVRX:
13078 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
13079 exp, target, true);
13080 case ALTIVEC_BUILTIN_LVRXL:
13081 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
13082 exp, target, true);
13083 case VSX_BUILTIN_LXVD2X_V1TI:
13084 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13085 exp, target, false);
13086 case VSX_BUILTIN_LXVD2X_V2DF:
13087 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13088 exp, target, false);
13089 case VSX_BUILTIN_LXVD2X_V2DI:
13090 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13091 exp, target, false);
13092 case VSX_BUILTIN_LXVW4X_V4SF:
13093 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13094 exp, target, false);
13095 case VSX_BUILTIN_LXVW4X_V4SI:
13096 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13097 exp, target, false);
13098 case VSX_BUILTIN_LXVW4X_V8HI:
13099 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13100 exp, target, false);
13101 case VSX_BUILTIN_LXVW4X_V16QI:
13102 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13103 exp, target, false);
13104 break;
13105 default:
13106 break;
13110 *expandedp = false;
13111 return NULL_RTX;
13114 /* Expand the builtin in EXP and store the result in TARGET. Store
13115 true in *EXPANDEDP if we found a builtin to expand. */
13116 static rtx
13117 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13119 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13120 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13121 const struct builtin_description *d;
13122 size_t i;
13124 *expandedp = true;
13126 switch (fcode)
13128 case PAIRED_BUILTIN_STX:
13129 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13130 case PAIRED_BUILTIN_LX:
13131 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
13132 default:
13133 break;
13137 /* Expand the paired predicates. */
13138 d = bdesc_paired_preds;
13139 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13140 if (d->code == fcode)
13141 return paired_expand_predicate_builtin (d->icode, exp, target);
13143 *expandedp = false;
13144 return NULL_RTX;
13147 /* Binops that need to be initialized manually, but can be expanded
13148 automagically by rs6000_expand_binop_builtin. */
13149 static const struct builtin_description bdesc_2arg_spe[] =
13151 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13152 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13153 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13154 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13155 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13156 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13157 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13158 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13159 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13160 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13161 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13162 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13163 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13164 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13165 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13166 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13167 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13168 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13169 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13170 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13171 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13172 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13175 /* Expand the builtin in EXP and store the result in TARGET. Store
13176 true in *EXPANDEDP if we found a builtin to expand.
13178 This expands the SPE builtins that are not simple unary and binary
13179 operations. */
13180 static rtx
13181 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13183 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13184 tree arg1, arg0;
13185 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13186 enum insn_code icode;
13187 enum machine_mode tmode, mode0;
13188 rtx pat, op0;
13189 const struct builtin_description *d;
13190 size_t i;
13192 *expandedp = true;
13194 /* Syntax check for a 5-bit unsigned immediate. */
13195 switch (fcode)
13197 case SPE_BUILTIN_EVSTDD:
13198 case SPE_BUILTIN_EVSTDH:
13199 case SPE_BUILTIN_EVSTDW:
13200 case SPE_BUILTIN_EVSTWHE:
13201 case SPE_BUILTIN_EVSTWHO:
13202 case SPE_BUILTIN_EVSTWWE:
13203 case SPE_BUILTIN_EVSTWWO:
13204 arg1 = CALL_EXPR_ARG (exp, 2);
13205 if (TREE_CODE (arg1) != INTEGER_CST
13206 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13208 error ("argument 2 must be a 5-bit unsigned literal");
13209 return const0_rtx;
13211 break;
13212 default:
13213 break;
13216 /* The evsplat*i instructions are not quite generic. */
13217 switch (fcode)
13219 case SPE_BUILTIN_EVSPLATFI:
13220 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13221 exp, target);
13222 case SPE_BUILTIN_EVSPLATI:
13223 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13224 exp, target);
13225 default:
13226 break;
13229 d = bdesc_2arg_spe;
13230 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13231 if (d->code == fcode)
13232 return rs6000_expand_binop_builtin (d->icode, exp, target);
13234 d = bdesc_spe_predicates;
13235 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13236 if (d->code == fcode)
13237 return spe_expand_predicate_builtin (d->icode, exp, target);
13239 d = bdesc_spe_evsel;
13240 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13241 if (d->code == fcode)
13242 return spe_expand_evsel_builtin (d->icode, exp, target);
13244 switch (fcode)
13246 case SPE_BUILTIN_EVSTDDX:
13247 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13248 case SPE_BUILTIN_EVSTDHX:
13249 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13250 case SPE_BUILTIN_EVSTDWX:
13251 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13252 case SPE_BUILTIN_EVSTWHEX:
13253 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13254 case SPE_BUILTIN_EVSTWHOX:
13255 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13256 case SPE_BUILTIN_EVSTWWEX:
13257 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13258 case SPE_BUILTIN_EVSTWWOX:
13259 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13260 case SPE_BUILTIN_EVSTDD:
13261 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13262 case SPE_BUILTIN_EVSTDH:
13263 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13264 case SPE_BUILTIN_EVSTDW:
13265 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13266 case SPE_BUILTIN_EVSTWHE:
13267 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13268 case SPE_BUILTIN_EVSTWHO:
13269 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13270 case SPE_BUILTIN_EVSTWWE:
13271 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13272 case SPE_BUILTIN_EVSTWWO:
13273 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13274 case SPE_BUILTIN_MFSPEFSCR:
13275 icode = CODE_FOR_spe_mfspefscr;
13276 tmode = insn_data[icode].operand[0].mode;
13278 if (target == 0
13279 || GET_MODE (target) != tmode
13280 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13281 target = gen_reg_rtx (tmode);
13283 pat = GEN_FCN (icode) (target);
13284 if (! pat)
13285 return 0;
13286 emit_insn (pat);
13287 return target;
13288 case SPE_BUILTIN_MTSPEFSCR:
13289 icode = CODE_FOR_spe_mtspefscr;
13290 arg0 = CALL_EXPR_ARG (exp, 0);
13291 op0 = expand_normal (arg0);
13292 mode0 = insn_data[icode].operand[0].mode;
13294 if (arg0 == error_mark_node)
13295 return const0_rtx;
13297 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13298 op0 = copy_to_mode_reg (mode0, op0);
13300 pat = GEN_FCN (icode) (op0);
13301 if (pat)
13302 emit_insn (pat);
13303 return NULL_RTX;
13304 default:
13305 break;
13308 *expandedp = false;
13309 return NULL_RTX;
13312 static rtx
13313 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13315 rtx pat, scratch, tmp;
13316 tree form = CALL_EXPR_ARG (exp, 0);
13317 tree arg0 = CALL_EXPR_ARG (exp, 1);
13318 tree arg1 = CALL_EXPR_ARG (exp, 2);
13319 rtx op0 = expand_normal (arg0);
13320 rtx op1 = expand_normal (arg1);
13321 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13322 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13323 int form_int;
13324 enum rtx_code code;
13326 if (TREE_CODE (form) != INTEGER_CST)
13328 error ("argument 1 of __builtin_paired_predicate must be a constant");
13329 return const0_rtx;
13331 else
13332 form_int = TREE_INT_CST_LOW (form);
13334 gcc_assert (mode0 == mode1);
13336 if (arg0 == error_mark_node || arg1 == error_mark_node)
13337 return const0_rtx;
13339 if (target == 0
13340 || GET_MODE (target) != SImode
13341 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13342 target = gen_reg_rtx (SImode);
13343 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13344 op0 = copy_to_mode_reg (mode0, op0);
13345 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13346 op1 = copy_to_mode_reg (mode1, op1);
13348 scratch = gen_reg_rtx (CCFPmode);
13350 pat = GEN_FCN (icode) (scratch, op0, op1);
13351 if (!pat)
13352 return const0_rtx;
13354 emit_insn (pat);
13356 switch (form_int)
13358 /* LT bit. */
13359 case 0:
13360 code = LT;
13361 break;
13362 /* GT bit. */
13363 case 1:
13364 code = GT;
13365 break;
13366 /* EQ bit. */
13367 case 2:
13368 code = EQ;
13369 break;
13370 /* UN bit. */
13371 case 3:
13372 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13373 return target;
13374 default:
13375 error ("argument 1 of __builtin_paired_predicate is out of range");
13376 return const0_rtx;
13379 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13380 emit_move_insn (target, tmp);
13381 return target;
13384 static rtx
13385 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13387 rtx pat, scratch, tmp;
13388 tree form = CALL_EXPR_ARG (exp, 0);
13389 tree arg0 = CALL_EXPR_ARG (exp, 1);
13390 tree arg1 = CALL_EXPR_ARG (exp, 2);
13391 rtx op0 = expand_normal (arg0);
13392 rtx op1 = expand_normal (arg1);
13393 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13394 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13395 int form_int;
13396 enum rtx_code code;
13398 if (TREE_CODE (form) != INTEGER_CST)
13400 error ("argument 1 of __builtin_spe_predicate must be a constant");
13401 return const0_rtx;
13403 else
13404 form_int = TREE_INT_CST_LOW (form);
13406 gcc_assert (mode0 == mode1);
13408 if (arg0 == error_mark_node || arg1 == error_mark_node)
13409 return const0_rtx;
13411 if (target == 0
13412 || GET_MODE (target) != SImode
13413 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13414 target = gen_reg_rtx (SImode);
13416 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13417 op0 = copy_to_mode_reg (mode0, op0);
13418 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13419 op1 = copy_to_mode_reg (mode1, op1);
13421 scratch = gen_reg_rtx (CCmode);
13423 pat = GEN_FCN (icode) (scratch, op0, op1);
13424 if (! pat)
13425 return const0_rtx;
13426 emit_insn (pat);
13428 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13429 _lower_. We use one compare, but look in different bits of the
13430 CR for each variant.
13432 There are 2 elements in each SPE simd type (upper/lower). The CR
13433 bits are set as follows:
13435 BIT 0 | BIT 1 | BIT 2 | BIT 3
13436 U | L | (U | L) | (U & L)
13438 So, for an "all" relationship, BIT 3 would be set.
13439 For an "any" relationship, BIT 2 would be set. Etc.
13441 Following traditional nomenclature, these bits map to:
13443 BIT 0 | BIT 1 | BIT 2 | BIT 3
13444 LT | GT | EQ | OV
13446 Later, we will generate rtl to look in the LT/GT/EQ/OV bits.
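      As a concrete illustration: comparing {4, 7} with {1, 9} for
      greater-than gives U = (4 > 1) = 1 and L = (7 > 9) = 0, so BIT 2
      (U | L) is set but BIT 3 (U & L) is not; "any" holds while "all"
      does not.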
13449 switch (form_int)
13451 /* All variant. OV bit. */
13452 case 0:
13453 /* We need to get to the OV bit, which is the ORDERED bit. We
13454 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13455 that's ugly and will make validate_condition_mode die.
13456 So let's just use another pattern. */
13457 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13458 return target;
13459 /* Any variant. EQ bit. */
13460 case 1:
13461 code = EQ;
13462 break;
13463 /* Upper variant. LT bit. */
13464 case 2:
13465 code = LT;
13466 break;
13467 /* Lower variant. GT bit. */
13468 case 3:
13469 code = GT;
13470 break;
13471 default:
13472 error ("argument 1 of __builtin_spe_predicate is out of range");
13473 return const0_rtx;
13476 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13477 emit_move_insn (target, tmp);
13479 return target;
13482 /* The evsel builtins look like this:
13484 e = __builtin_spe_evsel_OP (a, b, c, d);
13486 and work like this:
13488 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
13489 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
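      As a concrete illustration, with the OP = gts (signed greater-than)
      variant: for a = {1, 5}, b = {3, 3}, c = {10, 20}, d = {30, 40},

      e = __builtin_spe_evsel_gts (a, b, c, d);

      yields e = {30, 20}; the upper compare (1 > 3) fails and selects
      d[upper], while the lower compare (5 > 3) holds and selects
      c[lower].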
13492 static rtx
13493 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
13495 rtx pat, scratch;
13496 tree arg0 = CALL_EXPR_ARG (exp, 0);
13497 tree arg1 = CALL_EXPR_ARG (exp, 1);
13498 tree arg2 = CALL_EXPR_ARG (exp, 2);
13499 tree arg3 = CALL_EXPR_ARG (exp, 3);
13500 rtx op0 = expand_normal (arg0);
13501 rtx op1 = expand_normal (arg1);
13502 rtx op2 = expand_normal (arg2);
13503 rtx op3 = expand_normal (arg3);
13504 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13505 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13507 gcc_assert (mode0 == mode1);
13509 if (arg0 == error_mark_node || arg1 == error_mark_node
13510 || arg2 == error_mark_node || arg3 == error_mark_node)
13511 return const0_rtx;
13513 if (target == 0
13514 || GET_MODE (target) != mode0
13515 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
13516 target = gen_reg_rtx (mode0);
13518 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13519 op0 = copy_to_mode_reg (mode0, op0);
13520 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13521 op1 = copy_to_mode_reg (mode0, op1);
13522 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13523 op2 = copy_to_mode_reg (mode0, op2);
13524 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
13525 op3 = copy_to_mode_reg (mode0, op3);
13527 /* Generate the compare. */
13528 scratch = gen_reg_rtx (CCmode);
13529 pat = GEN_FCN (icode) (scratch, op0, op1);
13530 if (! pat)
13531 return const0_rtx;
13532 emit_insn (pat);
13534 if (mode0 == V2SImode)
13535 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
13536 else
13537 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
13539 return target;
13542 /* Raise an error message for a builtin function that is called without the
13543 appropriate target options being set. */
13545 static void
13546 rs6000_invalid_builtin (enum rs6000_builtins fncode)
13548 size_t uns_fncode = (size_t)fncode;
13549 const char *name = rs6000_builtin_info[uns_fncode].name;
13550 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
13552 gcc_assert (name != NULL);
13553 if ((fnmask & RS6000_BTM_CELL) != 0)
13554 error ("Builtin function %s is only valid for the cell processor", name);
13555 else if ((fnmask & RS6000_BTM_VSX) != 0)
13556 error ("Builtin function %s requires the -mvsx option", name);
13557 else if ((fnmask & RS6000_BTM_HTM) != 0)
13558 error ("Builtin function %s requires the -mhtm option", name);
13559 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
13560 error ("Builtin function %s requires the -maltivec option", name);
13561 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
13562 error ("Builtin function %s requires the -mpaired option", name);
13563 else if ((fnmask & RS6000_BTM_SPE) != 0)
13564 error ("Builtin function %s requires the -mspe option", name);
13565 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13566 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13567 error ("Builtin function %s requires the -mhard-dfp and "
13568 "-mpower8-vector options", name);
13569 else if ((fnmask & RS6000_BTM_DFP) != 0)
13570 error ("Builtin function %s requires the -mhard-dfp option", name);
13571 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
13572 error ("Builtin function %s requires the -mpower8-vector option", name);
13573 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
13574 error ("Builtin function %s requires the -mhard-float option", name);
13575 else
13576 error ("Builtin function %s is not supported with the current options",
13577 name);
13580 /* Expand an expression EXP that calls a built-in function,
13581 with result going to TARGET if that's convenient
13582 (and in mode MODE if that's convenient).
13583 SUBTARGET may be used as the target for computing one of EXP's operands.
13584 IGNORE is nonzero if the value is to be ignored. */
13586 static rtx
13587 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13588 enum machine_mode mode ATTRIBUTE_UNUSED,
13589 int ignore ATTRIBUTE_UNUSED)
13591 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13592 enum rs6000_builtins fcode
13593 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
13594 size_t uns_fcode = (size_t)fcode;
13595 const struct builtin_description *d;
13596 size_t i;
13597 rtx ret;
13598 bool success;
13599 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
13600 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
13602 if (TARGET_DEBUG_BUILTIN)
13604 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
13605 const char *name1 = rs6000_builtin_info[uns_fcode].name;
13606 const char *name2 = ((icode != CODE_FOR_nothing)
13607 ? get_insn_name ((int)icode)
13608 : "nothing");
13609 const char *name3;
13611 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
13613 default: name3 = "unknown"; break;
13614 case RS6000_BTC_SPECIAL: name3 = "special"; break;
13615 case RS6000_BTC_UNARY: name3 = "unary"; break;
13616 case RS6000_BTC_BINARY: name3 = "binary"; break;
13617 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
13618 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
13619 case RS6000_BTC_ABS: name3 = "abs"; break;
13620 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
13621 case RS6000_BTC_DST: name3 = "dst"; break;
13625 fprintf (stderr,
13626 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13627 (name1) ? name1 : "---", fcode,
13628 (name2) ? name2 : "---", (int)icode,
13629 name3,
13630 func_valid_p ? "" : ", not valid");
13633 if (!func_valid_p)
13635 rs6000_invalid_builtin (fcode);
13637 /* Given it is invalid, just generate a normal call. */
13638 return expand_call (exp, target, ignore);
13641 switch (fcode)
13643 case RS6000_BUILTIN_RECIP:
13644 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
13646 case RS6000_BUILTIN_RECIPF:
13647 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
13649 case RS6000_BUILTIN_RSQRTF:
13650 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
13652 case RS6000_BUILTIN_RSQRT:
13653 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
13655 case POWER7_BUILTIN_BPERMD:
13656 return rs6000_expand_binop_builtin (((TARGET_64BIT)
13657 ? CODE_FOR_bpermd_di
13658 : CODE_FOR_bpermd_si), exp, target);
13660 case RS6000_BUILTIN_GET_TB:
13661 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
13662 target);
13664 case RS6000_BUILTIN_MFTB:
13665 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
13666 ? CODE_FOR_rs6000_mftb_di
13667 : CODE_FOR_rs6000_mftb_si),
13668 target);
13670 case RS6000_BUILTIN_MFFS:
13671 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
13673 case RS6000_BUILTIN_MTFSF:
13674 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
13676 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
13677 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
13679 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
13680 : (int) CODE_FOR_altivec_lvsl);
13681 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13682 enum machine_mode mode = insn_data[icode].operand[1].mode;
13683 tree arg;
13684 rtx op, addr, pat;
13686 gcc_assert (TARGET_ALTIVEC);
13688 arg = CALL_EXPR_ARG (exp, 0);
13689 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
13690 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
13691 addr = memory_address (mode, op);
13692 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
13693 op = addr;
13694 else
13696 /* For the load case we need to negate the address. */
13697 op = gen_reg_rtx (GET_MODE (addr));
13698 emit_insn (gen_rtx_SET (VOIDmode, op,
13699 gen_rtx_NEG (GET_MODE (addr), addr)));
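/* An informal sketch of why: with the address negated, the lvsr/lvsl
   mask instruction computes its permute control from -addr, i.e. from
   16 - (addr & 15) rather than addr & 15, which is the shift amount the
   misaligned-load realignment sequence wants.  */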
13701 op = gen_rtx_MEM (mode, op);
13703 if (target == 0
13704 || GET_MODE (target) != tmode
13705 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13706 target = gen_reg_rtx (tmode);
13708 /*pat = gen_altivec_lvsr (target, op);*/
13709 pat = GEN_FCN (icode) (target, op);
13710 if (!pat)
13711 return 0;
13712 emit_insn (pat);
13714 return target;
13717 case ALTIVEC_BUILTIN_VCFUX:
13718 case ALTIVEC_BUILTIN_VCFSX:
13719 case ALTIVEC_BUILTIN_VCTUXS:
13720 case ALTIVEC_BUILTIN_VCTSXS:
13721 /* FIXME: There's got to be a nicer way to handle this case than
13722 constructing a new CALL_EXPR. */
13723 if (call_expr_nargs (exp) == 1)
13725 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
13726 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
13728 break;
13730 default:
13731 break;
13734 if (TARGET_ALTIVEC)
13736 ret = altivec_expand_builtin (exp, target, &success);
13738 if (success)
13739 return ret;
13741 if (TARGET_SPE)
13743 ret = spe_expand_builtin (exp, target, &success);
13745 if (success)
13746 return ret;
13748 if (TARGET_PAIRED_FLOAT)
13750 ret = paired_expand_builtin (exp, target, &success);
13752 if (success)
13753 return ret;
13755 if (TARGET_HTM)
13757 ret = htm_expand_builtin (exp, target, &success);
13759 if (success)
13760 return ret;
13763 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
13764 gcc_assert (attr == RS6000_BTC_UNARY
13765 || attr == RS6000_BTC_BINARY
13766 || attr == RS6000_BTC_TERNARY);
13768 /* Handle simple unary operations. */
13769 d = bdesc_1arg;
13770 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13771 if (d->code == fcode)
13772 return rs6000_expand_unop_builtin (d->icode, exp, target);
13774 /* Handle simple binary operations. */
13775 d = bdesc_2arg;
13776 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13777 if (d->code == fcode)
13778 return rs6000_expand_binop_builtin (d->icode, exp, target);
13780 /* Handle simple ternary operations. */
13781 d = bdesc_3arg;
13782 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
13783 if (d->code == fcode)
13784 return rs6000_expand_ternop_builtin (d->icode, exp, target);
13786 gcc_unreachable ();
13789 static void
13790 rs6000_init_builtins (void)
13792 tree tdecl;
13793 tree ftype;
13794 enum machine_mode mode;
13796 if (TARGET_DEBUG_BUILTIN)
13797 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
13798 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
13799 (TARGET_SPE) ? ", spe" : "",
13800 (TARGET_ALTIVEC) ? ", altivec" : "",
13801 (TARGET_VSX) ? ", vsx" : "");
13803 V2SI_type_node = build_vector_type (intSI_type_node, 2);
13804 V2SF_type_node = build_vector_type (float_type_node, 2);
13805 V2DI_type_node = build_vector_type (intDI_type_node, 2);
13806 V2DF_type_node = build_vector_type (double_type_node, 2);
13807 V4HI_type_node = build_vector_type (intHI_type_node, 4);
13808 V4SI_type_node = build_vector_type (intSI_type_node, 4);
13809 V4SF_type_node = build_vector_type (float_type_node, 4);
13810 V8HI_type_node = build_vector_type (intHI_type_node, 8);
13811 V16QI_type_node = build_vector_type (intQI_type_node, 16);
13813 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
13814 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
13815 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
13816 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
13818 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
13819 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
13820 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
13821 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
13823 /* We use V1TI mode as a special container to hold __int128_t items that
13824 must live in VSX registers. */
13825 if (intTI_type_node)
13827 V1TI_type_node = build_vector_type (intTI_type_node, 1);
13828 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
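/* Illustrative use (assuming VSX and the __vector __int128 type
   registered further below):

     vector __int128 x = { ((__int128) 1) << 64 };

   keeps the 128-bit value in a single VSX register instead of a GPR
   pair.  */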
13831 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
13832 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
13833 'vector unsigned short'. */
13835 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
13836 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
13837 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
13838 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
13839 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
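/* For example, in C++ the comparison intrinsics return the bool flavor:

     vector bool int m = vec_cmpeq (a, b);

   for a and b of type vector signed int, and overload resolution must
   tell 'vector bool int' apart from 'vector unsigned int' even though
   both have unsigned 32-bit elements; hence the distinct type copies
   above rather than typedefs.  */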
13841 long_integer_type_internal_node = long_integer_type_node;
13842 long_unsigned_type_internal_node = long_unsigned_type_node;
13843 long_long_integer_type_internal_node = long_long_integer_type_node;
13844 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
13845 intQI_type_internal_node = intQI_type_node;
13846 uintQI_type_internal_node = unsigned_intQI_type_node;
13847 intHI_type_internal_node = intHI_type_node;
13848 uintHI_type_internal_node = unsigned_intHI_type_node;
13849 intSI_type_internal_node = intSI_type_node;
13850 uintSI_type_internal_node = unsigned_intSI_type_node;
13851 intDI_type_internal_node = intDI_type_node;
13852 uintDI_type_internal_node = unsigned_intDI_type_node;
13853 intTI_type_internal_node = intTI_type_node;
13854 uintTI_type_internal_node = unsigned_intTI_type_node;
13855 float_type_internal_node = float_type_node;
13856 double_type_internal_node = double_type_node;
13857 long_double_type_internal_node = long_double_type_node;
13858 dfloat64_type_internal_node = dfloat64_type_node;
13859 dfloat128_type_internal_node = dfloat128_type_node;
13860 void_type_internal_node = void_type_node;
13862 /* Initialize the modes for builtin_function_type, mapping a machine mode to
13863 tree type node. */
13864 builtin_mode_to_type[QImode][0] = integer_type_node;
13865 builtin_mode_to_type[HImode][0] = integer_type_node;
13866 builtin_mode_to_type[SImode][0] = intSI_type_node;
13867 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
13868 builtin_mode_to_type[DImode][0] = intDI_type_node;
13869 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
13870 builtin_mode_to_type[TImode][0] = intTI_type_node;
13871 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
13872 builtin_mode_to_type[SFmode][0] = float_type_node;
13873 builtin_mode_to_type[DFmode][0] = double_type_node;
13874 builtin_mode_to_type[TFmode][0] = long_double_type_node;
13875 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
13876 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
13877 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
13878 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
13879 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
13880 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
13881 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
13882 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
13883 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
13884 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
13885 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
13886 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
13887 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
13888 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
13889 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
13890 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
13891 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
13893 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
13894 TYPE_NAME (bool_char_type_node) = tdecl;
13896 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
13897 TYPE_NAME (bool_short_type_node) = tdecl;
13899 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
13900 TYPE_NAME (bool_int_type_node) = tdecl;
13902 tdecl = add_builtin_type ("__pixel", pixel_type_node);
13903 TYPE_NAME (pixel_type_node) = tdecl;
13905 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
13906 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
13907 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
13908 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
13909 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
13911 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
13912 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
13914 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
13915 TYPE_NAME (V16QI_type_node) = tdecl;
13917 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
13918 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
13920 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
13921 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
13923 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
13924 TYPE_NAME (V8HI_type_node) = tdecl;
13926 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
13927 TYPE_NAME (bool_V8HI_type_node) = tdecl;
13929 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
13930 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
13932 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
13933 TYPE_NAME (V4SI_type_node) = tdecl;
13935 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
13936 TYPE_NAME (bool_V4SI_type_node) = tdecl;
13938 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
13939 TYPE_NAME (V4SF_type_node) = tdecl;
13941 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
13942 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
13944 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
13945 TYPE_NAME (V2DF_type_node) = tdecl;
13947 if (TARGET_POWERPC64)
13949 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
13950 TYPE_NAME (V2DI_type_node) = tdecl;
13952 tdecl = add_builtin_type ("__vector unsigned long",
13953 unsigned_V2DI_type_node);
13954 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
13956 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
13957 TYPE_NAME (bool_V2DI_type_node) = tdecl;
13959 else
13961 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
13962 TYPE_NAME (V2DI_type_node) = tdecl;
13964 tdecl = add_builtin_type ("__vector unsigned long long",
13965 unsigned_V2DI_type_node);
13966 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
13968 tdecl = add_builtin_type ("__vector __bool long long",
13969 bool_V2DI_type_node);
13970 TYPE_NAME (bool_V2DI_type_node) = tdecl;
13973 if (V1TI_type_node)
13975 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
13976 TYPE_NAME (V1TI_type_node) = tdecl;
13978 tdecl = add_builtin_type ("__vector unsigned __int128",
13979 unsigned_V1TI_type_node);
13980 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
13983 /* Paired and SPE builtins are only available if the compiler is built
13984 with the corresponding options, so only create those builtins when the
13985 options are enabled. Create Altivec and VSX builtins on machines
13986 with at least the general purpose extensions (970 and newer) to allow the
13987 use of the target attribute. */
13988 if (TARGET_PAIRED_FLOAT)
13989 paired_init_builtins ();
13990 if (TARGET_SPE)
13991 spe_init_builtins ();
13992 if (TARGET_EXTRA_BUILTINS)
13993 altivec_init_builtins ();
13994 if (TARGET_HTM)
13995 htm_init_builtins ();
13997 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
13998 rs6000_common_init_builtins ();
14000 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
14001 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
14002 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
14004 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
14005 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
14006 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
14008 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
14009 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
14010 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
14012 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
14013 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
14014 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
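/* Illustrative use of the builtins just defined (semantics per the
   RECIP/RSQRT expanders earlier in this file):

     double q = __builtin_recipdiv (a, b);
     float  r = __builtin_rsqrtf (x);

   compute approximately a/b and 1/sqrtf(x) via reciprocal-estimate
   sequences rather than full divide/sqrt instructions.  */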
14016 mode = (TARGET_64BIT) ? DImode : SImode;
14017 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
14018 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
14019 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
14021 ftype = build_function_type_list (unsigned_intDI_type_node,
14022 NULL_TREE);
14023 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
14025 if (TARGET_64BIT)
14026 ftype = build_function_type_list (unsigned_intDI_type_node,
14027 NULL_TREE);
14028 else
14029 ftype = build_function_type_list (unsigned_intSI_type_node,
14030 NULL_TREE);
14031 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
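/* For example:

     unsigned long long tb = __builtin_ppc_get_timebase ();

   always yields the full 64-bit time base, while __builtin_ppc_mftb ()
   returns only 32 bits on 32-bit targets, matching the DImode/SImode
   function types chosen above.  */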
14033 ftype = build_function_type_list (double_type_node, NULL_TREE);
14034 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
14036 ftype = build_function_type_list (void_type_node,
14037 intSI_type_node, double_type_node,
14038 NULL_TREE);
14039 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
14041 #if TARGET_XCOFF
14042 /* AIX libm provides clog as __clog. */
14043 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
14044 set_user_assembler_name (tdecl, "__clog");
14045 #endif
14047 #ifdef SUBTARGET_INIT_BUILTINS
14048 SUBTARGET_INIT_BUILTINS;
14049 #endif
14052 /* Returns the rs6000 builtin decl for CODE. */
14054 static tree
14055 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
14057 HOST_WIDE_INT fnmask;
14059 if (code >= RS6000_BUILTIN_COUNT)
14060 return error_mark_node;
14062 fnmask = rs6000_builtin_info[code].mask;
14063 if ((fnmask & rs6000_builtin_mask) != fnmask)
14065 rs6000_invalid_builtin ((enum rs6000_builtins)code);
14066 return error_mark_node;
14069 return rs6000_builtin_decls[code];
14072 static void
14073 spe_init_builtins (void)
14075 tree puint_type_node = build_pointer_type (unsigned_type_node);
14076 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
14077 const struct builtin_description *d;
14078 size_t i;
14080 tree v2si_ftype_4_v2si
14081 = build_function_type_list (opaque_V2SI_type_node,
14082 opaque_V2SI_type_node,
14083 opaque_V2SI_type_node,
14084 opaque_V2SI_type_node,
14085 opaque_V2SI_type_node,
14086 NULL_TREE);
14088 tree v2sf_ftype_4_v2sf
14089 = build_function_type_list (opaque_V2SF_type_node,
14090 opaque_V2SF_type_node,
14091 opaque_V2SF_type_node,
14092 opaque_V2SF_type_node,
14093 opaque_V2SF_type_node,
14094 NULL_TREE);
14096 tree int_ftype_int_v2si_v2si
14097 = build_function_type_list (integer_type_node,
14098 integer_type_node,
14099 opaque_V2SI_type_node,
14100 opaque_V2SI_type_node,
14101 NULL_TREE);
14103 tree int_ftype_int_v2sf_v2sf
14104 = build_function_type_list (integer_type_node,
14105 integer_type_node,
14106 opaque_V2SF_type_node,
14107 opaque_V2SF_type_node,
14108 NULL_TREE);
14110 tree void_ftype_v2si_puint_int
14111 = build_function_type_list (void_type_node,
14112 opaque_V2SI_type_node,
14113 puint_type_node,
14114 integer_type_node,
14115 NULL_TREE);
14117 tree void_ftype_v2si_puint_char
14118 = build_function_type_list (void_type_node,
14119 opaque_V2SI_type_node,
14120 puint_type_node,
14121 char_type_node,
14122 NULL_TREE);
14124 tree void_ftype_v2si_pv2si_int
14125 = build_function_type_list (void_type_node,
14126 opaque_V2SI_type_node,
14127 opaque_p_V2SI_type_node,
14128 integer_type_node,
14129 NULL_TREE);
14131 tree void_ftype_v2si_pv2si_char
14132 = build_function_type_list (void_type_node,
14133 opaque_V2SI_type_node,
14134 opaque_p_V2SI_type_node,
14135 char_type_node,
14136 NULL_TREE);
14138 tree void_ftype_int
14139 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14141 tree int_ftype_void
14142 = build_function_type_list (integer_type_node, NULL_TREE);
14144 tree v2si_ftype_pv2si_int
14145 = build_function_type_list (opaque_V2SI_type_node,
14146 opaque_p_V2SI_type_node,
14147 integer_type_node,
14148 NULL_TREE);
14150 tree v2si_ftype_puint_int
14151 = build_function_type_list (opaque_V2SI_type_node,
14152 puint_type_node,
14153 integer_type_node,
14154 NULL_TREE);
14156 tree v2si_ftype_pushort_int
14157 = build_function_type_list (opaque_V2SI_type_node,
14158 pushort_type_node,
14159 integer_type_node,
14160 NULL_TREE);
14162 tree v2si_ftype_signed_char
14163 = build_function_type_list (opaque_V2SI_type_node,
14164 signed_char_type_node,
14165 NULL_TREE);
14167 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
14169 /* Initialize irregular SPE builtins. */
14171 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
14172 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
14173 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
14174 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
14175 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
14176 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
14177 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
14178 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
14179 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
14180 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
14181 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
14182 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
14183 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
14184 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
14185 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
14186 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
14187 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
14188 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
14190 /* Loads. */
14191 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
14192 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
14193 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
14194 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
14195 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
14196 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
14197 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
14198 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
14199 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
14200 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
14201 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
14202 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
14203 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
14204 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
14205 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
14206 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
14207 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
14208 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
14209 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
14210 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
14211 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
14212 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
14214 /* Predicates. */
14215 d = bdesc_spe_predicates;
14216 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
14218 tree type;
14220 switch (insn_data[d->icode].operand[1].mode)
14222 case V2SImode:
14223 type = int_ftype_int_v2si_v2si;
14224 break;
14225 case V2SFmode:
14226 type = int_ftype_int_v2sf_v2sf;
14227 break;
14228 default:
14229 gcc_unreachable ();
14232 def_builtin (d->name, type, d->code);
14235 /* Evsel predicates. */
14236 d = bdesc_spe_evsel;
14237 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
14239 tree type;
14241 switch (insn_data[d->icode].operand[1].mode)
14243 case V2SImode:
14244 type = v2si_ftype_4_v2si;
14245 break;
14246 case V2SFmode:
14247 type = v2sf_ftype_4_v2sf;
14248 break;
14249 default:
14250 gcc_unreachable ();
14253 def_builtin (d->name, type, d->code);
14257 static void
14258 paired_init_builtins (void)
14260 const struct builtin_description *d;
14261 size_t i;
14263 tree int_ftype_int_v2sf_v2sf
14264 = build_function_type_list (integer_type_node,
14265 integer_type_node,
14266 V2SF_type_node,
14267 V2SF_type_node,
14268 NULL_TREE);
14269 tree pcfloat_type_node =
14270 build_pointer_type (build_qualified_type
14271 (float_type_node, TYPE_QUAL_CONST));
14273 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
14274 long_integer_type_node,
14275 pcfloat_type_node,
14276 NULL_TREE);
14277 tree void_ftype_v2sf_long_pcfloat =
14278 build_function_type_list (void_type_node,
14279 V2SF_type_node,
14280 long_integer_type_node,
14281 pcfloat_type_node,
14282 NULL_TREE);
14285 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
14286 PAIRED_BUILTIN_LX);
14289 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
14290 PAIRED_BUILTIN_STX);
14292 /* Predicates. */
14293 d = bdesc_paired_preds;
14294 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
14296 tree type;
14298 if (TARGET_DEBUG_BUILTIN)
14299 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
14300 (int)i, get_insn_name (d->icode), (int)d->icode,
14301 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
14303 switch (insn_data[d->icode].operand[1].mode)
14305 case V2SFmode:
14306 type = int_ftype_int_v2sf_v2sf;
14307 break;
14308 default:
14309 gcc_unreachable ();
14312 def_builtin (d->name, type, d->code);
14316 static void
14317 altivec_init_builtins (void)
14319 const struct builtin_description *d;
14320 size_t i;
14321 tree ftype;
14322 tree decl;
14324 tree pvoid_type_node = build_pointer_type (void_type_node);
14326 tree pcvoid_type_node
14327 = build_pointer_type (build_qualified_type (void_type_node,
14328 TYPE_QUAL_CONST));
14330 tree int_ftype_opaque
14331 = build_function_type_list (integer_type_node,
14332 opaque_V4SI_type_node, NULL_TREE);
14333 tree opaque_ftype_opaque /* placeholder; these overloads are resolved later */
14334 = build_function_type_list (integer_type_node, NULL_TREE);
14335 tree opaque_ftype_opaque_int
14336 = build_function_type_list (opaque_V4SI_type_node,
14337 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
14338 tree opaque_ftype_opaque_opaque_int
14339 = build_function_type_list (opaque_V4SI_type_node,
14340 opaque_V4SI_type_node, opaque_V4SI_type_node,
14341 integer_type_node, NULL_TREE);
14342 tree int_ftype_int_opaque_opaque
14343 = build_function_type_list (integer_type_node,
14344 integer_type_node, opaque_V4SI_type_node,
14345 opaque_V4SI_type_node, NULL_TREE);
14346 tree int_ftype_int_v4si_v4si
14347 = build_function_type_list (integer_type_node,
14348 integer_type_node, V4SI_type_node,
14349 V4SI_type_node, NULL_TREE);
14350 tree int_ftype_int_v2di_v2di
14351 = build_function_type_list (integer_type_node,
14352 integer_type_node, V2DI_type_node,
14353 V2DI_type_node, NULL_TREE);
14354 tree void_ftype_v4si
14355 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
14356 tree v8hi_ftype_void
14357 = build_function_type_list (V8HI_type_node, NULL_TREE);
14358 tree void_ftype_void
14359 = build_function_type_list (void_type_node, NULL_TREE);
14360 tree void_ftype_int
14361 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14363 tree opaque_ftype_long_pcvoid
14364 = build_function_type_list (opaque_V4SI_type_node,
14365 long_integer_type_node, pcvoid_type_node,
14366 NULL_TREE);
14367 tree v16qi_ftype_long_pcvoid
14368 = build_function_type_list (V16QI_type_node,
14369 long_integer_type_node, pcvoid_type_node,
14370 NULL_TREE);
14371 tree v8hi_ftype_long_pcvoid
14372 = build_function_type_list (V8HI_type_node,
14373 long_integer_type_node, pcvoid_type_node,
14374 NULL_TREE);
14375 tree v4si_ftype_long_pcvoid
14376 = build_function_type_list (V4SI_type_node,
14377 long_integer_type_node, pcvoid_type_node,
14378 NULL_TREE);
14379 tree v4sf_ftype_long_pcvoid
14380 = build_function_type_list (V4SF_type_node,
14381 long_integer_type_node, pcvoid_type_node,
14382 NULL_TREE);
14383 tree v2df_ftype_long_pcvoid
14384 = build_function_type_list (V2DF_type_node,
14385 long_integer_type_node, pcvoid_type_node,
14386 NULL_TREE);
14387 tree v2di_ftype_long_pcvoid
14388 = build_function_type_list (V2DI_type_node,
14389 long_integer_type_node, pcvoid_type_node,
14390 NULL_TREE);
14392 tree void_ftype_opaque_long_pvoid
14393 = build_function_type_list (void_type_node,
14394 opaque_V4SI_type_node, long_integer_type_node,
14395 pvoid_type_node, NULL_TREE);
14396 tree void_ftype_v4si_long_pvoid
14397 = build_function_type_list (void_type_node,
14398 V4SI_type_node, long_integer_type_node,
14399 pvoid_type_node, NULL_TREE);
14400 tree void_ftype_v16qi_long_pvoid
14401 = build_function_type_list (void_type_node,
14402 V16QI_type_node, long_integer_type_node,
14403 pvoid_type_node, NULL_TREE);
14404 tree void_ftype_v8hi_long_pvoid
14405 = build_function_type_list (void_type_node,
14406 V8HI_type_node, long_integer_type_node,
14407 pvoid_type_node, NULL_TREE);
14408 tree void_ftype_v4sf_long_pvoid
14409 = build_function_type_list (void_type_node,
14410 V4SF_type_node, long_integer_type_node,
14411 pvoid_type_node, NULL_TREE);
14412 tree void_ftype_v2df_long_pvoid
14413 = build_function_type_list (void_type_node,
14414 V2DF_type_node, long_integer_type_node,
14415 pvoid_type_node, NULL_TREE);
14416 tree void_ftype_v2di_long_pvoid
14417 = build_function_type_list (void_type_node,
14418 V2DI_type_node, long_integer_type_node,
14419 pvoid_type_node, NULL_TREE);
14420 tree int_ftype_int_v8hi_v8hi
14421 = build_function_type_list (integer_type_node,
14422 integer_type_node, V8HI_type_node,
14423 V8HI_type_node, NULL_TREE);
14424 tree int_ftype_int_v16qi_v16qi
14425 = build_function_type_list (integer_type_node,
14426 integer_type_node, V16QI_type_node,
14427 V16QI_type_node, NULL_TREE);
14428 tree int_ftype_int_v4sf_v4sf
14429 = build_function_type_list (integer_type_node,
14430 integer_type_node, V4SF_type_node,
14431 V4SF_type_node, NULL_TREE);
14432 tree int_ftype_int_v2df_v2df
14433 = build_function_type_list (integer_type_node,
14434 integer_type_node, V2DF_type_node,
14435 V2DF_type_node, NULL_TREE);
14436 tree v2di_ftype_v2di
14437 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
14438 tree v4si_ftype_v4si
14439 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
14440 tree v8hi_ftype_v8hi
14441 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
14442 tree v16qi_ftype_v16qi
14443 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
14444 tree v4sf_ftype_v4sf
14445 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14446 tree v2df_ftype_v2df
14447 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14448 tree void_ftype_pcvoid_int_int
14449 = build_function_type_list (void_type_node,
14450 pcvoid_type_node, integer_type_node,
14451 integer_type_node, NULL_TREE);
14453 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
14454 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
14455 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
14456 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
14457 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
14458 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
14459 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
14460 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
14461 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
14462 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
14463 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
14464 ALTIVEC_BUILTIN_LVXL_V2DF);
14465 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
14466 ALTIVEC_BUILTIN_LVXL_V2DI);
14467 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
14468 ALTIVEC_BUILTIN_LVXL_V4SF);
14469 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
14470 ALTIVEC_BUILTIN_LVXL_V4SI);
14471 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
14472 ALTIVEC_BUILTIN_LVXL_V8HI);
14473 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
14474 ALTIVEC_BUILTIN_LVXL_V16QI);
14475 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
14476 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
14477 ALTIVEC_BUILTIN_LVX_V2DF);
14478 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
14479 ALTIVEC_BUILTIN_LVX_V2DI);
14480 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
14481 ALTIVEC_BUILTIN_LVX_V4SF);
14482 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
14483 ALTIVEC_BUILTIN_LVX_V4SI);
14484 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
14485 ALTIVEC_BUILTIN_LVX_V8HI);
14486 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
14487 ALTIVEC_BUILTIN_LVX_V16QI);
14488 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
14489 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
14490 ALTIVEC_BUILTIN_STVX_V2DF);
14491 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
14492 ALTIVEC_BUILTIN_STVX_V2DI);
14493 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
14494 ALTIVEC_BUILTIN_STVX_V4SF);
14495 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
14496 ALTIVEC_BUILTIN_STVX_V4SI);
14497 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
14498 ALTIVEC_BUILTIN_STVX_V8HI);
14499 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
14500 ALTIVEC_BUILTIN_STVX_V16QI);
14501 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
14502 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
14503 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
14504 ALTIVEC_BUILTIN_STVXL_V2DF);
14505 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
14506 ALTIVEC_BUILTIN_STVXL_V2DI);
14507 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
14508 ALTIVEC_BUILTIN_STVXL_V4SF);
14509 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
14510 ALTIVEC_BUILTIN_STVXL_V4SI);
14511 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
14512 ALTIVEC_BUILTIN_STVXL_V8HI);
14513 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
14514 ALTIVEC_BUILTIN_STVXL_V16QI);
14515 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
14516 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
14517 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
14518 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
14519 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
14520 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
14521 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
14522 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
14523 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
14524 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
14525 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
14526 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
14527 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
14528 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
14529 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
14530 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
14532 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
14533 VSX_BUILTIN_LXVD2X_V2DF);
14534 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
14535 VSX_BUILTIN_LXVD2X_V2DI);
14536 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
14537 VSX_BUILTIN_LXVW4X_V4SF);
14538 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
14539 VSX_BUILTIN_LXVW4X_V4SI);
14540 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
14541 VSX_BUILTIN_LXVW4X_V8HI);
14542 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
14543 VSX_BUILTIN_LXVW4X_V16QI);
14544 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
14545 VSX_BUILTIN_STXVD2X_V2DF);
14546 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
14547 VSX_BUILTIN_STXVD2X_V2DI);
14548 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
14549 VSX_BUILTIN_STXVW4X_V4SF);
14550 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
14551 VSX_BUILTIN_STXVW4X_V4SI);
14552 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
14553 VSX_BUILTIN_STXVW4X_V8HI);
14554 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
14555 VSX_BUILTIN_STXVW4X_V16QI);
14556 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
14557 VSX_BUILTIN_VEC_LD);
14558 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
14559 VSX_BUILTIN_VEC_ST);
14561 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
14562 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
14563 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
14565 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
14566 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
14567 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
14568 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
14569 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
14570 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
14571 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
14572 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
14573 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
14574 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
14575 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
14576 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
14578 /* Cell builtins. */
14579 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
14580 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
14581 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
14582 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
14584 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
14585 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
14586 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
14587 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
14589 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
14590 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
14591 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
14592 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
14594 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
14595 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
14596 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
14597 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
14599 /* Add the DST variants. */
14600 d = bdesc_dst;
14601 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14602 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
14604 /* Initialize the predicates. */
14605 d = bdesc_altivec_preds;
14606 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14608 enum machine_mode mode1;
14609 tree type;
14611 if (rs6000_overloaded_builtin_p (d->code))
14612 mode1 = VOIDmode;
14613 else
14614 mode1 = insn_data[d->icode].operand[1].mode;
14616 switch (mode1)
14618 case VOIDmode:
14619 type = int_ftype_int_opaque_opaque;
14620 break;
14621 case V2DImode:
14622 type = int_ftype_int_v2di_v2di;
14623 break;
14624 case V4SImode:
14625 type = int_ftype_int_v4si_v4si;
14626 break;
14627 case V8HImode:
14628 type = int_ftype_int_v8hi_v8hi;
14629 break;
14630 case V16QImode:
14631 type = int_ftype_int_v16qi_v16qi;
14632 break;
14633 case V4SFmode:
14634 type = int_ftype_int_v4sf_v4sf;
14635 break;
14636 case V2DFmode:
14637 type = int_ftype_int_v2df_v2df;
14638 break;
14639 default:
14640 gcc_unreachable ();
14643 def_builtin (d->name, type, d->code);
14646 /* Initialize the abs* operators. */
14647 d = bdesc_abs;
14648 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14650 enum machine_mode mode0;
14651 tree type;
14653 mode0 = insn_data[d->icode].operand[0].mode;
14655 switch (mode0)
14657 case V2DImode:
14658 type = v2di_ftype_v2di;
14659 break;
14660 case V4SImode:
14661 type = v4si_ftype_v4si;
14662 break;
14663 case V8HImode:
14664 type = v8hi_ftype_v8hi;
14665 break;
14666 case V16QImode:
14667 type = v16qi_ftype_v16qi;
14668 break;
14669 case V4SFmode:
14670 type = v4sf_ftype_v4sf;
14671 break;
14672 case V2DFmode:
14673 type = v2df_ftype_v2df;
14674 break;
14675 default:
14676 gcc_unreachable ();
14679 def_builtin (d->name, type, d->code);
14682 /* Initialize the target builtin that implements
14683 targetm.vectorize.builtin_mask_for_load. */
14685 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
14686 v16qi_ftype_long_pcvoid,
14687 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
14688 BUILT_IN_MD, NULL, NULL_TREE);
14689 TREE_READONLY (decl) = 1;
14690 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
14691 altivec_builtin_mask_for_load = decl;
14693 /* Access to the vec_init patterns. */
14694 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
14695 integer_type_node, integer_type_node,
14696 integer_type_node, NULL_TREE);
14697 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
14699 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
14700 short_integer_type_node,
14701 short_integer_type_node,
14702 short_integer_type_node,
14703 short_integer_type_node,
14704 short_integer_type_node,
14705 short_integer_type_node,
14706 short_integer_type_node, NULL_TREE);
14707 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
14709 ftype = build_function_type_list (V16QI_type_node, char_type_node,
14710 char_type_node, char_type_node,
14711 char_type_node, char_type_node,
14712 char_type_node, char_type_node,
14713 char_type_node, char_type_node,
14714 char_type_node, char_type_node,
14715 char_type_node, char_type_node,
14716 char_type_node, char_type_node,
14717 char_type_node, NULL_TREE);
14718 def_builtin ("__builtin_vec_init_v16qi", ftype,
14719 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
14721 ftype = build_function_type_list (V4SF_type_node, float_type_node,
14722 float_type_node, float_type_node,
14723 float_type_node, NULL_TREE);
14724 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
14726 /* VSX builtins. */
14727 ftype = build_function_type_list (V2DF_type_node, double_type_node,
14728 double_type_node, NULL_TREE);
14729 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
14731 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
14732 intDI_type_node, NULL_TREE);
14733 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
14735 /* Access to the vec_set patterns. */
14736 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
14737 intSI_type_node,
14738 integer_type_node, NULL_TREE);
14739 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
14741 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14742 intHI_type_node,
14743 integer_type_node, NULL_TREE);
14744 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
14746 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
14747 intQI_type_node,
14748 integer_type_node, NULL_TREE);
14749 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
14751 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
14752 float_type_node,
14753 integer_type_node, NULL_TREE);
14754 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
14756 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
14757 double_type_node,
14758 integer_type_node, NULL_TREE);
14759 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
14761 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
14762 intDI_type_node,
14763 integer_type_node, NULL_TREE);
14764 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
14766 /* Access to the vec_extract patterns. */
14767 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14768 integer_type_node, NULL_TREE);
14769 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
14771 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14772 integer_type_node, NULL_TREE);
14773 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
14775 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
14776 integer_type_node, NULL_TREE);
14777 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
14779 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14780 integer_type_node, NULL_TREE);
14781 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
14783 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14784 integer_type_node, NULL_TREE);
14785 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
14787 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
14788 integer_type_node, NULL_TREE);
14789 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
14792 if (V1TI_type_node)
14794 tree v1ti_ftype_long_pcvoid
14795 = build_function_type_list (V1TI_type_node,
14796 long_integer_type_node, pcvoid_type_node,
14797 NULL_TREE);
14798 tree void_ftype_v1ti_long_pvoid
14799 = build_function_type_list (void_type_node,
14800 V1TI_type_node, long_integer_type_node,
14801 pvoid_type_node, NULL_TREE);
14802 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
14803 VSX_BUILTIN_LXVD2X_V1TI);
14804 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
14805 VSX_BUILTIN_STXVD2X_V1TI);
14806 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
14807 NULL_TREE);
14808 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
14809 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
14810 intTI_type_node,
14811 integer_type_node, NULL_TREE);
14812 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
14813 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
14814 integer_type_node, NULL_TREE);
14815 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
14820 static void
14821 htm_init_builtins (void)
14823 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
14824 const struct builtin_description *d;
14825 size_t i;
14827 d = bdesc_htm;
14828 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14830 tree op[MAX_HTM_OPERANDS], type;
14831 HOST_WIDE_INT mask = d->mask;
14832 unsigned attr = rs6000_builtin_info[d->code].attr;
14833 bool void_func = (attr & RS6000_BTC_VOID);
14834 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
14835 int nopnds = 0;
14836 tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
14837 : unsigned_type_node;
14839 if ((mask & builtin_mask) != mask)
14841 if (TARGET_DEBUG_BUILTIN)
14842 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
14843 continue;
14846 if (d->name == 0)
14848 if (TARGET_DEBUG_BUILTIN)
14849 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
14850 (long unsigned) i);
14851 continue;
14854 op[nopnds++] = (void_func) ? void_type_node : argtype;
14856 if (attr_args == RS6000_BTC_UNARY)
14857 op[nopnds++] = argtype;
14858 else if (attr_args == RS6000_BTC_BINARY)
14860 op[nopnds++] = argtype;
14861 op[nopnds++] = argtype;
14863 else if (attr_args == RS6000_BTC_TERNARY)
14865 op[nopnds++] = argtype;
14866 op[nopnds++] = argtype;
14867 op[nopnds++] = argtype;
14870 switch (nopnds)
14872 case 1:
14873 type = build_function_type_list (op[0], NULL_TREE);
14874 break;
14875 case 2:
14876 type = build_function_type_list (op[0], op[1], NULL_TREE);
14877 break;
14878 case 3:
14879 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
14880 break;
14881 case 4:
14882 type = build_function_type_list (op[0], op[1], op[2], op[3],
14883 NULL_TREE);
14884 break;
14885 default:
14886 gcc_unreachable ();
14889 def_builtin (d->name, type, d->code);
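/* Sketch of the signature derivation above: an entry whose attr has
   RS6000_BTC_SPR and RS6000_BTC_BINARY set receives the type
   "unsigned long fn (unsigned long, unsigned long)", while an
   RS6000_BTC_VOID + RS6000_BTC_UNARY entry receives
   "void fn (unsigned int)"; op[0] always holds the return type and
   the remaining slots hold the argument types.  */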
14893 /* Hash function for builtin functions with up to 3 arguments and a return
14894 type. */
14895 static unsigned
14896 builtin_hash_function (const void *hash_entry)
14898 unsigned ret = 0;
14899 int i;
14900 const struct builtin_hash_struct *bh =
14901 (const struct builtin_hash_struct *) hash_entry;
14903 for (i = 0; i < 4; i++)
14905 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
14906 ret = (ret * 2) + bh->uns_p[i];
14909 return ret;
14912 /* Compare builtin hash entries H1 and H2 for equivalence. */
14913 static int
14914 builtin_hash_eq (const void *h1, const void *h2)
14916 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
14917 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
14919 return ((p1->mode[0] == p2->mode[0])
14920 && (p1->mode[1] == p2->mode[1])
14921 && (p1->mode[2] == p2->mode[2])
14922 && (p1->mode[3] == p2->mode[3])
14923 && (p1->uns_p[0] == p2->uns_p[0])
14924 && (p1->uns_p[1] == p2->uns_p[1])
14925 && (p1->uns_p[2] == p2->uns_p[2])
14926 && (p1->uns_p[3] == p2->uns_p[3]));
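/* Worked example of the hash key: mode[] = { V4SImode, V4SImode,
   V4SImode, VOIDmode } with all uns_p[] zero describes the cached
   type "V4SI fn (V4SI, V4SI)"; the same modes with uns_p[0] = 1
   compare unequal here, so the signed and unsigned variants get
   distinct type nodes.  */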
14929 /* Map types for builtin functions with an explicit return type and up to 3
14930 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
14931 of the unused arguments. */
14932 static tree
14933 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
14934 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
14935 enum rs6000_builtins builtin, const char *name)
14937 struct builtin_hash_struct h;
14938 struct builtin_hash_struct *h2;
14939 void **found;
14940 int num_args = 3;
14941 int i;
14942 tree ret_type = NULL_TREE;
14943 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
14945 /* Create builtin_hash_table. */
14946 if (builtin_hash_table == NULL)
14947 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
14948 builtin_hash_eq, NULL);
14950 h.type = NULL_TREE;
14951 h.mode[0] = mode_ret;
14952 h.mode[1] = mode_arg0;
14953 h.mode[2] = mode_arg1;
14954 h.mode[3] = mode_arg2;
14955 h.uns_p[0] = 0;
14956 h.uns_p[1] = 0;
14957 h.uns_p[2] = 0;
14958 h.uns_p[3] = 0;
14960 /* If the builtin produces unsigned results or takes unsigned
14961 arguments, and it is returned as a decl for the vectorizer (such as
14962 widening multiplies, permute), make sure the arguments and return value
14963 are correctly typed. */
14964 switch (builtin)
14966 /* unsigned 1 argument functions. */
14967 case CRYPTO_BUILTIN_VSBOX:
14968 case P8V_BUILTIN_VGBBD:
14969 case MISC_BUILTIN_CDTBCD:
14970 case MISC_BUILTIN_CBCDTD:
14971 h.uns_p[0] = 1;
14972 h.uns_p[1] = 1;
14973 break;
14975 /* unsigned 2 argument functions. */
14976 case ALTIVEC_BUILTIN_VMULEUB_UNS:
14977 case ALTIVEC_BUILTIN_VMULEUH_UNS:
14978 case ALTIVEC_BUILTIN_VMULOUB_UNS:
14979 case ALTIVEC_BUILTIN_VMULOUH_UNS:
14980 case CRYPTO_BUILTIN_VCIPHER:
14981 case CRYPTO_BUILTIN_VCIPHERLAST:
14982 case CRYPTO_BUILTIN_VNCIPHER:
14983 case CRYPTO_BUILTIN_VNCIPHERLAST:
14984 case CRYPTO_BUILTIN_VPMSUMB:
14985 case CRYPTO_BUILTIN_VPMSUMH:
14986 case CRYPTO_BUILTIN_VPMSUMW:
14987 case CRYPTO_BUILTIN_VPMSUMD:
14988 case CRYPTO_BUILTIN_VPMSUM:
14989 case MISC_BUILTIN_ADDG6S:
14990 case MISC_BUILTIN_DIVWEU:
14991 case MISC_BUILTIN_DIVWEUO:
14992 case MISC_BUILTIN_DIVDEU:
14993 case MISC_BUILTIN_DIVDEUO:
14994 h.uns_p[0] = 1;
14995 h.uns_p[1] = 1;
14996 h.uns_p[2] = 1;
14997 break;
14999 /* unsigned 3 argument functions. */
15000 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
15001 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
15002 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
15003 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
15004 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
15005 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
15006 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
15007 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
15008 case VSX_BUILTIN_VPERM_16QI_UNS:
15009 case VSX_BUILTIN_VPERM_8HI_UNS:
15010 case VSX_BUILTIN_VPERM_4SI_UNS:
15011 case VSX_BUILTIN_VPERM_2DI_UNS:
15012 case VSX_BUILTIN_XXSEL_16QI_UNS:
15013 case VSX_BUILTIN_XXSEL_8HI_UNS:
15014 case VSX_BUILTIN_XXSEL_4SI_UNS:
15015 case VSX_BUILTIN_XXSEL_2DI_UNS:
15016 case CRYPTO_BUILTIN_VPERMXOR:
15017 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
15018 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
15019 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
15020 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
15021 case CRYPTO_BUILTIN_VSHASIGMAW:
15022 case CRYPTO_BUILTIN_VSHASIGMAD:
15023 case CRYPTO_BUILTIN_VSHASIGMA:
15024 h.uns_p[0] = 1;
15025 h.uns_p[1] = 1;
15026 h.uns_p[2] = 1;
15027 h.uns_p[3] = 1;
15028 break;
15030 /* signed permute functions with unsigned char mask. */
15031 case ALTIVEC_BUILTIN_VPERM_16QI:
15032 case ALTIVEC_BUILTIN_VPERM_8HI:
15033 case ALTIVEC_BUILTIN_VPERM_4SI:
15034 case ALTIVEC_BUILTIN_VPERM_4SF:
15035 case ALTIVEC_BUILTIN_VPERM_2DI:
15036 case ALTIVEC_BUILTIN_VPERM_2DF:
15037 case VSX_BUILTIN_VPERM_16QI:
15038 case VSX_BUILTIN_VPERM_8HI:
15039 case VSX_BUILTIN_VPERM_4SI:
15040 case VSX_BUILTIN_VPERM_4SF:
15041 case VSX_BUILTIN_VPERM_2DI:
15042 case VSX_BUILTIN_VPERM_2DF:
15043 h.uns_p[3] = 1;
15044 break;
15046 /* unsigned args, signed return. */
15047 case VSX_BUILTIN_XVCVUXDDP_UNS:
15048 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
15049 h.uns_p[1] = 1;
15050 break;
15052 /* signed args, unsigned return. */
15053 case VSX_BUILTIN_XVCVDPUXDS_UNS:
15054 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
15055 case MISC_BUILTIN_UNPACK_TD:
15056 case MISC_BUILTIN_UNPACK_V1TI:
15057 h.uns_p[0] = 1;
15058 break;
15060 /* unsigned arguments for 128-bit pack instructions. */
15061 case MISC_BUILTIN_PACK_TD:
15062 case MISC_BUILTIN_PACK_V1TI:
15063 h.uns_p[1] = 1;
15064 h.uns_p[2] = 1;
15065 break;
15067 default:
15068 break;
15071 /* Figure out how many args are present. */
15072 while (num_args > 0 && h.mode[num_args] == VOIDmode)
15073 num_args--;
15075 if (num_args == 0)
15076 fatal_error ("internal error: builtin function %s had no type", name);
15078 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
15079 if (!ret_type && h.uns_p[0])
15080 ret_type = builtin_mode_to_type[h.mode[0]][0];
15082 if (!ret_type)
15083 fatal_error ("internal error: builtin function %s had an unexpected "
15084 "return type %s", name, GET_MODE_NAME (h.mode[0]));
15086 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
15087 arg_type[i] = NULL_TREE;
15089 for (i = 0; i < num_args; i++)
15091 int m = (int) h.mode[i+1];
15092 int uns_p = h.uns_p[i+1];
15094 arg_type[i] = builtin_mode_to_type[m][uns_p];
15095 if (!arg_type[i] && uns_p)
15096 arg_type[i] = builtin_mode_to_type[m][0];
15098 if (!arg_type[i])
15099 fatal_error ("internal error: builtin function %s, argument %d "
15100 "had unexpected argument type %s", name, i,
15101 GET_MODE_NAME (m));
15104 found = htab_find_slot (builtin_hash_table, &h, INSERT);
15105 if (*found == NULL)
15107 h2 = ggc_alloc<builtin_hash_struct> ();
15108 *h2 = h;
15109 *found = (void *)h2;
15111 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
15112 arg_type[2], NULL_TREE);
15115 return ((struct builtin_hash_struct *)(*found))->type;
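/* Usage sketch (hypothetical modes): for a binary vector-add insn
   whose operands are all V4SImode, the loops below in effect call

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
                            d->code, d->name)

   and get back "V4SI fn (V4SI, V4SI)"; the trailing VOIDmode is what
   makes num_args come out as 2.  */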
15118 static void
15119 rs6000_common_init_builtins (void)
15121 const struct builtin_description *d;
15122 size_t i;
15124 tree opaque_ftype_opaque = NULL_TREE;
15125 tree opaque_ftype_opaque_opaque = NULL_TREE;
15126 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
15127 tree v2si_ftype_qi = NULL_TREE;
15128 tree v2si_ftype_v2si_qi = NULL_TREE;
15129 tree v2si_ftype_int_qi = NULL_TREE;
15130 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15132 if (!TARGET_PAIRED_FLOAT)
15134 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
15135 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
15138 /* Paired and SPE builtins are only available if you build a compiler with
15139 the appropriate options, so only create those builtins with the
15140 appropriate compiler option. Create Altivec and VSX builtins on machines
15141 with at least the general purpose extensions (970 and newer) to allow the
15142 use of the target attribute. */
15144 if (TARGET_EXTRA_BUILTINS)
15145 builtin_mask |= RS6000_BTM_COMMON;
15147 /* Add the ternary operators. */
15148 d = bdesc_3arg;
15149 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15151 tree type;
15152 HOST_WIDE_INT mask = d->mask;
15154 if ((mask & builtin_mask) != mask)
15156 if (TARGET_DEBUG_BUILTIN)
15157 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
15158 continue;
15161 if (rs6000_overloaded_builtin_p (d->code))
15163 if (! (type = opaque_ftype_opaque_opaque_opaque))
15164 type = opaque_ftype_opaque_opaque_opaque
15165 = build_function_type_list (opaque_V4SI_type_node,
15166 opaque_V4SI_type_node,
15167 opaque_V4SI_type_node,
15168 opaque_V4SI_type_node,
15169 NULL_TREE);
15171 else
15173 enum insn_code icode = d->icode;
15174 if (d->name == 0)
15176 if (TARGET_DEBUG_BUILTIN)
15177 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
15178 (long unsigned)i);
15180 continue;
15183 if (icode == CODE_FOR_nothing)
15185 if (TARGET_DEBUG_BUILTIN)
15186 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
15187 d->name);
15189 continue;
15192 type = builtin_function_type (insn_data[icode].operand[0].mode,
15193 insn_data[icode].operand[1].mode,
15194 insn_data[icode].operand[2].mode,
15195 insn_data[icode].operand[3].mode,
15196 d->code, d->name);
15199 def_builtin (d->name, type, d->code);
15202 /* Add the binary operators. */
15203 d = bdesc_2arg;
15204 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15206 enum machine_mode mode0, mode1, mode2;
15207 tree type;
15208 HOST_WIDE_INT mask = d->mask;
15210 if ((mask & builtin_mask) != mask)
15212 if (TARGET_DEBUG_BUILTIN)
15213 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
15214 continue;
15217 if (rs6000_overloaded_builtin_p (d->code))
15219 if (! (type = opaque_ftype_opaque_opaque))
15220 type = opaque_ftype_opaque_opaque
15221 = build_function_type_list (opaque_V4SI_type_node,
15222 opaque_V4SI_type_node,
15223 opaque_V4SI_type_node,
15224 NULL_TREE);
15226 else
15228 enum insn_code icode = d->icode;
15229 if (d->name == 0)
15231 if (TARGET_DEBUG_BUILTIN)
15232 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
15233 (long unsigned)i);
15235 continue;
15238 if (icode == CODE_FOR_nothing)
15240 if (TARGET_DEBUG_BUILTIN)
15241 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
15242 d->name);
15244 continue;
15247 mode0 = insn_data[icode].operand[0].mode;
15248 mode1 = insn_data[icode].operand[1].mode;
15249 mode2 = insn_data[icode].operand[2].mode;
15251 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
15253 if (! (type = v2si_ftype_v2si_qi))
15254 type = v2si_ftype_v2si_qi
15255 = build_function_type_list (opaque_V2SI_type_node,
15256 opaque_V2SI_type_node,
15257 char_type_node,
15258 NULL_TREE);
15261 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
15262 && mode2 == QImode)
15264 if (! (type = v2si_ftype_int_qi))
15265 type = v2si_ftype_int_qi
15266 = build_function_type_list (opaque_V2SI_type_node,
15267 integer_type_node,
15268 char_type_node,
15269 NULL_TREE);
15272 else
15273 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
15274 d->code, d->name);
15277 def_builtin (d->name, type, d->code);
15280 /* Add the simple unary operators. */
15281 d = bdesc_1arg;
15282 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15284 enum machine_mode mode0, mode1;
15285 tree type;
15286 HOST_WIDE_INT mask = d->mask;
15288 if ((mask & builtin_mask) != mask)
15290 if (TARGET_DEBUG_BUILTIN)
15291 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
15292 continue;
15295 if (rs6000_overloaded_builtin_p (d->code))
15297 if (! (type = opaque_ftype_opaque))
15298 type = opaque_ftype_opaque
15299 = build_function_type_list (opaque_V4SI_type_node,
15300 opaque_V4SI_type_node,
15301 NULL_TREE);
15303 else
15305 enum insn_code icode = d->icode;
15306 if (d->name == 0)
15308 if (TARGET_DEBUG_BUILTIN)
15309 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
15310 (long unsigned)i);
15312 continue;
15315 if (icode == CODE_FOR_nothing)
15317 if (TARGET_DEBUG_BUILTIN)
15318 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
15319 d->name);
15321 continue;
15324 mode0 = insn_data[icode].operand[0].mode;
15325 mode1 = insn_data[icode].operand[1].mode;
15327 if (mode0 == V2SImode && mode1 == QImode)
15329 if (! (type = v2si_ftype_qi))
15330 type = v2si_ftype_qi
15331 = build_function_type_list (opaque_V2SI_type_node,
15332 char_type_node,
15333 NULL_TREE);
15336 else
15337 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
15338 d->code, d->name);
15341 def_builtin (d->name, type, d->code);
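/* Flow sketch for the three loops above, using one real table entry
   as an example: the bdesc_2arg record for "__builtin_altivec_vaddubm"
   (icode CODE_FOR_addv16qi3) is first checked against builtin_mask,
   then typed from that insn's operand modes, all V16QImode, giving
   "V16QI fn (V16QI, V16QI)", and finally registered via def_builtin.  */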
15345 static void
15346 rs6000_init_libfuncs (void)
15348 if (!TARGET_IEEEQUAD)
15349 /* AIX/Darwin/64-bit Linux quad floating point routines. */
15350 if (!TARGET_XL_COMPAT)
15352 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
15353 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
15354 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
15355 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
15357 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
15359 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
15360 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
15361 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
15362 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
15363 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
15364 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
15365 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
15367 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
15368 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
15369 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
15370 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
15371 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
15372 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
15373 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
15374 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
15377 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
15378 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
15380 else
15382 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
15383 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
15384 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
15385 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
15387 else
15389 /* 32-bit SVR4 quad floating point routines. */
15391 set_optab_libfunc (add_optab, TFmode, "_q_add");
15392 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
15393 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
15394 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
15395 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
15396 if (TARGET_PPC_GPOPT)
15397 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
15399 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
15400 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
15401 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
15402 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
15403 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
15404 set_optab_libfunc (le_optab, TFmode, "_q_fle");
15406 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
15407 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
15408 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
15409 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
15410 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
15411 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
15412 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
15413 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
15418 /* Expand a block clear operation, and return 1 if successful. Return 0
15419 if we should let the compiler generate normal code.
15421 operands[0] is the destination
15422 operands[1] is the length
15423 operands[3] is the alignment */
15425 int
15426 expand_block_clear (rtx operands[])
15428 rtx orig_dest = operands[0];
15429 rtx bytes_rtx = operands[1];
15430 rtx align_rtx = operands[3];
15431 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
15432 HOST_WIDE_INT align;
15433 HOST_WIDE_INT bytes;
15434 int offset;
15435 int clear_bytes;
15436 int clear_step;
15438 /* If this is not a fixed size clear, just call memset */
15439 if (! constp)
15440 return 0;
15442 /* This must be a fixed size alignment */
15443 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15444 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15446 /* Anything to clear? */
15447 bytes = INTVAL (bytes_rtx);
15448 if (bytes <= 0)
15449 return 1;
15451 /* Use the builtin memset after a point, to avoid huge code bloat.
15452 When optimize_size, avoid any significant code bloat; calling
15453 memset is about 4 instructions, so allow for one instruction to
15454 load zero and three to do clearing. */
15455 if (TARGET_ALTIVEC && align >= 128)
15456 clear_step = 16;
15457 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
15458 clear_step = 8;
15459 else if (TARGET_SPE && align >= 64)
15460 clear_step = 8;
15461 else
15462 clear_step = 4;
15464 if (optimize_size && bytes > 3 * clear_step)
15465 return 0;
15466 if (! optimize_size && bytes > 8 * clear_step)
15467 return 0;
15469 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
15471 enum machine_mode mode = BLKmode;
15472 rtx dest;
15474 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
15476 clear_bytes = 16;
15477 mode = V4SImode;
15479 else if (bytes >= 8 && TARGET_SPE && align >= 64)
15481 clear_bytes = 8;
15482 mode = V2SImode;
15484 else if (bytes >= 8 && TARGET_POWERPC64
15485 && (align >= 64 || !STRICT_ALIGNMENT))
15487 clear_bytes = 8;
15488 mode = DImode;
15489 if (offset == 0 && align < 64)
15491 rtx addr;
15493 /* If the address form is reg+offset with offset not a
15494 multiple of four, reload into reg indirect form here
15495 rather than waiting for reload. This way we get one
15496 reload, not one per store. */
15497 addr = XEXP (orig_dest, 0);
15498 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15499 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15500 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15502 addr = copy_addr_to_reg (addr);
15503 orig_dest = replace_equiv_address (orig_dest, addr);
15507 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15508 { /* clear 4 bytes */
15509 clear_bytes = 4;
15510 mode = SImode;
15512 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15513 { /* clear 2 bytes */
15514 clear_bytes = 2;
15515 mode = HImode;
15517 else /* clear 1 byte at a time */
15519 clear_bytes = 1;
15520 mode = QImode;
15523 dest = adjust_address (orig_dest, mode, offset);
15525 emit_move_insn (dest, CONST0_RTX (mode));
15528 return 1;
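/* Worked example: clearing 20 bytes at 128-bit alignment with
   TARGET_ALTIVEC selects clear_step = 16, so the loop emits one
   16-byte V4SImode store of zero followed by one 4-byte SImode
   store; with optimize_size the 3 * clear_step cap (48 bytes here)
   still permits the sequence.  */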
15532 /* Expand a block move operation, and return 1 if successful. Return 0
15533 if we should let the compiler generate normal code.
15535 operands[0] is the destination
15536 operands[1] is the source
15537 operands[2] is the length
15538 operands[3] is the alignment */
15540 #define MAX_MOVE_REG 4
15542 int
15543 expand_block_move (rtx operands[])
15545 rtx orig_dest = operands[0];
15546 rtx orig_src = operands[1];
15547 rtx bytes_rtx = operands[2];
15548 rtx align_rtx = operands[3];
15549 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
15550 int align;
15551 int bytes;
15552 int offset;
15553 int move_bytes;
15554 rtx stores[MAX_MOVE_REG];
15555 int num_reg = 0;
15557 /* If this is not a fixed size move, just call memcpy */
15558 if (! constp)
15559 return 0;
15561 /* This must be a fixed size alignment */
15562 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15563 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15565 /* Anything to move? */
15566 bytes = INTVAL (bytes_rtx);
15567 if (bytes <= 0)
15568 return 1;
15570 if (bytes > rs6000_block_move_inline_limit)
15571 return 0;
15573 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
15575 union {
15576 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
15577 rtx (*mov) (rtx, rtx);
15578 } gen_func;
15579 enum machine_mode mode = BLKmode;
15580 rtx src, dest;
15582 /* Altivec first, since it will be faster than a string move
15583 when it applies, and usually not significantly larger. */
15584 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
15586 move_bytes = 16;
15587 mode = V4SImode;
15588 gen_func.mov = gen_movv4si;
15590 else if (TARGET_SPE && bytes >= 8 && align >= 64)
15592 move_bytes = 8;
15593 mode = V2SImode;
15594 gen_func.mov = gen_movv2si;
15596 else if (TARGET_STRING
15597 && bytes > 24 /* move up to 32 bytes at a time */
15598 && ! fixed_regs[5]
15599 && ! fixed_regs[6]
15600 && ! fixed_regs[7]
15601 && ! fixed_regs[8]
15602 && ! fixed_regs[9]
15603 && ! fixed_regs[10]
15604 && ! fixed_regs[11]
15605 && ! fixed_regs[12])
15607 move_bytes = (bytes > 32) ? 32 : bytes;
15608 gen_func.movmemsi = gen_movmemsi_8reg;
15610 else if (TARGET_STRING
15611 && bytes > 16 /* move up to 24 bytes at a time */
15612 && ! fixed_regs[5]
15613 && ! fixed_regs[6]
15614 && ! fixed_regs[7]
15615 && ! fixed_regs[8]
15616 && ! fixed_regs[9]
15617 && ! fixed_regs[10])
15619 move_bytes = (bytes > 24) ? 24 : bytes;
15620 gen_func.movmemsi = gen_movmemsi_6reg;
15622 else if (TARGET_STRING
15623 && bytes > 8 /* move up to 16 bytes at a time */
15624 && ! fixed_regs[5]
15625 && ! fixed_regs[6]
15626 && ! fixed_regs[7]
15627 && ! fixed_regs[8])
15629 move_bytes = (bytes > 16) ? 16 : bytes;
15630 gen_func.movmemsi = gen_movmemsi_4reg;
15632 else if (bytes >= 8 && TARGET_POWERPC64
15633 && (align >= 64 || !STRICT_ALIGNMENT))
15635 move_bytes = 8;
15636 mode = DImode;
15637 gen_func.mov = gen_movdi;
15638 if (offset == 0 && align < 64)
15640 rtx addr;
15642 /* If the address form is reg+offset with offset not a
15643 multiple of four, reload into reg indirect form here
15644 rather than waiting for reload. This way we get one
15645 reload, not one per load and/or store. */
15646 addr = XEXP (orig_dest, 0);
15647 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15648 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15649 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15651 addr = copy_addr_to_reg (addr);
15652 orig_dest = replace_equiv_address (orig_dest, addr);
15654 addr = XEXP (orig_src, 0);
15655 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15656 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15657 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15659 addr = copy_addr_to_reg (addr);
15660 orig_src = replace_equiv_address (orig_src, addr);
15664 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
15665 { /* move up to 8 bytes at a time */
15666 move_bytes = (bytes > 8) ? 8 : bytes;
15667 gen_func.movmemsi = gen_movmemsi_2reg;
15669 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15670 { /* move 4 bytes */
15671 move_bytes = 4;
15672 mode = SImode;
15673 gen_func.mov = gen_movsi;
15675 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15676 { /* move 2 bytes */
15677 move_bytes = 2;
15678 mode = HImode;
15679 gen_func.mov = gen_movhi;
15681 else if (TARGET_STRING && bytes > 1)
15682 { /* move up to 4 bytes at a time */
15683 move_bytes = (bytes > 4) ? 4 : bytes;
15684 gen_func.movmemsi = gen_movmemsi_1reg;
15686 else /* move 1 byte at a time */
15688 move_bytes = 1;
15689 mode = QImode;
15690 gen_func.mov = gen_movqi;
15693 src = adjust_address (orig_src, mode, offset);
15694 dest = adjust_address (orig_dest, mode, offset);
15696 if (mode != BLKmode)
15698 rtx tmp_reg = gen_reg_rtx (mode);
15700 emit_insn ((*gen_func.mov) (tmp_reg, src));
15701 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
15704 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
15706 int i;
15707 for (i = 0; i < num_reg; i++)
15708 emit_insn (stores[i]);
15709 num_reg = 0;
15712 if (mode == BLKmode)
15714 /* Move the address into scratch registers. The movmemsi
15715 patterns require zero offset. */
15716 if (!REG_P (XEXP (src, 0)))
15718 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
15719 src = replace_equiv_address (src, src_reg);
15721 set_mem_size (src, move_bytes);
15723 if (!REG_P (XEXP (dest, 0)))
15725 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
15726 dest = replace_equiv_address (dest, dest_reg);
15728 set_mem_size (dest, move_bytes);
15730 emit_insn ((*gen_func.movmemsi) (dest, src,
15731 GEN_INT (move_bytes & 31),
15732 align_rtx));
15736 return 1;
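/* Worked example: a 24-byte copy at 32-bit alignment on a
   TARGET_STRING machine takes the gen_movmemsi_6reg arm (bytes > 16)
   and moves all 24 bytes with one string-insn pair using r5..r10;
   without TARGET_STRING (and without TARGET_POWERPC64) the same copy
   becomes six SImode load/store pairs, with the stores buffered and
   flushed MAX_MOVE_REG (4) at a time.  */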
15740 /* Return a string to perform a load_multiple operation.
15741 operands[0] is the vector.
15742 operands[1] is the source address.
15743 operands[2] is the first destination register. */
15745 const char *
15746 rs6000_output_load_multiple (rtx operands[3])
15748 /* We have to handle the case where the pseudo used to contain the address
15749 is assigned to one of the output registers. */
15750 int i, j;
15751 int words = XVECLEN (operands[0], 0);
15752 rtx xop[10];
15754 if (XVECLEN (operands[0], 0) == 1)
15755 return "lwz %2,0(%1)";
15757 for (i = 0; i < words; i++)
15758 if (refers_to_regno_p (REGNO (operands[2]) + i,
15759 REGNO (operands[2]) + i + 1, operands[1], 0))
15761 if (i == words-1)
15763 xop[0] = GEN_INT (4 * (words-1));
15764 xop[1] = operands[1];
15765 xop[2] = operands[2];
15766 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
15767 return "";
15769 else if (i == 0)
15771 xop[0] = GEN_INT (4 * (words-1));
15772 xop[1] = operands[1];
15773 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15774 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
15775 return "";
15777 else
15779 for (j = 0; j < words; j++)
15780 if (j != i)
15782 xop[0] = GEN_INT (j * 4);
15783 xop[1] = operands[1];
15784 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
15785 output_asm_insn ("lwz %2,%0(%1)", xop);
15787 xop[0] = GEN_INT (i * 4);
15788 xop[1] = operands[1];
15789 output_asm_insn ("lwz %1,%0(%1)", xop);
15790 return "";
15794 return "lswi %2,%1,%N0";
15798 /* A validation routine: say whether CODE, a condition code, and MODE
15799 match. The other alternatives either don't make sense or should
15800 never be generated. */
15802 void
15803 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
15805 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
15806 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
15807 && GET_MODE_CLASS (mode) == MODE_CC);
15809 /* These don't make sense. */
15810 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
15811 || mode != CCUNSmode);
15813 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
15814 || mode == CCUNSmode);
15816 gcc_assert (mode == CCFPmode
15817 || (code != ORDERED && code != UNORDERED
15818 && code != UNEQ && code != LTGT
15819 && code != UNGT && code != UNLT
15820 && code != UNGE && code != UNLE));
15822 /* These should never be generated except for
15823 flag_finite_math_only. */
15824 gcc_assert (mode != CCFPmode
15825 || flag_finite_math_only
15826 || (code != LE && code != GE
15827 && code != UNEQ && code != LTGT
15828 && code != UNGT && code != UNLT));
15830 /* These are invalid; the information is not there. */
15831 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
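/* Examples of the rules above: an unsigned comparison such as GTU
   must carry CCUNSmode while a signed GT must not; ORDERED/UNORDERED
   and the UN* codes require CCFPmode; and CCEQmode answers only EQ
   or NE.  */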
15835 /* Return 1 if ANDOP is a mask that sets no bits outside the mask
15836 required to convert the result of a rotate insn into a shift
15837 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
15839 int
15840 includes_lshift_p (rtx shiftop, rtx andop)
15842 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15844 shift_mask <<= INTVAL (shiftop);
15846 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
15849 /* Similar, but for right shift. */
15851 int
15852 includes_rshift_p (rtx shiftop, rtx andop)
15854 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
15856 shift_mask >>= INTVAL (shiftop);
15858 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
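/* Worked example for both checks: with SHIFTOP = 4 the left-shift
   mask is 0xfffffff0, so ANDOP = 0xfffffff0 qualifies (the
   rotate-then-AND is exactly a shift left by 4), while ANDOP =
   0xfffffff8 is rejected, since its bit 3 would be fed by the
   rotated-in high bits rather than by zero.  */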
15861 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
15862 to perform a left shift. It must have exactly SHIFTOP least
15863 significant 0's, then one or more 1's, then zero or more 0's. */
15865 int
15866 includes_rldic_lshift_p (rtx shiftop, rtx andop)
15868 if (GET_CODE (andop) == CONST_INT)
15870 HOST_WIDE_INT c, lsb, shift_mask;
15872 c = INTVAL (andop);
15873 if (c == 0 || c == ~0)
15874 return 0;
15876 shift_mask = ~0;
15877 shift_mask <<= INTVAL (shiftop);
15879 /* Find the least significant one bit. */
15880 lsb = c & -c;
15882 /* It must coincide with the LSB of the shift mask. */
15883 if (-lsb != shift_mask)
15884 return 0;
15886 /* Invert to look for the next transition (if any). */
15887 c = ~c;
15889 /* Remove the low group of ones (originally low group of zeros). */
15890 c &= -lsb;
15892 /* Again find the lsb, and check we have all 1's above. */
15893 lsb = c & -c;
15894 return c == -lsb;
15896 else
15897 return 0;
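/* Worked example: c = 0x0000ffffffff0000 with SHIFTOP = 16 is
   accepted; lsb = 0x10000, -lsb equals shift_mask (so there are
   exactly 16 low zeros), and after inverting c and clearing the run
   of ones the remainder 0xffff000000000000 is again contiguous, so
   the final c == -lsb test passes.  A mask with a second, separate
   group of ones fails that last test.  */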
15900 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
15901 to perform a left shift. It must have SHIFTOP or more least
15902 significant 0's, with the remainder of the word 1's. */
15904 int
15905 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
15907 if (GET_CODE (andop) == CONST_INT)
15909 HOST_WIDE_INT c, lsb, shift_mask;
15911 shift_mask = ~0;
15912 shift_mask <<= INTVAL (shiftop);
15913 c = INTVAL (andop);
15915 /* Find the least significant one bit. */
15916 lsb = c & -c;
15918 /* It must be covered by the shift mask.
15919 This test also rejects c == 0. */
15920 if ((lsb & shift_mask) == 0)
15921 return 0;
15923 /* Check we have all 1's above the transition, and reject all 1's. */
15924 return c == -lsb && lsb != 1;
15926 else
15927 return 0;
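/* Worked example of the c & -c idiom: for c = 0xffffffffffff0000
   and SHIFTOP = 16, lsb = 0x10000 lies inside shift_mask and
   c == -lsb with lsb != 1, so the mask suits rldicr (16 low zeros,
   ones all the way up).  c = 0x00ffffffffff0000 fails c == -lsb
   because the ones stop short of the top bit.  */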
15930 /* Return 1 if the operands will generate valid arguments to an rlwimi
15931 instruction for an insert with right shift in 64-bit mode. The mask may
15932 not start on the first bit or stop on the last bit because the wrap-around
15933 effects of the instruction do not correspond to the semantics of the RTL insn. */
15935 int
15936 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
15938 if (INTVAL (startop) > 32
15939 && INTVAL (startop) < 64
15940 && INTVAL (sizeop) > 1
15941 && INTVAL (sizeop) + INTVAL (startop) < 64
15942 && INTVAL (shiftop) > 0
15943 && INTVAL (sizeop) + INTVAL (shiftop) < 32
15944 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
15945 return 1;
15947 return 0;
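/* Worked example: sizeop = 8, startop = 40, shiftop = 8 passes every
   test above (40 + 8 < 64, 8 + 8 < 32, 64 - 8 >= 8), so the 8-bit
   insert with a right shift of 8 can use rlwimi; startop = 32 fails
   the first test, the case where the inserted field would reach the
   point at which rlwimi's 32-bit rotate wraps.  */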
15950 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
15951 for lfq and stfq insns, iff the registers are hard registers. */
15953 int
15954 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
15956 /* We might have been passed a SUBREG. */
15957 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
15958 return 0;
15960 /* We might have been passed non floating point registers. */
15961 if (!FP_REGNO_P (REGNO (reg1))
15962 || !FP_REGNO_P (REGNO (reg2)))
15963 return 0;
15965 return (REGNO (reg1) == REGNO (reg2) - 1);
15968 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
15969 addr1 and addr2 must be in consecutive memory locations
15970 (addr2 == addr1 + 8). */
15973 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
15975 rtx addr1, addr2;
15976 unsigned int reg1, reg2;
15977 int offset1, offset2;
15979 /* The mems cannot be volatile. */
15980 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
15981 return 0;
15983 addr1 = XEXP (mem1, 0);
15984 addr2 = XEXP (mem2, 0);
15986 /* Extract an offset (if used) from the first addr. */
15987 if (GET_CODE (addr1) == PLUS)
15989 /* If not a REG, return zero. */
15990 if (GET_CODE (XEXP (addr1, 0)) != REG)
15991 return 0;
15992 else
15994 reg1 = REGNO (XEXP (addr1, 0));
15995 /* The offset must be constant! */
15996 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
15997 return 0;
15998 offset1 = INTVAL (XEXP (addr1, 1));
16001 else if (GET_CODE (addr1) != REG)
16002 return 0;
16003 else
16005 reg1 = REGNO (addr1);
16006 /* This was a simple (mem (reg)) expression. Offset is 0. */
16007 offset1 = 0;
16010 /* And now for the second addr. */
16011 if (GET_CODE (addr2) == PLUS)
16013 /* If not a REG, return zero. */
16014 if (GET_CODE (XEXP (addr2, 0)) != REG)
16015 return 0;
16016 else
16018 reg2 = REGNO (XEXP (addr2, 0));
16019 /* The offset must be constant. */
16020 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
16021 return 0;
16022 offset2 = INTVAL (XEXP (addr2, 1));
16025 else if (GET_CODE (addr2) != REG)
16026 return 0;
16027 else
16029 reg2 = REGNO (addr2);
16030 /* This was a simple (mem (reg)) expression. Offset is 0. */
16031 offset2 = 0;
16034 /* Both of these must have the same base register. */
16035 if (reg1 != reg2)
16036 return 0;
16038 /* The offset for the second addr must be 8 more than the first addr. */
16039 if (offset2 != offset1 + 8)
16040 return 0;
16042 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16043 instructions. */
16044 return 1;
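/* For instance, (mem (plus r3 16)) and (mem (plus r3 24)) qualify;
   a pair with different base registers, or with a gap other than
   8 bytes, does not.  */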
16049 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
16051 static bool eliminated = false;
16052 rtx ret;
16054 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
16055 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16056 else
16058 rtx mem = cfun->machine->sdmode_stack_slot;
16059 gcc_assert (mem != NULL_RTX);
16061 if (!eliminated)
16063 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
16064 cfun->machine->sdmode_stack_slot = mem;
16065 eliminated = true;
16067 ret = mem;
16070 if (TARGET_DEBUG_ADDR)
16072 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16073 GET_MODE_NAME (mode));
16074 if (!ret)
16075 fprintf (stderr, "\tNULL_RTX\n");
16076 else
16077 debug_rtx (ret);
16080 return ret;
16083 /* Return the mode to be used for memory when a secondary memory
16084 location is needed. For SDmode values we need to use DDmode; in
16085 all other cases we can use the same mode. */
16086 enum machine_mode
16087 rs6000_secondary_memory_needed_mode (enum machine_mode mode)
16089 if (lra_in_progress && mode == SDmode)
16090 return DDmode;
16091 return mode;
16094 static tree
16095 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
16097 /* Don't walk into types. */
16098 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
16100 *walk_subtrees = 0;
16101 return NULL_TREE;
16104 switch (TREE_CODE (*tp))
16106 case VAR_DECL:
16107 case PARM_DECL:
16108 case FIELD_DECL:
16109 case RESULT_DECL:
16110 case SSA_NAME:
16111 case REAL_CST:
16112 case MEM_REF:
16113 case VIEW_CONVERT_EXPR:
16114 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
16115 return *tp;
16116 break;
16117 default:
16118 break;
16121 return NULL_TREE;
16124 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16125 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16126 only work on the traditional altivec registers, note if an altivec register
16127 was chosen. */
16129 static enum rs6000_reg_type
16130 register_to_reg_type (rtx reg, bool *is_altivec)
16132 HOST_WIDE_INT regno;
16133 enum reg_class rclass;
16135 if (GET_CODE (reg) == SUBREG)
16136 reg = SUBREG_REG (reg);
16138 if (!REG_P (reg))
16139 return NO_REG_TYPE;
16141 regno = REGNO (reg);
16142 if (regno >= FIRST_PSEUDO_REGISTER)
16144 if (!lra_in_progress && !reload_in_progress && !reload_completed)
16145 return PSEUDO_REG_TYPE;
16147 regno = true_regnum (reg);
16148 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16149 return PSEUDO_REG_TYPE;
16152 gcc_assert (regno >= 0);
16154 if (is_altivec && ALTIVEC_REGNO_P (regno))
16155 *is_altivec = true;
16157 rclass = rs6000_regno_regclass[regno];
16158 return reg_class_to_reg_type[(int)rclass];
16161 /* Helper function for rs6000_secondary_reload to return true if a move to a
16162 different register class is really a simple move. */
16164 static bool
16165 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16166 enum rs6000_reg_type from_type,
16167 enum machine_mode mode)
16169 int size;
16171 /* Add support for various direct moves available. In this function, we only
16172 look at cases where we don't need any extra registers, and one or more
16173 simple move insns are issued. At present, 32-bit integers are not allowed
16174 in FPR/VSX registers. Single precision binary floating point is not a simple
16175 move because we need to convert to the single precision memory layout.
16176 The 4-byte SDmode can be moved. */
16177 size = GET_MODE_SIZE (mode);
16178 if (TARGET_DIRECT_MOVE
16179 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
16180 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16181 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
16182 return true;
16184 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
16185 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
16186 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16187 return true;
16189 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
16190 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
16191 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16192 return true;
16194 return false;
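/* For instance, on a 64-bit target with TARGET_DIRECT_MOVE, a DImode
   GPR <-> VSX copy counts as simple (a single mtvsrd or mfvsrd), as
   does SDmode; SFmode does not, because of the memory layout
   conversion noted above, and is left to
   rs6000_secondary_reload_direct_move.  */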
16197 /* Power8 helper function for rs6000_secondary_reload; handle all of the
16198 special direct moves that involve allocating an extra register.  Return
16199 true if such a move was set up, storing the insn code and extra cost in
16200 SRI; return false if not. */
16202 static bool
16203 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
16204 enum rs6000_reg_type from_type,
16205 enum machine_mode mode,
16206 secondary_reload_info *sri,
16207 bool altivec_p)
16209 bool ret = false;
16210 enum insn_code icode = CODE_FOR_nothing;
16211 int cost = 0;
16212 int size = GET_MODE_SIZE (mode);
16214 if (TARGET_POWERPC64)
16216 if (size == 16)
16218 /* Handle moving 128-bit values from GPRs to VSX registers on
16219 power8 when running in 64-bit mode using XXPERMDI to glue the two
16220 64-bit values back together. */
16221 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16223 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16224 icode = reg_addr[mode].reload_vsx_gpr;
16227 /* Handle moving 128-bit values from VSX registers to GPRs on
16228 power8 when running in 64-bit mode using XXPERMDI to get access to the
16229 bottom 64-bit value. */
16230 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16232 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16233 icode = reg_addr[mode].reload_gpr_vsx;
16237 else if (mode == SFmode)
16239 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16241 cost = 3; /* xscvdpspn, mfvsrd, and. */
16242 icode = reg_addr[mode].reload_gpr_vsx;
16245 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16247 cost = 2; /* mtvsrz, xscvspdpn. */
16248 icode = reg_addr[mode].reload_vsx_gpr;
16253 if (TARGET_POWERPC64 && size == 16)
16255 /* Handle moving 128-bit values from GPRs to VSX registers on
16256 power8 when running in 64-bit mode using XXPERMDI to glue the two
16257 64-bit values back together. */
16258 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16260 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16261 icode = reg_addr[mode].reload_vsx_gpr;
16264 /* Handle moving 128-bit values from VSX registers to GPRs on
16265 power8 when running in 64-bit mode using XXPERMDI to get access to the
16266 bottom 64-bit value. */
16267 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16269 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16270 icode = reg_addr[mode].reload_gpr_vsx;
16274 else if (!TARGET_POWERPC64 && size == 8)
16276 /* Handle moving 64-bit values from GPRs to floating point registers on
16277 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16278 values back together. Altivec register classes must be handled
16279 specially since a different instruction is used, and the secondary
16280 reload support requires a single instruction class in the scratch
16281 register constraint. However, right now TFmode is not allowed in
16282 Altivec registers, so the pattern will never match. */
16283 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
16285 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
16286 icode = reg_addr[mode].reload_fpr_gpr;
16290 if (icode != CODE_FOR_nothing)
16292 ret = true;
16293 if (sri)
16295 sri->icode = icode;
16296 sri->extra_cost = cost;
16300 return ret;
16303 /* Return whether a move between two register classes can be done either
16304 directly (simple move) or via a pattern that uses a single extra temporary
16305 (using power8's direct move in this case). */
16307 static bool
16308 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
16309 enum rs6000_reg_type from_type,
16310 enum machine_mode mode,
16311 secondary_reload_info *sri,
16312 bool altivec_p)
16314 /* Fall back to load/store reloads if either type is not a register. */
16315 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
16316 return false;
16318 /* If we haven't allocated registers yet, assume the move can be done for the
16319 standard register types. */
16320 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
16321 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
16322 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
16323 return true;
16325 /* A move within the same set of registers is a simple move for non-specialized
16326 registers. */
16327 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
16328 return true;
16330 /* Check whether a simple move can be done directly. */
16331 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
16333 if (sri)
16335 sri->icode = CODE_FOR_nothing;
16336 sri->extra_cost = 0;
16338 return true;
16341 /* Now check if we can do it in a few steps. */
16342 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
16343 altivec_p);
16346 /* Inform reload about cases where moving X with a mode MODE to a register in
16347 RCLASS requires an extra scratch or immediate register. Return the class
16348 needed for the immediate register.
16350 For VSX and Altivec, we may need a register to convert sp+offset into
16351 reg+sp.
16353 For misaligned 64-bit gpr loads and stores we need a register to
16354 convert an offset address to indirect. */
16356 static reg_class_t
16357 rs6000_secondary_reload (bool in_p,
16358 rtx x,
16359 reg_class_t rclass_i,
16360 enum machine_mode mode,
16361 secondary_reload_info *sri)
16363 enum reg_class rclass = (enum reg_class) rclass_i;
16364 reg_class_t ret = ALL_REGS;
16365 enum insn_code icode;
16366 bool default_p = false;
16368 sri->icode = CODE_FOR_nothing;
16369 icode = ((in_p)
16370 ? reg_addr[mode].reload_load
16371 : reg_addr[mode].reload_store);
16373 if (REG_P (x) || register_operand (x, mode))
16375 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
16376 bool altivec_p = (rclass == ALTIVEC_REGS);
16377 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
16379 if (!in_p)
16381 enum rs6000_reg_type exchange = to_type;
16382 to_type = from_type;
16383 from_type = exchange;
16386 /* Can we do a direct move of some sort? */
16387 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
16388 altivec_p))
16390 icode = (enum insn_code)sri->icode;
16391 default_p = false;
16392 ret = NO_REGS;
16396 /* Handle vector moves with reload helper functions. */
16397 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
16399 ret = NO_REGS;
16400 sri->icode = CODE_FOR_nothing;
16401 sri->extra_cost = 0;
16403 if (GET_CODE (x) == MEM)
16405 rtx addr = XEXP (x, 0);
16407 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
16408 an extra register in that case, but it would need an extra
16409 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16410 case load/store quad. */
16411 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
16413 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
16414 && GET_MODE_SIZE (mode) == 16
16415 && quad_memory_operand (x, mode))
16417 sri->icode = icode;
16418 sri->extra_cost = 2;
16421 else if (!legitimate_indirect_address_p (addr, false)
16422 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16423 false, true))
16425 sri->icode = icode;
16426 /* Account for splitting the loads, and converting the
16427 address from reg+reg to reg. */
16428 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
16429 + ((GET_CODE (addr) == AND) ? 1 : 0));
16432 /* Allow scalar loads to/from the traditional floating point
16433 registers, even if VSX memory is set. */
16434 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
16435 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16436 && (legitimate_indirect_address_p (addr, false)
16437 || legitimate_indexed_address_p (addr, false)
16438 || rs6000_legitimate_offset_address_p (mode, addr,
16439 false, true)))
16442 /* Loads to and stores from vector registers can only do reg+reg
16443 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16444 scalar modes loading up the traditional floating point registers
16445 to use offset addresses. */
16446 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
16447 || rclass == FLOAT_REGS || rclass == NO_REGS)
16449 if (!VECTOR_MEM_ALTIVEC_P (mode)
16450 && GET_CODE (addr) == AND
16451 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16452 && INTVAL (XEXP (addr, 1)) == -16
16453 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16454 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
16456 sri->icode = icode;
16457 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
16458 ? 2 : 1);
16460 else if (!legitimate_indirect_address_p (addr, false)
16461 && (rclass == NO_REGS
16462 || !legitimate_indexed_address_p (addr, false)))
16464 sri->icode = icode;
16465 sri->extra_cost = 1;
16467 else
16468 icode = CODE_FOR_nothing;
16470 /* Any other loads, including to pseudo registers which haven't been
16471 assigned to a register yet, default to require a scratch
16472 register. */
16473 else
16475 sri->icode = icode;
16476 sri->extra_cost = 2;
16479 else if (REG_P (x))
16481 int regno = true_regnum (x);
16483 icode = CODE_FOR_nothing;
16484 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16485 default_p = true;
16486 else
16488 enum reg_class xclass = REGNO_REG_CLASS (regno);
16489 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
16490 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
16492 /* If memory is needed, use default_secondary_reload to create the
16493 stack slot. */
16494 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
16495 default_p = true;
16496 else
16497 ret = NO_REGS;
16500 else
16501 default_p = true;
16503 else if (TARGET_POWERPC64
16504 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16505 && MEM_P (x)
16506 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
16508 rtx addr = XEXP (x, 0);
16509 rtx off = address_offset (addr);
16511 if (off != NULL_RTX)
16513 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16514 unsigned HOST_WIDE_INT offset = INTVAL (off);
16516 /* We need a secondary reload when our legitimate_address_p
16517 says the address is good (as otherwise the entire address
16518 will be reloaded), and the offset is not a multiple of
16519 four or we have an address wrap. Address wrap will only
16520 occur for LO_SUMs since legitimate_offset_address_p
16521 rejects addresses for 16-byte mems that will wrap. */
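/* For instance, a DImode gpr load from offset 0x7ffd has extra = 0 and
   passes the legitimate-offset test (0x7ffd + 0x8000 < 0x10000), but
   is misaligned (0x7ffd & 3 != 0), so it is routed to the
   reload_di_load/reload_di_store helpers below.  */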
16522 if (GET_CODE (addr) == LO_SUM
16523 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16524 && ((offset & 3) != 0
16525 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
16526 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
16527 && (offset & 3) != 0))
16529 if (in_p)
16530 sri->icode = CODE_FOR_reload_di_load;
16531 else
16532 sri->icode = CODE_FOR_reload_di_store;
16533 sri->extra_cost = 2;
16534 ret = NO_REGS;
16536 else
16537 default_p = true;
16539 else
16540 default_p = true;
16542 else if (!TARGET_POWERPC64
16543 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16544 && MEM_P (x)
16545 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
16547 rtx addr = XEXP (x, 0);
16548 rtx off = address_offset (addr);
16550 if (off != NULL_RTX)
16552 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16553 unsigned HOST_WIDE_INT offset = INTVAL (off);
16555 /* We need a secondary reload when our legitimate_address_p
16556 says the address is good (as otherwise the entire address
16557 will be reloaded), and we have a wrap.
16559 legitimate_lo_sum_address_p allows LO_SUM addresses to
16560 have any offset so test for wrap in the low 16 bits.
16562 legitimate_offset_address_p checks for the range
16563 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16564 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16565 [0x7ff4,0x7fff] respectively, so test for the
16566 intersection of these ranges, [0x7ffc,0x7fff] and
16567 [0x7ff4,0x7ff7] respectively.
16569 Note that the address we see here may have been
16570 manipulated by legitimize_reload_address. */
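/* To check the arithmetic: with 4-byte words and a 16-byte access,
   extra = 12, and the unsigned test below,
   offset - (0x8000 - 12) < 4, fires exactly for offsets
   0x7ff4..0x7ff7, the intersection described above.  */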
16571 if (GET_CODE (addr) == LO_SUM
16572 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
16573 : offset - (0x8000 - extra) < UNITS_PER_WORD)
16575 if (in_p)
16576 sri->icode = CODE_FOR_reload_si_load;
16577 else
16578 sri->icode = CODE_FOR_reload_si_store;
16579 sri->extra_cost = 2;
16580 ret = NO_REGS;
16582 else
16583 default_p = true;
16585 else
16586 default_p = true;
16588 else
16589 default_p = true;
16591 if (default_p)
16592 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
16594 gcc_assert (ret != ALL_REGS);
16596 if (TARGET_DEBUG_ADDR)
16598 fprintf (stderr,
16599 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16600 "mode = %s",
16601 reg_class_names[ret],
16602 in_p ? "true" : "false",
16603 reg_class_names[rclass],
16604 GET_MODE_NAME (mode));
16606 if (default_p)
16607 fprintf (stderr, ", default secondary reload");
16609 if (sri->icode != CODE_FOR_nothing)
16610 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
16611 insn_data[sri->icode].name, sri->extra_cost);
16612 else
16613 fprintf (stderr, "\n");
16615 debug_rtx (x);
16618 return ret;
16621 /* Better tracing for rs6000_secondary_reload_inner. */
16623 static void
16624 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
16625 bool store_p)
16627 rtx set, clobber;
16629 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
16631 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
16632 store_p ? "store" : "load");
16634 if (store_p)
16635 set = gen_rtx_SET (VOIDmode, mem, reg);
16636 else
16637 set = gen_rtx_SET (VOIDmode, reg, mem);
16639 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
16640 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
16643 static void
16644 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
16645 bool store_p)
16647 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
16648 gcc_unreachable ();
16651 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16652 to SP+reg addressing. */
16654 void
16655 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
16657 int regno = true_regnum (reg);
16658 enum machine_mode mode = GET_MODE (reg);
16659 enum reg_class rclass;
16660 rtx addr;
16661 rtx and_op2 = NULL_RTX;
16662 rtx addr_op1;
16663 rtx addr_op2;
16664 rtx scratch_or_premodify = scratch;
16665 rtx and_rtx;
16666 rtx cc_clobber;
16668 if (TARGET_DEBUG_ADDR)
16669 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
16671 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16672 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16674 if (GET_CODE (mem) != MEM)
16675 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16677 rclass = REGNO_REG_CLASS (regno);
16678 addr = find_replacement (&XEXP (mem, 0));
16680 switch (rclass)
16682 /* GPRs can handle reg + small constant, all other addresses need to use
16683 the scratch register. */
16684 case GENERAL_REGS:
16685 case BASE_REGS:
16686 if (GET_CODE (addr) == AND)
16688 and_op2 = XEXP (addr, 1);
16689 addr = find_replacement (&XEXP (addr, 0));
16692 if (GET_CODE (addr) == PRE_MODIFY)
16694 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16695 if (!REG_P (scratch_or_premodify))
16696 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16698 addr = find_replacement (&XEXP (addr, 1));
16699 if (GET_CODE (addr) != PLUS)
16700 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16703 if (GET_CODE (addr) == PLUS
16704 && (and_op2 != NULL_RTX
16705 || !rs6000_legitimate_offset_address_p (PTImode, addr,
16706 false, true)))
16708 /* find_replacement already recurses into both operands of
16709 PLUS so we don't need to call it here. */
16710 addr_op1 = XEXP (addr, 0);
16711 addr_op2 = XEXP (addr, 1);
16712 if (!legitimate_indirect_address_p (addr_op1, false))
16713 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16715 if (!REG_P (addr_op2)
16716 && (GET_CODE (addr_op2) != CONST_INT
16717 || !satisfies_constraint_I (addr_op2)))
16719 if (TARGET_DEBUG_ADDR)
16721 fprintf (stderr,
16722 "\nMove plus addr to register %s, mode = %s: ",
16723 rs6000_reg_names[REGNO (scratch)],
16724 GET_MODE_NAME (mode));
16725 debug_rtx (addr_op2);
16727 rs6000_emit_move (scratch, addr_op2, Pmode);
16728 addr_op2 = scratch;
16731 emit_insn (gen_rtx_SET (VOIDmode,
16732 scratch_or_premodify,
16733 gen_rtx_PLUS (Pmode,
16734 addr_op1,
16735 addr_op2)));
16737 addr = scratch_or_premodify;
16738 scratch_or_premodify = scratch;
16740 else if (!legitimate_indirect_address_p (addr, false)
16741 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16742 false, true))
16744 if (TARGET_DEBUG_ADDR)
16746 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16747 rs6000_reg_names[REGNO (scratch_or_premodify)],
16748 GET_MODE_NAME (mode));
16749 debug_rtx (addr);
16751 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16752 addr = scratch_or_premodify;
16753 scratch_or_premodify = scratch;
16755 break;
16757 /* Float registers can do offset+reg addressing for scalar types. */
16758 case FLOAT_REGS:
16759 if (legitimate_indirect_address_p (addr, false) /* reg */
16760 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16761 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16762 && and_op2 == NULL_RTX
16763 && scratch_or_premodify == scratch
16764 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
16765 break;
16767 /* If this isn't a legacy floating point load/store, fall through to the
16768 VSX defaults. */
16770 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
16771 addresses into a scratch register. */
16772 case VSX_REGS:
16773 case ALTIVEC_REGS:
16775 /* With float regs, we need to handle the AND ourselves, since we can't
16776 use the Altivec instruction with an implicit AND -16. Allow scalar
16777 loads to float registers to use reg+offset even if VSX. */
16778 if (GET_CODE (addr) == AND
16779 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
16780 || GET_CODE (XEXP (addr, 1)) != CONST_INT
16781 || INTVAL (XEXP (addr, 1)) != -16
16782 || !VECTOR_MEM_ALTIVEC_P (mode)))
16784 and_op2 = XEXP (addr, 1);
16785 addr = find_replacement (&XEXP (addr, 0));
16788 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
16789 as the address later. */
16790 if (GET_CODE (addr) == PRE_MODIFY
16791 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
16792 && (rclass != FLOAT_REGS
16793 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
16794 || and_op2 != NULL_RTX
16795 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
16797 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16798 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
16799 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16801 addr = find_replacement (&XEXP (addr, 1));
16802 if (GET_CODE (addr) != PLUS)
16803 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16806 if (legitimate_indirect_address_p (addr, false) /* reg */
16807 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16808 || (GET_CODE (addr) == AND /* Altivec memory */
16809 && rclass == ALTIVEC_REGS
16810 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16811 && INTVAL (XEXP (addr, 1)) == -16
16812 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16813 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
16816 else if (GET_CODE (addr) == PLUS)
16818 addr_op1 = XEXP (addr, 0);
16819 addr_op2 = XEXP (addr, 1);
16820 if (!REG_P (addr_op1))
16821 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16823 if (TARGET_DEBUG_ADDR)
16825 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
16826 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
16827 debug_rtx (addr_op2);
16829 rs6000_emit_move (scratch, addr_op2, Pmode);
16830 emit_insn (gen_rtx_SET (VOIDmode,
16831 scratch_or_premodify,
16832 gen_rtx_PLUS (Pmode,
16833 addr_op1,
16834 scratch)));
16835 addr = scratch_or_premodify;
16836 scratch_or_premodify = scratch;
16839 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
16840 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
16841 || REG_P (addr))
16843 if (TARGET_DEBUG_ADDR)
16845 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16846 rs6000_reg_names[REGNO (scratch_or_premodify)],
16847 GET_MODE_NAME (mode));
16848 debug_rtx (addr);
16851 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16852 addr = scratch_or_premodify;
16853 scratch_or_premodify = scratch;
16856 else
16857 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16859 break;
16861 default:
16862 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16865 /* If the original address involved a pre-modify that we couldn't use with the
16866 VSX memory instruction with update, and we haven't already taken care of it,
16867 store the address in the pre-modify register and use that as the
16868 address. */
16869 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
16871 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
16872 addr = scratch_or_premodify;
16875 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
16876 memory instruction, recreate the AND now, including the clobber which is
16877 generated by the general ANDSI3/ANDDI3 patterns for the
16878 andi. instruction. */
16879 if (and_op2 != NULL_RTX)
16881 if (! legitimate_indirect_address_p (addr, false))
16883 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
16884 addr = scratch;
16887 if (TARGET_DEBUG_ADDR)
16889 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
16890 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
16891 debug_rtx (and_op2);
16894 and_rtx = gen_rtx_SET (VOIDmode,
16895 scratch,
16896 gen_rtx_AND (Pmode,
16897 addr,
16898 and_op2));
16900 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
16901 emit_insn (gen_rtx_PARALLEL (VOIDmode,
16902 gen_rtvec (2, and_rtx, cc_clobber)));
16903 addr = scratch;
16906 /* Adjust the address if it changed. */
16907 if (addr != XEXP (mem, 0))
16909 mem = replace_equiv_address_nv (mem, addr);
16910 if (TARGET_DEBUG_ADDR)
16911 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
16914 /* Now create the move. */
16915 if (store_p)
16916 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
16917 else
16918 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
16920 return;
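/* A rough sketch of the effect (the exact instructions depend on the
   operands and are assumed here, not taken from the source): an
   Altivec V4SI load from sp+64 cannot use reg+offset addressing, so
   the code above loads 64 into the scratch register, adds the stack
   pointer, and performs the load through the scratch, roughly:
       li scratch,64 ; add scratch,scratch,sp ; lvx vN,0,scratch  */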
16923 /* Convert reloads involving 64-bit gprs and misaligned offset
16924 addressing, or multiple 32-bit gprs and offsets that are too large,
16925 to use indirect addressing. */
16927 void
16928 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
16930 int regno = true_regnum (reg);
16931 enum reg_class rclass;
16932 rtx addr;
16933 rtx scratch_or_premodify = scratch;
16935 if (TARGET_DEBUG_ADDR)
16937 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
16938 store_p ? "store" : "load");
16939 fprintf (stderr, "reg:\n");
16940 debug_rtx (reg);
16941 fprintf (stderr, "mem:\n");
16942 debug_rtx (mem);
16943 fprintf (stderr, "scratch:\n");
16944 debug_rtx (scratch);
16947 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
16948 gcc_assert (GET_CODE (mem) == MEM);
16949 rclass = REGNO_REG_CLASS (regno);
16950 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
16951 addr = XEXP (mem, 0);
16953 if (GET_CODE (addr) == PRE_MODIFY)
16955 scratch_or_premodify = XEXP (addr, 0);
16956 gcc_assert (REG_P (scratch_or_premodify));
16957 addr = XEXP (addr, 1);
16959 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
16961 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16963 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
16965 /* Now create the move. */
16966 if (store_p)
16967 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
16968 else
16969 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
16971 return;
16974 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
16975 this function has any SDmode references. If we are on a power7 or later, we
16976 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
16977 can load/store the value. */
16979 static void
16980 rs6000_alloc_sdmode_stack_slot (void)
16982 tree t;
16983 basic_block bb;
16984 gimple_stmt_iterator gsi;
16986 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
16987 /* We use a different approach for dealing with the secondary
16988 memory in LRA. */
16989 if (ira_use_lra_p)
16990 return;
16992 if (TARGET_NO_SDMODE_STACK)
16993 return;
16995 FOR_EACH_BB_FN (bb, cfun)
16996 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
16998 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
16999 if (ret)
17001 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17002 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17003 SDmode, 0);
17004 return;
17008 /* Check for any SDmode parameters of the function. */
17009 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
17011 if (TREE_TYPE (t) == error_mark_node)
17012 continue;
17014 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
17015 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
17017 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17018 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17019 SDmode, 0);
17020 return;
17025 static void
17026 rs6000_instantiate_decls (void)
17028 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
17029 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
17032 /* Given an rtx X being reloaded into a reg required to be
17033 in class CLASS, return the class of reg to actually use.
17034 In general this is just CLASS; but on some machines
17035 in some cases it is preferable to use a more restrictive class.
17037 On the RS/6000, we have to return NO_REGS when we want to reload a
17038 floating-point CONST_DOUBLE to force it to be copied to memory.
17040 We also don't want to reload integer values into floating-point
17041 registers if we can at all help it. In fact, this can
17042 cause reload to die, if it tries to generate a reload of CTR
17043 into a FP register and discovers it doesn't have the memory location
17044 required.
17046 ??? Would it be a good idea to have reload do the converse, that is
17047 try to reload floating modes into FP registers if possible?
17050 static enum reg_class
17051 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
17053 enum machine_mode mode = GET_MODE (x);
17055 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
17056 return rclass;
17058 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17059 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
17060 && easy_vector_constant (x, mode))
17061 return ALTIVEC_REGS;
17063 if ((CONSTANT_P (x) || GET_CODE (x) == PLUS))
17065 if (reg_class_subset_p (GENERAL_REGS, rclass))
17066 return GENERAL_REGS;
17067 if (reg_class_subset_p (BASE_REGS, rclass))
17068 return BASE_REGS;
17069 return NO_REGS;
17072 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
17073 return GENERAL_REGS;
17075 /* For VSX, prefer the traditional registers for 64-bit values because we can
17076 use the non-VSX loads. Prefer the Altivec registers if Altivec is
17077 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
17078 prefer Altivec loads. */
17079 if (rclass == VSX_REGS)
17081 if (GET_MODE_SIZE (mode) <= 8)
17082 return FLOAT_REGS;
17084 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
17085 || mode == V1TImode)
17086 return ALTIVEC_REGS;
17088 return rclass;
17091 return rclass;
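/* For instance, with rclass == VSX_REGS and a register or memory
   operand: DFmode (8 bytes) prefers FLOAT_REGS, V4SImode prefers
   ALTIVEC_REGS, and V2DFmode stays in VSX_REGS.  */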
17094 /* Debug version of rs6000_preferred_reload_class. */
17095 static enum reg_class
17096 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
17098 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
17100 fprintf (stderr,
17101 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17102 "mode = %s, x:\n",
17103 reg_class_names[ret], reg_class_names[rclass],
17104 GET_MODE_NAME (GET_MODE (x)));
17105 debug_rtx (x);
17107 return ret;
17110 /* If we are copying between FP or AltiVec registers and anything else, we need
17111 a memory location. The exception is when we are targeting ppc64 and the
17112 direct move instructions between fprs and gprs are available. Also, under VSX, you
17113 can copy vector registers from the FP register set to the Altivec register
17114 set and vice versa. */
17116 static bool
17117 rs6000_secondary_memory_needed (enum reg_class from_class,
17118 enum reg_class to_class,
17119 enum machine_mode mode)
17121 enum rs6000_reg_type from_type, to_type;
17122 bool altivec_p = ((from_class == ALTIVEC_REGS)
17123 || (to_class == ALTIVEC_REGS));
17125 /* If a simple/direct move is available, we don't need secondary memory. */
17126 from_type = reg_class_to_reg_type[(int)from_class];
17127 to_type = reg_class_to_reg_type[(int)to_class];
17129 if (rs6000_secondary_reload_move (to_type, from_type, mode,
17130 (secondary_reload_info *)0, altivec_p))
17131 return false;
17133 /* If we have a floating point or vector register class, we need to use
17134 memory to transfer the data. */
17135 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
17136 return true;
17138 return false;
17141 /* Debug version of rs6000_secondary_memory_needed. */
17142 static bool
17143 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
17144 enum reg_class to_class,
17145 enum machine_mode mode)
17147 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
17149 fprintf (stderr,
17150 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17151 "to_class = %s, mode = %s\n",
17152 ret ? "true" : "false",
17153 reg_class_names[from_class],
17154 reg_class_names[to_class],
17155 GET_MODE_NAME (mode));
17157 return ret;
17160 /* Return the register class of a scratch register needed to copy IN into
17161 or out of a register in RCLASS in MODE. If it can be done directly,
17162 NO_REGS is returned. */
17164 static enum reg_class
17165 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
17166 rtx in)
17168 int regno;
17170 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17171 #if TARGET_MACHO
17172 && MACHOPIC_INDIRECT
17173 #endif
17176 /* We cannot copy a symbolic operand directly into anything
17177 other than BASE_REGS for TARGET_ELF. So indicate that a
17178 register from BASE_REGS is needed as an intermediate
17179 register.
17181 On Darwin, pic addresses require a load from memory, which
17182 needs a base register. */
17183 if (rclass != BASE_REGS
17184 && (GET_CODE (in) == SYMBOL_REF
17185 || GET_CODE (in) == HIGH
17186 || GET_CODE (in) == LABEL_REF
17187 || GET_CODE (in) == CONST))
17188 return BASE_REGS;
17191 if (GET_CODE (in) == REG)
17193 regno = REGNO (in);
17194 if (regno >= FIRST_PSEUDO_REGISTER)
17196 regno = true_regnum (in);
17197 if (regno >= FIRST_PSEUDO_REGISTER)
17198 regno = -1;
17201 else if (GET_CODE (in) == SUBREG)
17203 regno = true_regnum (in);
17204 if (regno >= FIRST_PSEUDO_REGISTER)
17205 regno = -1;
17207 else
17208 regno = -1;
17210 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17211 into anything. */
17212 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17213 || (regno >= 0 && INT_REGNO_P (regno)))
17214 return NO_REGS;
17216 /* Constants, memory, and FP registers can go into FP registers. */
17217 if ((regno == -1 || FP_REGNO_P (regno))
17218 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17219 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17221 /* Memory, and FP/altivec registers can go into fp/altivec registers under
17222 VSX. However, for scalar variables, use the traditional floating point
17223 registers so that we can use offset+register addressing. */
17224 if (TARGET_VSX
17225 && (regno == -1 || VSX_REGNO_P (regno))
17226 && VSX_REG_CLASS_P (rclass))
17228 if (GET_MODE_SIZE (mode) < 16)
17229 return FLOAT_REGS;
17231 return NO_REGS;
17234 /* Memory, and AltiVec registers can go into AltiVec registers. */
17235 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17236 && rclass == ALTIVEC_REGS)
17237 return NO_REGS;
17239 /* We can copy among the CR registers. */
17240 if ((rclass == CR_REGS || rclass == CR0_REGS)
17241 && regno >= 0 && CR_REGNO_P (regno))
17242 return NO_REGS;
17244 /* Otherwise, we need GENERAL_REGS. */
17245 return GENERAL_REGS;
17248 /* Debug version of rs6000_secondary_reload_class. */
17249 static enum reg_class
17250 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17251 enum machine_mode mode, rtx in)
17253 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17254 fprintf (stderr,
17255 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17256 "mode = %s, input rtx:\n",
17257 reg_class_names[ret], reg_class_names[rclass],
17258 GET_MODE_NAME (mode));
17259 debug_rtx (in);
17261 return ret;
17264 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17266 static bool
17267 rs6000_cannot_change_mode_class (enum machine_mode from,
17268 enum machine_mode to,
17269 enum reg_class rclass)
17271 unsigned from_size = GET_MODE_SIZE (from);
17272 unsigned to_size = GET_MODE_SIZE (to);
17274 if (from_size != to_size)
17276 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
17278 if (reg_classes_intersect_p (xclass, rclass))
17280 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
17281 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
17283 /* Don't allow 64-bit types to overlap with 128-bit types that take a
17284 single register under VSX because the scalar part of the register
17285 is in the upper 64-bits, and not the lower 64-bits. Types like
17286 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
17287 IEEE floating point can't overlap, and neither can small
17288 values. */
17290 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
17291 return true;
17293 /* TDmode in floating-mode registers must always go into a register
17294 pair with the most significant word in the even-numbered register
17295 to match ISA requirements. In little-endian mode, this does not
17296 match subreg numbering, so we cannot allow subregs. */
17297 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
17298 return true;
17300 if (from_size < 8 || to_size < 8)
17301 return true;
17303 if (from_size == 8 && (8 * to_nregs) != to_size)
17304 return true;
17306 if (to_size == 8 && (8 * from_nregs) != from_size)
17307 return true;
17309 return false;
17311 else
17312 return false;
17315 if (TARGET_E500_DOUBLE
17316 && ((((to) == DFmode) + ((from) == DFmode)) == 1
17317 || (((to) == TFmode) + ((from) == TFmode)) == 1
17318 || (((to) == DDmode) + ((from) == DDmode)) == 1
17319 || (((to) == TDmode) + ((from) == TDmode)) == 1
17320 || (((to) == DImode) + ((from) == DImode)) == 1))
17321 return true;
17323 /* Since the VSX register set includes traditional floating point registers
17324 and altivec registers, just check for the size being different instead of
17325 trying to check whether the modes are vector modes. Otherwise it won't
17326 allow, say, DF and DI to change classes. For types like TFmode and TDmode
17327 that take 2 64-bit registers, rather than a single 128-bit register, don't
17328 allow subregs of those types to other 128-bit types. */
17329 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
17331 unsigned num_regs = (from_size + 15) / 16;
17332 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
17333 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
17334 return true;
17336 return (from_size != 8 && from_size != 16);
17339 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
17340 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
17341 return true;
17343 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
17344 && reg_classes_intersect_p (GENERAL_REGS, rclass))
17345 return true;
17347 return false;
17350 /* Debug version of rs6000_cannot_change_mode_class. */
17351 static bool
17352 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
17353 enum machine_mode to,
17354 enum reg_class rclass)
17356 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
17358 fprintf (stderr,
17359 "rs6000_cannot_change_mode_class, return %s, from = %s, "
17360 "to = %s, rclass = %s\n",
17361 ret ? "true" : "false",
17362 GET_MODE_NAME (from), GET_MODE_NAME (to),
17363 reg_class_names[rclass]);
17365 return ret;
17368 /* Return a string to do a move operation of 128 bits of data. */
17370 const char *
17371 rs6000_output_move_128bit (rtx operands[])
17373 rtx dest = operands[0];
17374 rtx src = operands[1];
17375 enum machine_mode mode = GET_MODE (dest);
17376 int dest_regno;
17377 int src_regno;
17378 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
17379 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
17381 if (REG_P (dest))
17383 dest_regno = REGNO (dest);
17384 dest_gpr_p = INT_REGNO_P (dest_regno);
17385 dest_fp_p = FP_REGNO_P (dest_regno);
17386 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
17387 dest_vsx_p = dest_fp_p | dest_vmx_p;
17389 else
17391 dest_regno = -1;
17392 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
17395 if (REG_P (src))
17397 src_regno = REGNO (src);
17398 src_gpr_p = INT_REGNO_P (src_regno);
17399 src_fp_p = FP_REGNO_P (src_regno);
17400 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
17401 src_vsx_p = src_fp_p | src_vmx_p;
17403 else
17405 src_regno = -1;
17406 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
17409 /* Register moves. */
17410 if (dest_regno >= 0 && src_regno >= 0)
17412 if (dest_gpr_p)
17414 if (src_gpr_p)
17415 return "#";
17417 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
17418 return "#";
17421 else if (TARGET_VSX && dest_vsx_p)
17423 if (src_vsx_p)
17424 return "xxlor %x0,%x1,%x1";
17426 else if (TARGET_DIRECT_MOVE && src_gpr_p)
17427 return "#";
17430 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
17431 return "vor %0,%1,%1";
17433 else if (dest_fp_p && src_fp_p)
17434 return "#";
17437 /* Loads. */
17438 else if (dest_regno >= 0 && MEM_P (src))
17440 if (dest_gpr_p)
17442 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17443 return "lq %0,%1";
17444 else
17445 return "#";
17448 else if (TARGET_ALTIVEC && dest_vmx_p
17449 && altivec_indexed_or_indirect_operand (src, mode))
17450 return "lvx %0,%y1";
17452 else if (TARGET_VSX && dest_vsx_p)
17454 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17455 return "lxvw4x %x0,%y1";
17456 else
17457 return "lxvd2x %x0,%y1";
17460 else if (TARGET_ALTIVEC && dest_vmx_p)
17461 return "lvx %0,%y1";
17463 else if (dest_fp_p)
17464 return "#";
17467 /* Stores. */
17468 else if (src_regno >= 0 && MEM_P (dest))
17470 if (src_gpr_p)
17472 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17473 return "stq %1,%0";
17474 else
17475 return "#";
17478 else if (TARGET_ALTIVEC && src_vmx_p
17479 && altivec_indexed_or_indirect_operand (src, mode))
17480 return "stvx %1,%y0";
17482 else if (TARGET_VSX && src_vsx_p)
17484 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17485 return "stxvw4x %x1,%y0";
17486 else
17487 return "stxvd2x %x1,%y0";
17490 else if (TARGET_ALTIVEC && src_vmx_p)
17491 return "stvx %1,%y0";
17493 else if (src_fp_p)
17494 return "#";
17497 /* Constants. */
17498 else if (dest_regno >= 0
17499 && (GET_CODE (src) == CONST_INT
17500 || GET_CODE (src) == CONST_WIDE_INT
17501 || GET_CODE (src) == CONST_DOUBLE
17502 || GET_CODE (src) == CONST_VECTOR))
17504 if (dest_gpr_p)
17505 return "#";
17507 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
17508 return "xxlxor %x0,%x0,%x0";
17510 else if (TARGET_ALTIVEC && dest_vmx_p)
17511 return output_vec_const_move (operands);
17514 if (TARGET_DEBUG_ADDR)
17516 fprintf (stderr, "\n===== Bad 128 bit move:\n");
17517 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
17520 gcc_unreachable ();
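/* Typical outputs: a V2DF copy between two VSX registers prints
   "xxlor %x0,%x1,%x1", a V4SI load into a VSX register prints
   "lxvw4x %x0,%y1", and a GPR-to-GPR TImode move returns "#" so that
   the post-reload splitter can break it into word-sized moves.  */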
17523 /* Validate a 128-bit move. */
17524 bool
17525 rs6000_move_128bit_ok_p (rtx operands[])
17527 enum machine_mode mode = GET_MODE (operands[0]);
17528 return (gpc_reg_operand (operands[0], mode)
17529 || gpc_reg_operand (operands[1], mode));
17532 /* Return true if a 128-bit move needs to be split. */
17533 bool
17534 rs6000_split_128bit_ok_p (rtx operands[])
17536 if (!reload_completed)
17537 return false;
17539 if (!gpr_or_gpr_p (operands[0], operands[1]))
17540 return false;
17542 if (quad_load_store_p (operands[0], operands[1]))
17543 return false;
17545 return true;
17549 /* Given a comparison operation, return the bit number in CCR to test. We
17550 know this is a valid comparison.
17552 SCC_P is 1 if this is for an scc. That means that %D will have been
17553 used instead of %C, so the bits will be in different places.
17555 Return -1 if OP isn't a valid comparison for some reason. */
17558 ccr_bit (rtx op, int scc_p)
17560 enum rtx_code code = GET_CODE (op);
17561 enum machine_mode cc_mode;
17562 int cc_regnum;
17563 int base_bit;
17564 rtx reg;
17566 if (!COMPARISON_P (op))
17567 return -1;
17569 reg = XEXP (op, 0);
17571 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
17573 cc_mode = GET_MODE (reg);
17574 cc_regnum = REGNO (reg);
17575 base_bit = 4 * (cc_regnum - CR0_REGNO);
17577 validate_condition_mode (code, cc_mode);
17579 /* When generating a sCOND operation, only positive conditions are
17580 allowed. */
17581 gcc_assert (!scc_p
17582 || code == EQ || code == GT || code == LT || code == UNORDERED
17583 || code == GTU || code == LTU);
17585 switch (code)
17587 case NE:
17588 return scc_p ? base_bit + 3 : base_bit + 2;
17589 case EQ:
17590 return base_bit + 2;
17591 case GT: case GTU: case UNLE:
17592 return base_bit + 1;
17593 case LT: case LTU: case UNGE:
17594 return base_bit;
17595 case ORDERED: case UNORDERED:
17596 return base_bit + 3;
17598 case GE: case GEU:
17599 /* If scc, we will have done a cror to put the bit in the
17600 unordered position. So test that bit. For integer, this is ! LT
17601 unless this is an scc insn. */
17602 return scc_p ? base_bit + 3 : base_bit;
17604 case LE: case LEU:
17605 return scc_p ? base_bit + 3 : base_bit + 1;
17607 default:
17608 gcc_unreachable ();
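/* For instance, a GT comparison in CR2 has base_bit
   = 4 * (CR2 - CR0) = 8, so ccr_bit returns 9; with SCC_P set, a GE
   test returns base_bit + 3 = 11, since the cror mentioned above has
   moved the result into the unordered slot.  */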
17612 /* Return the GOT register. */
17615 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
17617 /* The second flow pass currently (June 1999) can't update
17618 regs_ever_live without disturbing other parts of the compiler, so
17619 update it here to make the prolog/epilogue code happy. */
17620 if (!can_create_pseudo_p ()
17621 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17622 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
17624 crtl->uses_pic_offset_table = 1;
17626 return pic_offset_table_rtx;
17629 static rs6000_stack_t stack_info;
17631 /* Function to init struct machine_function.
17632 This will be called, via a pointer variable,
17633 from push_function_context. */
17635 static struct machine_function *
17636 rs6000_init_machine_status (void)
17638 stack_info.reload_completed = 0;
17639 return ggc_cleared_alloc<machine_function> ();
17642 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
17645 extract_MB (rtx op)
17647 int i;
17648 unsigned long val = INTVAL (op);
17650 /* If the high bit is zero, the value is the first 1 bit we find
17651 from the left. */
17652 if ((val & 0x80000000) == 0)
17654 gcc_assert (val & 0xffffffff);
17656 i = 1;
17657 while (((val <<= 1) & 0x80000000) == 0)
17658 ++i;
17659 return i;
17662 /* If the high bit is set and the low bit is not, or the mask is all
17663 1's, the value is zero. */
17664 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
17665 return 0;
17667 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17668 from the right. */
17669 i = 31;
17670 while (((val >>= 1) & 1) != 0)
17671 --i;
17673 return i;
17677 extract_ME (rtx op)
17679 int i;
17680 unsigned long val = INTVAL (op);
17682 /* If the low bit is zero, the value is the first 1 bit we find from
17683 the right. */
17684 if ((val & 1) == 0)
17686 gcc_assert (val & 0xffffffff);
17688 i = 30;
17689 while (((val >>= 1) & 1) == 0)
17690 --i;
17692 return i;
17695 /* If the low bit is set and the high bit is not, or the mask is all
17696 1's, the value is 31. */
17697 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
17698 return 31;
17700 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17701 from the left. */
17702 i = 0;
17703 while (((val <<= 1) & 0x80000000) != 0)
17704 ++i;
17706 return i;
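/* Worked examples: for the mask 0x00fff000 the high bit is clear, so
   extract_MB counts the leading zeros and returns 8, and the low bit
   is clear, so extract_ME returns 19; the run covers bits 8..19 in
   IBM bit numbering.  For the wrap-around mask 0xff0000ff, extract_MB
   returns 24 and extract_ME returns 7, i.e. the mask runs from bit 24
   around through bit 7.  */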
17709 /* Locate some local-dynamic symbol still in use by this function
17710 so that we can print its name in some tls_ld pattern. */
17712 static const char *
17713 rs6000_get_some_local_dynamic_name (void)
17715 rtx insn;
17717 if (cfun->machine->some_ld_name)
17718 return cfun->machine->some_ld_name;
17720 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
17721 if (INSN_P (insn)
17722 && for_each_rtx (&PATTERN (insn),
17723 rs6000_get_some_local_dynamic_name_1, 0))
17724 return cfun->machine->some_ld_name;
17726 gcc_unreachable ();
17729 /* Helper function for rs6000_get_some_local_dynamic_name. */
17731 static int
17732 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
17734 rtx x = *px;
17736 if (GET_CODE (x) == SYMBOL_REF)
17738 const char *str = XSTR (x, 0);
17739 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
17741 cfun->machine->some_ld_name = str;
17742 return 1;
17746 return 0;
17749 /* Write out a function code label. */
17751 void
17752 rs6000_output_function_entry (FILE *file, const char *fname)
17754 if (fname[0] != '.')
17756 switch (DEFAULT_ABI)
17758 default:
17759 gcc_unreachable ();
17761 case ABI_AIX:
17762 if (DOT_SYMBOLS)
17763 putc ('.', file);
17764 else
17765 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
17766 break;
17768 case ABI_ELFv2:
17769 case ABI_V4:
17770 case ABI_DARWIN:
17771 break;
17775 RS6000_OUTPUT_BASENAME (file, fname);
17778 /* Print an operand. Recognize special options, documented below. */
17780 #if TARGET_ELF
17781 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
17782 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
17783 #else
17784 #define SMALL_DATA_RELOC "sda21"
17785 #define SMALL_DATA_REG 0
17786 #endif
17788 void
17789 print_operand (FILE *file, rtx x, int code)
17791 int i;
17792 unsigned HOST_WIDE_INT uval;
17794 switch (code)
17796 /* %a is output_address. */
17798 case 'b':
17799 /* If constant, low-order 16 bits of constant, unsigned.
17800 Otherwise, write normally. */
17801 if (INT_P (x))
17802 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
17803 else
17804 print_operand (file, x, 0);
17805 return;
17807 case 'B':
17808 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
17809 for 64-bit mask direction. */
17810 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
17811 return;
17813 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
17814 output_operand. */
17816 case 'D':
17817 /* Like 'J' but get to the GT bit only. */
17818 gcc_assert (REG_P (x));
17820 /* Bit 1 is GT bit. */
17821 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
17823 /* Add one for shift count in rlinm for scc. */
17824 fprintf (file, "%d", i + 1);
17825 return;
17827 case 'E':
17828 /* X is a CR register. Print the number of the EQ bit of the CR. */
17829 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17830 output_operand_lossage ("invalid %%E value");
17831 else
17832 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
17833 return;
17835 case 'f':
17836 /* X is a CR register. Print the shift count needed to move it
17837 to the high-order four bits. */
17838 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17839 output_operand_lossage ("invalid %%f value");
17840 else
17841 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
17842 return;
17844 case 'F':
17845 /* Similar, but print the count for the rotate in the opposite
17846 direction. */
17847 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
17848 output_operand_lossage ("invalid %%F value");
17849 else
17850 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
17851 return;
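/* For instance, for an operand in CR3: %E prints 14 (4 * 3 + 2, the
   EQ bit), %f prints 12, and %F prints 32 - 12 = 20.  */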
17853 case 'G':
17854 /* X is a constant integer. If it is negative, print "m",
17855 otherwise print "z". This is to make an aze or ame insn. */
17856 if (GET_CODE (x) != CONST_INT)
17857 output_operand_lossage ("invalid %%G value");
17858 else if (INTVAL (x) >= 0)
17859 putc ('z', file);
17860 else
17861 putc ('m', file);
17862 return;
17864 case 'h':
17865 /* If constant, output low-order five bits. Otherwise, write
17866 normally. */
17867 if (INT_P (x))
17868 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
17869 else
17870 print_operand (file, x, 0);
17871 return;
17873 case 'H':
17874 /* If constant, output low-order six bits. Otherwise, write
17875 normally. */
17876 if (INT_P (x))
17877 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
17878 else
17879 print_operand (file, x, 0);
17880 return;
17882 case 'I':
17883 /* Print `i' if this is a constant, else nothing. */
17884 if (INT_P (x))
17885 putc ('i', file);
17886 return;
17888 case 'j':
17889 /* Write the bit number in CCR for jump. */
17890 i = ccr_bit (x, 0);
17891 if (i == -1)
17892 output_operand_lossage ("invalid %%j code");
17893 else
17894 fprintf (file, "%d", i);
17895 return;
17897 case 'J':
17898 /* Similar, but add one for shift count in rlinm for scc and pass
17899 scc flag to `ccr_bit'. */
17900 i = ccr_bit (x, 1);
17901 if (i == -1)
17902 output_operand_lossage ("invalid %%J code");
17903 else
17904 /* If we want bit 31, write a shift count of zero, not 32. */
17905 fprintf (file, "%d", i == 31 ? 0 : i + 1);
17906 return;
17908 case 'k':
17909 /* X must be a constant. Write the 1's complement of the
17910 constant. */
17911 if (! INT_P (x))
17912 output_operand_lossage ("invalid %%k value");
17913 else
17914 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
17915 return;
17917 case 'K':
17918 /* X must be a symbolic constant on ELF. Write an
17919 expression suitable for an 'addi' that adds in the low 16
17920 bits of the MEM. */
17921 if (GET_CODE (x) == CONST)
17923 if (GET_CODE (XEXP (x, 0)) != PLUS
17924 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
17925 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
17926 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
17927 output_operand_lossage ("invalid %%K value");
17929 print_operand_address (file, x);
17930 fputs ("@l", file);
17931 return;
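/* Illustrative output: for the constant (const (plus (symbol_ref "s")
   (const_int 4))), %K prints "s+4@l", suitable as the immediate of an
   addi that adds in the low 16 bits, e.g. "addi 9,9,s+4@l" (register
   number hypothetical).  */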
17933 /* %l is output_asm_label. */
17935 case 'L':
17936 /* Write second word of DImode or DFmode reference. Works on register
17937 or non-indexed memory only. */
17938 if (REG_P (x))
17939 fputs (reg_names[REGNO (x) + 1], file);
17940 else if (MEM_P (x))
17942 /* Handle possible auto-increment.  Since it is pre-increment and
17943 we have already done it, we can just use an offset of one word.  */
17944 if (GET_CODE (XEXP (x, 0)) == PRE_INC
17945 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
17946 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
17947 UNITS_PER_WORD));
17948 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
17949 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
17950 UNITS_PER_WORD));
17951 else
17952 output_address (XEXP (adjust_address_nv (x, SImode,
17953 UNITS_PER_WORD),
17954 0));
17956 if (small_data_operand (x, GET_MODE (x)))
17957 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
17958 reg_names[SMALL_DATA_REG]);
17960 return;
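/* Illustrative use: for a DImode value living in r5:r6 on a 32-bit
   target, %0 prints "5" and %L0 prints "6", the register holding the
   second word; for a non-indexed MEM such as 8(9), %L0 prints 12(9),
   the address one word further on.  */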
17962 case 'm':
17963 /* MB value for a mask operand. */
17964 if (! mask_operand (x, SImode))
17965 output_operand_lossage ("invalid %%m value");
17967 fprintf (file, "%d", extract_MB (x));
17968 return;
17970 case 'M':
17971 /* ME value for a mask operand. */
17972 if (! mask_operand (x, SImode))
17973 output_operand_lossage ("invalid %%M value");
17975 fprintf (file, "%d", extract_ME (x));
17976 return;
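/* Worked example (illustrative): the SImode mask 0x00ffff00 has bits
   8..23 set in IBM bit numbering (bit 0 = MSB), so %m prints 8 (the MB
   field) and %M prints 23 (the ME field) for use in an rlwinm mask.  */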
17978 /* %n outputs the negative of its operand. */
17980 case 'N':
17981 /* Write the number of elements in the vector times 4. */
17982 if (GET_CODE (x) != PARALLEL)
17983 output_operand_lossage ("invalid %%N value");
17984 else
17985 fprintf (file, "%d", XVECLEN (x, 0) * 4);
17986 return;
17988 case 'O':
17989 /* Similar, but subtract 1 first. */
17990 if (GET_CODE (x) != PARALLEL)
17991 output_operand_lossage ("invalid %%O value");
17992 else
17993 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
17994 return;
17996 case 'p':
17997 /* X is a CONST_INT that is a power of two. Output the logarithm. */
17998 if (! INT_P (x)
17999 || INTVAL (x) < 0
18000 || (i = exact_log2 (INTVAL (x))) < 0)
18001 output_operand_lossage ("invalid %%p value");
18002 else
18003 fprintf (file, "%d", i);
18004 return;
18006 case 'P':
18007 /* The operand must be an indirect memory reference. The result
18008 is the register name. */
18009 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
18010 || REGNO (XEXP (x, 0)) >= 32)
18011 output_operand_lossage ("invalid %%P value");
18012 else
18013 fputs (reg_names[REGNO (XEXP (x, 0))], file);
18014 return;
18016 case 'q':
18017 /* This outputs the logical code corresponding to a boolean
18018 expression. The expression may have one or both operands
18019 negated (if one, only the first one). For condition register
18020 logical operations, it will also treat the negated
18021 CR codes as NOTs, but not handle NOTs of them. */
18023 const char *const *t = 0;
18024 const char *s;
18025 enum rtx_code code = GET_CODE (x);
18026 static const char * const tbl[3][3] = {
18027 { "and", "andc", "nor" },
18028 { "or", "orc", "nand" },
18029 { "xor", "eqv", "xor" } };
18031 if (code == AND)
18032 t = tbl[0];
18033 else if (code == IOR)
18034 t = tbl[1];
18035 else if (code == XOR)
18036 t = tbl[2];
18037 else
18038 output_operand_lossage ("invalid %%q value");
18040 if (GET_CODE (XEXP (x, 0)) != NOT)
18041 s = t[0];
18042 else
18044 if (GET_CODE (XEXP (x, 1)) == NOT)
18045 s = t[2];
18046 else
18047 s = t[1];
18050 fputs (s, file);
18052 return;
18054 case 'Q':
18055 if (! TARGET_MFCRF)
18056 return;
18057 fputc (',', file);
18058 /* FALLTHRU */
18060 case 'R':
18061 /* X is a CR register. Print the mask for `mtcrf'. */
18062 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18063 output_operand_lossage ("invalid %%R value");
18064 else
18065 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
18066 return;
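/* Illustrative mapping: 128 >> (REGNO - CR0_REGNO) is the one-hot FXM
   field for mtcrf/mfcrf, so cr0 -> 128, cr1 -> 64, ..., cr7 -> 1.  %Q
   prints nothing without TARGET_MFCRF; with it, it falls through and
   emits "," plus the same mask, so a (hypothetical) template
   "mfcr %0%Q2" comes out as either "mfcr 3" or "mfcr 3,128".  */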
18068 case 's':
18069 /* Low 5 bits of 32 - value */
18070 if (! INT_P (x))
18071 output_operand_lossage ("invalid %%s value");
18072 else
18073 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
18074 return;
18076 case 'S':
18077 /* PowerPC64 mask position. All 0's is excluded.
18078 CONST_INT 32-bit mask is considered sign-extended so any
18079 transition must occur within the CONST_INT, not on the boundary. */
18080 if (! mask64_operand (x, DImode))
18081 output_operand_lossage ("invalid %%S value");
18083 uval = INTVAL (x);
18085 if (uval & 1) /* Clear Left */
18087 #if HOST_BITS_PER_WIDE_INT > 64
18088 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18089 #endif
18090 i = 64;
18092 else /* Clear Right */
18094 uval = ~uval;
18095 #if HOST_BITS_PER_WIDE_INT > 64
18096 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18097 #endif
18098 i = 63;
18100 while (uval != 0)
18101 --i, uval >>= 1;
18102 gcc_assert (i >= 0);
18103 fprintf (file, "%d", i);
18104 return;
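/* Worked example (illustrative): for the mask 0x000000000000ffff (a
   "clear left" mask, low bit set), the loop finds the highest set bit
   at position 15 and prints 64 - 16 = 48, the MB value for an
   rldicl-style clear of the upper 48 bits; for 0xffff000000000000 (a
   "clear right" mask) it prints 15, the corresponding ME value.  */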
18106 case 't':
18107 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18108 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
18110 /* Bit 3 is the OV bit.  */
18111 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
18113 /* If we want bit 31, write a shift count of zero, not 32. */
18114 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18115 return;
18117 case 'T':
18118 /* Print the symbolic name of a branch target register. */
18119 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
18120 && REGNO (x) != CTR_REGNO))
18121 output_operand_lossage ("invalid %%T value");
18122 else if (REGNO (x) == LR_REGNO)
18123 fputs ("lr", file);
18124 else
18125 fputs ("ctr", file);
18126 return;
18128 case 'u':
18129 /* High-order 16 bits of constant for use in unsigned operand. */
18130 if (! INT_P (x))
18131 output_operand_lossage ("invalid %%u value");
18132 else
18133 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18134 (INTVAL (x) >> 16) & 0xffff);
18135 return;
18137 case 'v':
18138 /* High-order 16 bits of constant for use in signed operand. */
18139 if (! INT_P (x))
18140 output_operand_lossage ("invalid %%v value");
18141 else
18142 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18143 (INTVAL (x) >> 16) & 0xffff);
18144 return;
18146 case 'U':
18147 /* Print `u' if this has an auto-increment or auto-decrement. */
18148 if (MEM_P (x)
18149 && (GET_CODE (XEXP (x, 0)) == PRE_INC
18150 || GET_CODE (XEXP (x, 0)) == PRE_DEC
18151 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
18152 putc ('u', file);
18153 return;
18155 case 'V':
18156 /* Print the trap code for this operand. */
18157 switch (GET_CODE (x))
18159 case EQ:
18160 fputs ("eq", file); /* 4 */
18161 break;
18162 case NE:
18163 fputs ("ne", file); /* 24 */
18164 break;
18165 case LT:
18166 fputs ("lt", file); /* 16 */
18167 break;
18168 case LE:
18169 fputs ("le", file); /* 20 */
18170 break;
18171 case GT:
18172 fputs ("gt", file); /* 8 */
18173 break;
18174 case GE:
18175 fputs ("ge", file); /* 12 */
18176 break;
18177 case LTU:
18178 fputs ("llt", file); /* 2 */
18179 break;
18180 case LEU:
18181 fputs ("lle", file); /* 6 */
18182 break;
18183 case GTU:
18184 fputs ("lgt", file); /* 1 */
18185 break;
18186 case GEU:
18187 fputs ("lge", file); /* 5 */
18188 break;
18189 default:
18190 gcc_unreachable ();
18192 break;
18194 case 'w':
18195 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18196 normally. */
18197 if (INT_P (x))
18198 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18199 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18200 else
18201 print_operand (file, x, 0);
18202 return;
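/* Worked example (illustrative): the xor/subtract idiom sign-extends
   the low 16 bits, so for INTVAL 0x1fffe this prints -2, since
   (0xfffe ^ 0x8000) - 0x8000 = 0x7ffe - 0x8000 = -2, matching what a
   signed 16-bit immediate field would hold.  */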
18204 case 'W':
18205 /* MB value for a PowerPC64 rldic operand. */
18206 i = clz_hwi (INTVAL (x));
18208 fprintf (file, "%d", i);
18209 return;
18211 case 'x':
18212 /* X is a FPR or Altivec register used in a VSX context. */
18213 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18214 output_operand_lossage ("invalid %%x value");
18215 else
18217 int reg = REGNO (x);
18218 int vsx_reg = (FP_REGNO_P (reg)
18219 ? reg - 32
18220 : reg - FIRST_ALTIVEC_REGNO + 32);
18222 #ifdef TARGET_REGNAMES
18223 if (TARGET_REGNAMES)
18224 fprintf (file, "%%vs%d", vsx_reg);
18225 else
18226 #endif
18227 fprintf (file, "%d", vsx_reg);
18229 return;
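/* Illustrative numbering: FPRs (hard regnos 32..63) map to VSX
   registers 0..31 and AltiVec registers map to VSX registers 32..63,
   so %x on f10 prints "10" while %x on v10 prints "42" (or "%vs42"
   under -mregnames).  */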
18231 case 'X':
18232 if (MEM_P (x)
18233 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18234 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18235 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18236 putc ('x', file);
18237 return;
18239 case 'Y':
18240 /* Like 'L', for third word of TImode/PTImode */
18241 if (REG_P (x))
18242 fputs (reg_names[REGNO (x) + 2], file);
18243 else if (MEM_P (x))
18245 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18246 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18247 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18248 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18249 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18250 else
18251 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18252 if (small_data_operand (x, GET_MODE (x)))
18253 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18254 reg_names[SMALL_DATA_REG]);
18256 return;
18258 case 'z':
18259 /* X is a SYMBOL_REF. Write out the name preceded by a
18260 period and without any trailing data in brackets. Used for function
18261 names. If we are configured for System V (or the embedded ABI) on
18262 the PowerPC, do not emit the period, since those systems do not use
18263 TOCs and the like. */
18264 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18266 /* For macho, check to see if we need a stub. */
18267 if (TARGET_MACHO)
18269 const char *name = XSTR (x, 0);
18270 #if TARGET_MACHO
18271 if (darwin_emit_branch_islands
18272 && MACHOPIC_INDIRECT
18273 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18274 name = machopic_indirection_name (x, /*stub_p=*/true);
18275 #endif
18276 assemble_name (file, name);
18278 else if (!DOT_SYMBOLS)
18279 assemble_name (file, XSTR (x, 0));
18280 else
18281 rs6000_output_function_entry (file, XSTR (x, 0));
18282 return;
18284 case 'Z':
18285 /* Like 'L', for last word of TImode/PTImode. */
18286 if (REG_P (x))
18287 fputs (reg_names[REGNO (x) + 3], file);
18288 else if (MEM_P (x))
18290 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18291 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18292 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18293 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18294 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18295 else
18296 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
18297 if (small_data_operand (x, GET_MODE (x)))
18298 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18299 reg_names[SMALL_DATA_REG]);
18301 return;
18303 /* Print AltiVec or SPE memory operand. */
18304 case 'y':
18306 rtx tmp;
18308 gcc_assert (MEM_P (x));
18310 tmp = XEXP (x, 0);
18312 /* Ugly hack because %y is overloaded. */
18313 if ((TARGET_SPE || TARGET_E500_DOUBLE)
18314 && (GET_MODE_SIZE (GET_MODE (x)) == 8
18315 || GET_MODE (x) == TFmode
18316 || GET_MODE (x) == TImode
18317 || GET_MODE (x) == PTImode))
18319 /* Handle [reg]. */
18320 if (REG_P (tmp))
18322 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
18323 break;
18325 /* Handle [reg+UIMM]. */
18326 else if (GET_CODE (tmp) == PLUS &&
18327 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
18329 int x;
18331 gcc_assert (REG_P (XEXP (tmp, 0)));
18333 x = INTVAL (XEXP (tmp, 1));
18334 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
18335 break;
18338 /* Fall through. Must be [reg+reg]. */
18340 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
18341 && GET_CODE (tmp) == AND
18342 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
18343 && INTVAL (XEXP (tmp, 1)) == -16)
18344 tmp = XEXP (tmp, 0);
18345 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
18346 && GET_CODE (tmp) == PRE_MODIFY)
18347 tmp = XEXP (tmp, 1);
18348 if (REG_P (tmp))
18349 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
18350 else
18352 if (GET_CODE (tmp) != PLUS
18353 || !REG_P (XEXP (tmp, 0))
18354 || !REG_P (XEXP (tmp, 1)))
18356 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18357 break;
18360 if (REGNO (XEXP (tmp, 0)) == 0)
18361 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
18362 reg_names[ REGNO (XEXP (tmp, 0)) ]);
18363 else
18364 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
18365 reg_names[ REGNO (XEXP (tmp, 1)) ]);
18367 break;
18370 case 0:
18371 if (REG_P (x))
18372 fprintf (file, "%s", reg_names[REGNO (x)]);
18373 else if (MEM_P (x))
18375 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18376 know the width from the mode. */
18377 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
18378 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
18379 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18380 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
18381 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
18382 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18383 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18384 output_address (XEXP (XEXP (x, 0), 1));
18385 else
18386 output_address (XEXP (x, 0));
18388 else
18390 if (toc_relative_expr_p (x, false))
18391 /* This hack along with a corresponding hack in
18392 rs6000_output_addr_const_extra arranges to output addends
18393 where the assembler expects to find them, e.g.
18394 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18395 without this hack would be output as "x@toc+4". We
18396 want "x+4@toc". */
18397 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18398 else
18399 output_addr_const (file, x);
18401 return;
18403 case '&':
18404 assemble_name (file, rs6000_get_some_local_dynamic_name ());
18405 return;
18407 default:
18408 output_operand_lossage ("invalid %%xn code");
18412 /* Print the address of an operand. */
18414 void
18415 print_operand_address (FILE *file, rtx x)
18417 if (REG_P (x))
18418 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
18419 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
18420 || GET_CODE (x) == LABEL_REF)
18422 output_addr_const (file, x);
18423 if (small_data_operand (x, GET_MODE (x)))
18424 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18425 reg_names[SMALL_DATA_REG]);
18426 else
18427 gcc_assert (!TARGET_TOC);
18429 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18430 && REG_P (XEXP (x, 1)))
18432 if (REGNO (XEXP (x, 0)) == 0)
18433 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
18434 reg_names[ REGNO (XEXP (x, 0)) ]);
18435 else
18436 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
18437 reg_names[ REGNO (XEXP (x, 1)) ]);
18439 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18440 && GET_CODE (XEXP (x, 1)) == CONST_INT)
18441 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
18442 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
18443 #if TARGET_MACHO
18444 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18445 && CONSTANT_P (XEXP (x, 1)))
18447 fprintf (file, "lo16(");
18448 output_addr_const (file, XEXP (x, 1));
18449 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18451 #endif
18452 #if TARGET_ELF
18453 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18454 && CONSTANT_P (XEXP (x, 1)))
18456 output_addr_const (file, XEXP (x, 1));
18457 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18459 #endif
18460 else if (toc_relative_expr_p (x, false))
18462 /* This hack along with a corresponding hack in
18463 rs6000_output_addr_const_extra arranges to output addends
18464 where the assembler expects to find them, e.g.
18465 (lo_sum (reg 9)
18466 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18467 without this hack would be output as "x@toc+8@l(9)". We
18468 want "x+8@toc@l(9)". */
18469 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18470 if (GET_CODE (x) == LO_SUM)
18471 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
18472 else
18473 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
18475 else
18476 gcc_unreachable ();
18479 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18481 static bool
18482 rs6000_output_addr_const_extra (FILE *file, rtx x)
18484 if (GET_CODE (x) == UNSPEC)
18485 switch (XINT (x, 1))
18487 case UNSPEC_TOCREL:
18488 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
18489 && REG_P (XVECEXP (x, 0, 1))
18490 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
18491 output_addr_const (file, XVECEXP (x, 0, 0));
18492 if (x == tocrel_base && tocrel_offset != const0_rtx)
18494 if (INTVAL (tocrel_offset) >= 0)
18495 fprintf (file, "+");
18496 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
18498 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
18500 putc ('-', file);
18501 assemble_name (file, toc_label_name);
18503 else if (TARGET_ELF)
18504 fputs ("@toc", file);
18505 return true;
18507 #if TARGET_MACHO
18508 case UNSPEC_MACHOPIC_OFFSET:
18509 output_addr_const (file, XVECEXP (x, 0, 0));
18510 putc ('-', file);
18511 machopic_output_function_base_name (file);
18512 return true;
18513 #endif
18515 return false;
18518 /* Target hook for assembling integer objects. The PowerPC version has
18519 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18520 is defined. It also needs to handle DI-mode objects on 64-bit
18521 targets. */
18523 static bool
18524 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
18526 #ifdef RELOCATABLE_NEEDS_FIXUP
18527 /* Special handling for SI values. */
18528 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
18530 static int recurse = 0;
18532 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18533 the .fixup section. Since the TOC section is already relocated, we
18534 don't need to mark it here. We used to skip the text section, but it
18535 should never be valid for relocated addresses to be placed in the text
18536 section. */
18537 if (TARGET_RELOCATABLE
18538 && in_section != toc_section
18539 && !recurse
18540 && !CONST_SCALAR_INT_P (x)
18541 && CONSTANT_P (x))
18543 char buf[256];
18545 recurse = 1;
18546 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
18547 fixuplabelno++;
18548 ASM_OUTPUT_LABEL (asm_out_file, buf);
18549 fprintf (asm_out_file, "\t.long\t(");
18550 output_addr_const (asm_out_file, x);
18551 fprintf (asm_out_file, ")@fixup\n");
18552 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
18553 ASM_OUTPUT_ALIGN (asm_out_file, 2);
18554 fprintf (asm_out_file, "\t.long\t");
18555 assemble_name (asm_out_file, buf);
18556 fprintf (asm_out_file, "\n\t.previous\n");
18557 recurse = 0;
18558 return true;
18560 /* Remove initial .'s to turn a -mcall-aixdesc function
18561 address into the address of the descriptor, not the function
18562 itself. */
18563 else if (GET_CODE (x) == SYMBOL_REF
18564 && XSTR (x, 0)[0] == '.'
18565 && DEFAULT_ABI == ABI_AIX)
18567 const char *name = XSTR (x, 0);
18568 while (*name == '.')
18569 name++;
18571 fprintf (asm_out_file, "\t.long\t%s\n", name);
18572 return true;
18575 #endif /* RELOCATABLE_NEEDS_FIXUP */
18576 return default_assemble_integer (x, size, aligned_p);
18579 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18580 /* Emit an assembler directive to set symbol visibility for DECL to
18581 VISIBILITY_TYPE. */
18583 static void
18584 rs6000_assemble_visibility (tree decl, int vis)
18586 if (TARGET_XCOFF)
18587 return;
18589 /* Functions need to have their entry point symbol visibility set as
18590 well as their descriptor symbol visibility. */
18591 if (DEFAULT_ABI == ABI_AIX
18592 && DOT_SYMBOLS
18593 && TREE_CODE (decl) == FUNCTION_DECL)
18595 static const char * const visibility_types[] = {
18596 NULL, "internal", "hidden", "protected"
18599 const char *name, *type;
18601 name = ((* targetm.strip_name_encoding)
18602 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
18603 type = visibility_types[vis];
18605 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
18606 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
18608 else
18609 default_assemble_visibility (decl, vis);
18611 #endif
18613 enum rtx_code
18614 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
18616 /* Reversal of FP compares needs care -- an ordered compare
18617 becomes an unordered compare and vice versa.  */
18618 if (mode == CCFPmode
18619 && (!flag_finite_math_only
18620 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
18621 || code == UNEQ || code == LTGT))
18622 return reverse_condition_maybe_unordered (code);
18623 else
18624 return reverse_condition (code);
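/* For instance (illustrative), under CCFPmode with NaNs in play the
   reverse of GE must be UNLT rather than LT: if either operand is a
   NaN, a >= b is false, so the reversed test has to be true, which
   only the unordered form guarantees.  */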
18627 /* Generate a compare for CODE. Return a brand-new rtx that
18628 represents the result of the compare. */
18630 static rtx
18631 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
18633 enum machine_mode comp_mode;
18634 rtx compare_result;
18635 enum rtx_code code = GET_CODE (cmp);
18636 rtx op0 = XEXP (cmp, 0);
18637 rtx op1 = XEXP (cmp, 1);
18639 if (FLOAT_MODE_P (mode))
18640 comp_mode = CCFPmode;
18641 else if (code == GTU || code == LTU
18642 || code == GEU || code == LEU)
18643 comp_mode = CCUNSmode;
18644 else if ((code == EQ || code == NE)
18645 && unsigned_reg_p (op0)
18646 && (unsigned_reg_p (op1)
18647 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
18648 /* These are unsigned values, perhaps there will be a later
18649 ordering compare that can be shared with this one. */
18650 comp_mode = CCUNSmode;
18651 else
18652 comp_mode = CCmode;
18654 /* If we have an unsigned compare, make sure we don't have a signed value as
18655 an immediate. */
18656 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
18657 && INTVAL (op1) < 0)
18659 op0 = copy_rtx_if_shared (op0);
18660 op1 = force_reg (GET_MODE (op0), op1);
18661 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
18664 /* First, the compare. */
18665 compare_result = gen_reg_rtx (comp_mode);
18667 /* E500 FP compare instructions on the GPRs. Yuck! */
18668 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
18669 && FLOAT_MODE_P (mode))
18671 rtx cmp, or_result, compare_result2;
18672 enum machine_mode op_mode = GET_MODE (op0);
18673 bool reverse_p;
18675 if (op_mode == VOIDmode)
18676 op_mode = GET_MODE (op1);
18678 /* First reverse the condition codes that aren't directly supported. */
18679 switch (code)
18681 case NE:
18682 case UNLT:
18683 case UNLE:
18684 case UNGT:
18685 case UNGE:
18686 code = reverse_condition_maybe_unordered (code);
18687 reverse_p = true;
18688 break;
18690 case EQ:
18691 case LT:
18692 case LE:
18693 case GT:
18694 case GE:
18695 reverse_p = false;
18696 break;
18698 default:
18699 gcc_unreachable ();
18702 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18703 This explains the following mess. */
18705 switch (code)
18707 case EQ:
18708 switch (op_mode)
18710 case SFmode:
18711 cmp = (flag_finite_math_only && !flag_trapping_math)
18712 ? gen_tstsfeq_gpr (compare_result, op0, op1)
18713 : gen_cmpsfeq_gpr (compare_result, op0, op1);
18714 break;
18716 case DFmode:
18717 cmp = (flag_finite_math_only && !flag_trapping_math)
18718 ? gen_tstdfeq_gpr (compare_result, op0, op1)
18719 : gen_cmpdfeq_gpr (compare_result, op0, op1);
18720 break;
18722 case TFmode:
18723 cmp = (flag_finite_math_only && !flag_trapping_math)
18724 ? gen_tsttfeq_gpr (compare_result, op0, op1)
18725 : gen_cmptfeq_gpr (compare_result, op0, op1);
18726 break;
18728 default:
18729 gcc_unreachable ();
18731 break;
18733 case GT:
18734 case GE:
18735 switch (op_mode)
18737 case SFmode:
18738 cmp = (flag_finite_math_only && !flag_trapping_math)
18739 ? gen_tstsfgt_gpr (compare_result, op0, op1)
18740 : gen_cmpsfgt_gpr (compare_result, op0, op1);
18741 break;
18743 case DFmode:
18744 cmp = (flag_finite_math_only && !flag_trapping_math)
18745 ? gen_tstdfgt_gpr (compare_result, op0, op1)
18746 : gen_cmpdfgt_gpr (compare_result, op0, op1);
18747 break;
18749 case TFmode:
18750 cmp = (flag_finite_math_only && !flag_trapping_math)
18751 ? gen_tsttfgt_gpr (compare_result, op0, op1)
18752 : gen_cmptfgt_gpr (compare_result, op0, op1);
18753 break;
18755 default:
18756 gcc_unreachable ();
18758 break;
18760 case LT:
18761 case LE:
18762 switch (op_mode)
18764 case SFmode:
18765 cmp = (flag_finite_math_only && !flag_trapping_math)
18766 ? gen_tstsflt_gpr (compare_result, op0, op1)
18767 : gen_cmpsflt_gpr (compare_result, op0, op1);
18768 break;
18770 case DFmode:
18771 cmp = (flag_finite_math_only && !flag_trapping_math)
18772 ? gen_tstdflt_gpr (compare_result, op0, op1)
18773 : gen_cmpdflt_gpr (compare_result, op0, op1);
18774 break;
18776 case TFmode:
18777 cmp = (flag_finite_math_only && !flag_trapping_math)
18778 ? gen_tsttflt_gpr (compare_result, op0, op1)
18779 : gen_cmptflt_gpr (compare_result, op0, op1);
18780 break;
18782 default:
18783 gcc_unreachable ();
18785 break;
18787 default:
18788 gcc_unreachable ();
18791 /* Synthesize LE and GE from LT/GT || EQ. */
18792 if (code == LE || code == GE)
18794 emit_insn (cmp);
18796 compare_result2 = gen_reg_rtx (CCFPmode);
18798 /* Do the EQ. */
18799 switch (op_mode)
18801 case SFmode:
18802 cmp = (flag_finite_math_only && !flag_trapping_math)
18803 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
18804 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
18805 break;
18807 case DFmode:
18808 cmp = (flag_finite_math_only && !flag_trapping_math)
18809 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
18810 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
18811 break;
18813 case TFmode:
18814 cmp = (flag_finite_math_only && !flag_trapping_math)
18815 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
18816 : gen_cmptfeq_gpr (compare_result2, op0, op1);
18817 break;
18819 default:
18820 gcc_unreachable ();
18823 emit_insn (cmp);
18825 /* OR them together. */
18826 or_result = gen_reg_rtx (CCFPmode);
18827 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
18828 compare_result2);
18829 compare_result = or_result;
18832 code = reverse_p ? NE : EQ;
18834 emit_insn (cmp);
18836 else
18838 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
18839 CLOBBERs to match cmptf_internal2 pattern. */
18840 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
18841 && GET_MODE (op0) == TFmode
18842 && !TARGET_IEEEQUAD
18843 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
18844 emit_insn (gen_rtx_PARALLEL (VOIDmode,
18845 gen_rtvec (10,
18846 gen_rtx_SET (VOIDmode,
18847 compare_result,
18848 gen_rtx_COMPARE (comp_mode, op0, op1)),
18849 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18850 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18851 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18852 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18853 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18854 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18855 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18856 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
18857 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
18858 else if (GET_CODE (op1) == UNSPEC
18859 && XINT (op1, 1) == UNSPEC_SP_TEST)
18861 rtx op1b = XVECEXP (op1, 0, 0);
18862 comp_mode = CCEQmode;
18863 compare_result = gen_reg_rtx (CCEQmode);
18864 if (TARGET_64BIT)
18865 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
18866 else
18867 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
18869 else
18870 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
18871 gen_rtx_COMPARE (comp_mode, op0, op1)));
18874 /* Some kinds of FP comparisons need an OR operation;
18875 under flag_finite_math_only we don't bother. */
18876 if (FLOAT_MODE_P (mode)
18877 && !flag_finite_math_only
18878 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
18879 && (code == LE || code == GE
18880 || code == UNEQ || code == LTGT
18881 || code == UNGT || code == UNLT))
18883 enum rtx_code or1, or2;
18884 rtx or1_rtx, or2_rtx, compare2_rtx;
18885 rtx or_result = gen_reg_rtx (CCEQmode);
18887 switch (code)
18889 case LE: or1 = LT; or2 = EQ; break;
18890 case GE: or1 = GT; or2 = EQ; break;
18891 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
18892 case LTGT: or1 = LT; or2 = GT; break;
18893 case UNGT: or1 = UNORDERED; or2 = GT; break;
18894 case UNLT: or1 = UNORDERED; or2 = LT; break;
18895 default: gcc_unreachable ();
18897 validate_condition_mode (or1, comp_mode);
18898 validate_condition_mode (or2, comp_mode);
18899 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
18900 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
18901 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
18902 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
18903 const_true_rtx);
18904 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
18906 compare_result = or_result;
18907 code = EQ;
18910 validate_condition_mode (code, GET_MODE (compare_result));
18912 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
18916 /* Emit the RTL for an sISEL pattern. */
18918 void
18919 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
18921 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
18924 void
18925 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
18927 rtx condition_rtx;
18928 enum machine_mode op_mode;
18929 enum rtx_code cond_code;
18930 rtx result = operands[0];
18932 if (TARGET_ISEL && (mode == SImode || mode == DImode))
18934 rs6000_emit_sISEL (mode, operands);
18935 return;
18938 condition_rtx = rs6000_generate_compare (operands[1], mode);
18939 cond_code = GET_CODE (condition_rtx);
18941 if (FLOAT_MODE_P (mode)
18942 && !TARGET_FPRS && TARGET_HARD_FLOAT)
18944 rtx t;
18946 PUT_MODE (condition_rtx, SImode);
18947 t = XEXP (condition_rtx, 0);
18949 gcc_assert (cond_code == NE || cond_code == EQ);
18951 if (cond_code == NE)
18952 emit_insn (gen_e500_flip_gt_bit (t, t));
18954 emit_insn (gen_move_from_CR_gt_bit (result, t));
18955 return;
18958 if (cond_code == NE
18959 || cond_code == GE || cond_code == LE
18960 || cond_code == GEU || cond_code == LEU
18961 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
18963 rtx not_result = gen_reg_rtx (CCEQmode);
18964 rtx not_op, rev_cond_rtx;
18965 enum machine_mode cc_mode;
18967 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
18969 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
18970 SImode, XEXP (condition_rtx, 0), const0_rtx);
18971 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
18972 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
18973 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
18976 op_mode = GET_MODE (XEXP (operands[1], 0));
18977 if (op_mode == VOIDmode)
18978 op_mode = GET_MODE (XEXP (operands[1], 1));
18980 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
18982 PUT_MODE (condition_rtx, DImode);
18983 convert_move (result, condition_rtx, 0);
18985 else
18987 PUT_MODE (condition_rtx, SImode);
18988 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
18992 /* Emit a conditional branch; operands[0] holds the comparison and operands[3] the target label.  */
18994 void
18995 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
18997 rtx condition_rtx, loc_ref;
18999 condition_rtx = rs6000_generate_compare (operands[0], mode);
19000 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
19001 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
19002 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
19003 loc_ref, pc_rtx)));
19006 /* Return the string to output a conditional branch to LABEL, which is
19007 the operand template of the label, or NULL if the branch is really a
19008 conditional return.
19010 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19011 condition code register and its mode specifies what kind of
19012 comparison we made.
19014 REVERSED is nonzero if we should reverse the sense of the comparison.
19016 INSN is the insn. */
19018 char *
19019 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
19021 static char string[64];
19022 enum rtx_code code = GET_CODE (op);
19023 rtx cc_reg = XEXP (op, 0);
19024 enum machine_mode mode = GET_MODE (cc_reg);
19025 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
19026 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
19027 int really_reversed = reversed ^ need_longbranch;
19028 char *s = string;
19029 const char *ccode;
19030 const char *pred;
19031 rtx note;
19033 validate_condition_mode (code, mode);
19035 /* Work out which way this really branches. We could use
19036 reverse_condition_maybe_unordered here always but this
19037 makes the resulting assembler clearer. */
19038 if (really_reversed)
19040 /* Reversal of FP compares needs care -- an ordered compare
19041 becomes an unordered compare and vice versa.  */
19042 if (mode == CCFPmode)
19043 code = reverse_condition_maybe_unordered (code);
19044 else
19045 code = reverse_condition (code);
19048 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
19050 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19051 to the GT bit. */
19052 switch (code)
19054 case EQ:
19055 /* Opposite of GT. */
19056 code = GT;
19057 break;
19059 case NE:
19060 code = UNLE;
19061 break;
19063 default:
19064 gcc_unreachable ();
19068 switch (code)
19070 /* Not all of these are actually distinct opcodes, but
19071 we distinguish them for clarity of the resulting assembler. */
19072 case NE: case LTGT:
19073 ccode = "ne"; break;
19074 case EQ: case UNEQ:
19075 ccode = "eq"; break;
19076 case GE: case GEU:
19077 ccode = "ge"; break;
19078 case GT: case GTU: case UNGT:
19079 ccode = "gt"; break;
19080 case LE: case LEU:
19081 ccode = "le"; break;
19082 case LT: case LTU: case UNLT:
19083 ccode = "lt"; break;
19084 case UNORDERED: ccode = "un"; break;
19085 case ORDERED: ccode = "nu"; break;
19086 case UNGE: ccode = "nl"; break;
19087 case UNLE: ccode = "ng"; break;
19088 default:
19089 gcc_unreachable ();
19092 /* Maybe we have a guess as to how likely the branch is. */
19093 pred = "";
19094 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
19095 if (note != NULL_RTX)
19097 /* PROB is the difference from 50%. */
19098 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
19100 /* Only hint for highly probable/improbable branches on newer
19101 cpus as static prediction overrides processor dynamic
19102 prediction. For older cpus we may as well always hint, but
19103 assume not taken for branches that are very close to 50% as a
19104 mispredicted taken branch is more expensive than a
19105 mispredicted not-taken branch. */
19106 if (rs6000_always_hint
19107 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
19108 && br_prob_note_reliable_p (note)))
19110 if (abs (prob) > REG_BR_PROB_BASE / 20
19111 && ((prob > 0) ^ need_longbranch))
19112 pred = "+";
19113 else
19114 pred = "-";
19118 if (label == NULL)
19119 s += sprintf (s, "b%slr%s ", ccode, pred);
19120 else
19121 s += sprintf (s, "b%s%s ", ccode, pred);
19123 /* We need to escape any '%' characters in the reg_names string.
19124 Assume they'd only be the first character.... */
19125 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
19126 *s++ = '%';
19127 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
19129 if (label != NULL)
19131 /* If the branch distance was too far, we may have to use an
19132 unconditional branch to go the distance. */
19133 if (need_longbranch)
19134 s += sprintf (s, ",$+8\n\tb %s", label);
19135 else
19136 s += sprintf (s, ",%s", label);
19139 return string;
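/* Illustrative outputs (not from the original source): a short branch
   hinted taken on cr0 might come out as "beq+ 0,.L5", while a branch
   whose target is out of conditional range is emitted reversed around
   an unconditional branch:
	bne 0,$+8
	b .L5  */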
19142 /* Return the string to flip the GT bit on a CR. */
19143 char *
19144 output_e500_flip_gt_bit (rtx dst, rtx src)
19146 static char string[64];
19147 int a, b;
19149 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
19150 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
19152 /* GT bit. */
19153 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
19154 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
19156 sprintf (string, "crnot %d,%d", a, b);
19157 return string;
19160 /* Return insn for VSX or Altivec comparisons. */
19162 static rtx
19163 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19165 rtx mask;
19166 enum machine_mode mode = GET_MODE (op0);
19168 switch (code)
19170 default:
19171 break;
19173 case GE:
19174 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19175 return NULL_RTX;
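/* Falls through for floating-point GE, which the direct vector
   compare below handles; only the integer form lacks an insn here.  */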
19177 case EQ:
19178 case GT:
19179 case GTU:
19180 case ORDERED:
19181 case UNORDERED:
19182 case UNEQ:
19183 case LTGT:
19184 mask = gen_reg_rtx (mode);
19185 emit_insn (gen_rtx_SET (VOIDmode,
19186 mask,
19187 gen_rtx_fmt_ee (code, mode, op0, op1)));
19188 return mask;
19191 return NULL_RTX;
19194 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19195 DMODE is expected destination mode. This is a recursive function. */
19197 static rtx
19198 rs6000_emit_vector_compare (enum rtx_code rcode,
19199 rtx op0, rtx op1,
19200 enum machine_mode dmode)
19202 rtx mask;
19203 bool swap_operands = false;
19204 bool try_again = false;
19206 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19207 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19209 /* See if the comparison works as is. */
19210 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19211 if (mask)
19212 return mask;
19214 switch (rcode)
19216 case LT:
19217 rcode = GT;
19218 swap_operands = true;
19219 try_again = true;
19220 break;
19221 case LTU:
19222 rcode = GTU;
19223 swap_operands = true;
19224 try_again = true;
19225 break;
19226 case NE:
19227 case UNLE:
19228 case UNLT:
19229 case UNGE:
19230 case UNGT:
19231 /* Invert condition and try again.
19232 e.g., A != B becomes ~(A==B). */
19234 enum rtx_code rev_code;
19235 enum insn_code nor_code;
19236 rtx mask2;
19238 rev_code = reverse_condition_maybe_unordered (rcode);
19239 if (rev_code == UNKNOWN)
19240 return NULL_RTX;
19242 nor_code = optab_handler (one_cmpl_optab, dmode);
19243 if (nor_code == CODE_FOR_nothing)
19244 return NULL_RTX;
19246 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19247 if (!mask2)
19248 return NULL_RTX;
19250 mask = gen_reg_rtx (dmode);
19251 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19252 return mask;
19254 break;
19255 case GE:
19256 case GEU:
19257 case LE:
19258 case LEU:
19259 /* Try GT/GTU/LT/LTU OR EQ */
19261 rtx c_rtx, eq_rtx;
19262 enum insn_code ior_code;
19263 enum rtx_code new_code;
19265 switch (rcode)
19267 case GE:
19268 new_code = GT;
19269 break;
19271 case GEU:
19272 new_code = GTU;
19273 break;
19275 case LE:
19276 new_code = LT;
19277 break;
19279 case LEU:
19280 new_code = LTU;
19281 break;
19283 default:
19284 gcc_unreachable ();
19287 ior_code = optab_handler (ior_optab, dmode);
19288 if (ior_code == CODE_FOR_nothing)
19289 return NULL_RTX;
19291 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
19292 if (!c_rtx)
19293 return NULL_RTX;
19295 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
19296 if (!eq_rtx)
19297 return NULL_RTX;
19299 mask = gen_reg_rtx (dmode);
19300 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
19301 return mask;
19303 break;
19304 default:
19305 return NULL_RTX;
19308 if (try_again)
19310 if (swap_operands)
19312 rtx tmp;
19313 tmp = op0;
19314 op0 = op1;
19315 op1 = tmp;
19318 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19319 if (mask)
19320 return mask;
19323 /* You only get two chances. */
19324 return NULL_RTX;
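/* Sketch of the rewrites above (illustrative): LT/LTU become GT/GTU
   with the operands swapped; NE and the unordered forms become the
   one's complement of the reversed compare (A != B as ~(A == B)); and
   GE/GEU/LE/LEU become the IOR of the strict compare with EQ.  */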
19327 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19328 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19329 operands for the relation operation COND. */
19331 int
19332 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
19333 rtx cond, rtx cc_op0, rtx cc_op1)
19335 enum machine_mode dest_mode = GET_MODE (dest);
19336 enum machine_mode mask_mode = GET_MODE (cc_op0);
19337 enum rtx_code rcode = GET_CODE (cond);
19338 enum machine_mode cc_mode = CCmode;
19339 rtx mask;
19340 rtx cond2;
19341 rtx tmp;
19342 bool invert_move = false;
19344 if (VECTOR_UNIT_NONE_P (dest_mode))
19345 return 0;
19347 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
19348 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
19350 switch (rcode)
19352 /* Swap operands if we can, and fall back to doing the operation as
19353 specified, and doing a NOR to invert the test. */
19354 case NE:
19355 case UNLE:
19356 case UNLT:
19357 case UNGE:
19358 case UNGT:
19359 /* Invert condition and try again.
19360 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19361 invert_move = true;
19362 rcode = reverse_condition_maybe_unordered (rcode);
19363 if (rcode == UNKNOWN)
19364 return 0;
19365 break;
19367 /* Mark unsigned tests with CCUNSmode. */
19368 case GTU:
19369 case GEU:
19370 case LTU:
19371 case LEU:
19372 cc_mode = CCUNSmode;
19373 break;
19375 default:
19376 break;
19379 /* Get the vector mask for the given relational operations. */
19380 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
19382 if (!mask)
19383 return 0;
19385 if (invert_move)
19387 tmp = op_true;
19388 op_true = op_false;
19389 op_false = tmp;
19392 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
19393 CONST0_RTX (dest_mode));
19394 emit_insn (gen_rtx_SET (VOIDmode,
19395 dest,
19396 gen_rtx_IF_THEN_ELSE (dest_mode,
19397 cond2,
19398 op_true,
19399 op_false)));
19400 return 1;
19403 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
19404 operands of the last comparison is nonzero/true, FALSE_COND if it
19405 is zero/false. Return 0 if the hardware has no such operation. */
19407 int
19408 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19410 enum rtx_code code = GET_CODE (op);
19411 rtx op0 = XEXP (op, 0);
19412 rtx op1 = XEXP (op, 1);
19413 REAL_VALUE_TYPE c1;
19414 enum machine_mode compare_mode = GET_MODE (op0);
19415 enum machine_mode result_mode = GET_MODE (dest);
19416 rtx temp;
19417 bool is_against_zero;
19419 /* These modes should always match. */
19420 if (GET_MODE (op1) != compare_mode
19421 /* In the isel case however, we can use a compare immediate, so
19422 op1 may be a small constant. */
19423 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
19424 return 0;
19425 if (GET_MODE (true_cond) != result_mode)
19426 return 0;
19427 if (GET_MODE (false_cond) != result_mode)
19428 return 0;
19430 /* Don't allow using floating point comparisons for integer results for
19431 now. */
19432 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
19433 return 0;
19435 /* First, work out if the hardware can do this at all, or
19436 if it's too slow.... */
19437 if (!FLOAT_MODE_P (compare_mode))
19439 if (TARGET_ISEL)
19440 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
19441 return 0;
19443 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
19444 && SCALAR_FLOAT_MODE_P (compare_mode))
19445 return 0;
19447 is_against_zero = op1 == CONST0_RTX (compare_mode);
19449 /* A floating-point subtract might overflow, underflow, or produce
19450 an inexact result, thus changing the floating-point flags, so it
19451 can't be generated if we care about that. It's safe if one side
19452 of the construct is zero, since then no subtract will be
19453 generated. */
19454 if (SCALAR_FLOAT_MODE_P (compare_mode)
19455 && flag_trapping_math && ! is_against_zero)
19456 return 0;
19458 /* Eliminate half of the comparisons by switching operands, this
19459 makes the remaining code simpler. */
19460 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
19461 || code == LTGT || code == LT || code == UNLE)
19463 code = reverse_condition_maybe_unordered (code);
19464 temp = true_cond;
19465 true_cond = false_cond;
19466 false_cond = temp;
19469 /* UNEQ and LTGT take four instructions for a comparison with zero,
19470 it'll probably be faster to use a branch here too. */
19471 if (code == UNEQ && HONOR_NANS (compare_mode))
19472 return 0;
19474 if (GET_CODE (op1) == CONST_DOUBLE)
19475 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
19477 /* We're going to try to implement comparisons by performing
19478 a subtract, then comparing against zero. Unfortunately,
19479 Inf - Inf is NaN which is not zero, and so if we don't
19480 know that the operand is finite and the comparison
19481 would treat EQ different to UNORDERED, we can't do it. */
19482 if (HONOR_INFINITIES (compare_mode)
19483 && code != GT && code != UNGE
19484 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
19485 /* Constructs of the form (a OP b ? a : b) are safe. */
19486 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
19487 || (! rtx_equal_p (op0, true_cond)
19488 && ! rtx_equal_p (op1, true_cond))))
19489 return 0;
19491 /* At this point we know we can use fsel. */
19493 /* Reduce the comparison to a comparison against zero. */
19494 if (! is_against_zero)
19496 temp = gen_reg_rtx (compare_mode);
19497 emit_insn (gen_rtx_SET (VOIDmode, temp,
19498 gen_rtx_MINUS (compare_mode, op0, op1)));
19499 op0 = temp;
19500 op1 = CONST0_RTX (compare_mode);
19503 /* If we don't care about NaNs we can reduce some of the comparisons
19504 down to faster ones. */
19505 if (! HONOR_NANS (compare_mode))
19506 switch (code)
19508 case GT:
19509 code = LE;
19510 temp = true_cond;
19511 true_cond = false_cond;
19512 false_cond = temp;
19513 break;
19514 case UNGE:
19515 code = GE;
19516 break;
19517 case UNEQ:
19518 code = EQ;
19519 break;
19520 default:
19521 break;
19524 /* Now, reduce everything down to a GE. */
19525 switch (code)
19527 case GE:
19528 break;
19530 case LE:
19531 temp = gen_reg_rtx (compare_mode);
19532 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19533 op0 = temp;
19534 break;
19536 case ORDERED:
19537 temp = gen_reg_rtx (compare_mode);
19538 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
19539 op0 = temp;
19540 break;
19542 case EQ:
19543 temp = gen_reg_rtx (compare_mode);
19544 emit_insn (gen_rtx_SET (VOIDmode, temp,
19545 gen_rtx_NEG (compare_mode,
19546 gen_rtx_ABS (compare_mode, op0))));
19547 op0 = temp;
19548 break;
19550 case UNGE:
19551 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19552 temp = gen_reg_rtx (result_mode);
19553 emit_insn (gen_rtx_SET (VOIDmode, temp,
19554 gen_rtx_IF_THEN_ELSE (result_mode,
19555 gen_rtx_GE (VOIDmode,
19556 op0, op1),
19557 true_cond, false_cond)));
19558 false_cond = true_cond;
19559 true_cond = temp;
19561 temp = gen_reg_rtx (compare_mode);
19562 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19563 op0 = temp;
19564 break;
19566 case GT:
19567 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19568 temp = gen_reg_rtx (result_mode);
19569 emit_insn (gen_rtx_SET (VOIDmode, temp,
19570 gen_rtx_IF_THEN_ELSE (result_mode,
19571 gen_rtx_GE (VOIDmode,
19572 op0, op1),
19573 true_cond, false_cond)));
19574 true_cond = false_cond;
19575 false_cond = temp;
19577 temp = gen_reg_rtx (compare_mode);
19578 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19579 op0 = temp;
19580 break;
19582 default:
19583 gcc_unreachable ();
19586 emit_insn (gen_rtx_SET (VOIDmode, dest,
19587 gen_rtx_IF_THEN_ELSE (result_mode,
19588 gen_rtx_GE (VOIDmode,
19589 op0, op1),
19590 true_cond, false_cond)));
19591 return 1;
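/* Minimal sketch of the final form (illustrative): everything above
   funnels into "dest = (op0 >= op1) ? true_cond : false_cond" with
   op1 reduced to zero, which matches the fsel semantics
   FRT = (FRA >= 0.0) ? FRC : FRB, e.g. "fsel 1,4,2,3".  */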
19594 /* Same as above, but for ints (isel). */
19596 static int
19597 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19599 rtx condition_rtx, cr;
19600 enum machine_mode mode = GET_MODE (dest);
19601 enum rtx_code cond_code;
19602 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
19603 bool signedp;
19605 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
19606 return 0;
19608 /* We still have to do the compare, because isel doesn't do a
19609 compare, it just looks at the CRx bits set by a previous compare
19610 instruction. */
19611 condition_rtx = rs6000_generate_compare (op, mode);
19612 cond_code = GET_CODE (condition_rtx);
19613 cr = XEXP (condition_rtx, 0);
19614 signedp = GET_MODE (cr) == CCmode;
19616 isel_func = (mode == SImode
19617 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
19618 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
19620 switch (cond_code)
19622 case LT: case GT: case LTU: case GTU: case EQ:
19623 /* isel handles these directly. */
19624 break;
19626 default:
19627 /* We need to swap the sense of the comparison. */
19629 rtx t = true_cond;
19630 true_cond = false_cond;
19631 false_cond = t;
19632 PUT_CODE (condition_rtx, reverse_condition (cond_code));
19634 break;
19637 false_cond = force_reg (mode, false_cond);
19638 if (true_cond != const0_rtx)
19639 true_cond = force_reg (mode, true_cond);
19641 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
19643 return 1;
19646 const char *
19647 output_isel (rtx *operands)
19649 enum rtx_code code;
19651 code = GET_CODE (operands[1]);
19653 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
19655 gcc_assert (GET_CODE (operands[2]) == REG
19656 && GET_CODE (operands[3]) == REG);
19657 PUT_CODE (operands[1], reverse_condition (code));
19658 return "isel %0,%3,%2,%j1";
19661 return "isel %0,%2,%3,%j1";
19664 void
19665 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
19667 enum machine_mode mode = GET_MODE (op0);
19668 enum rtx_code c;
19669 rtx target;
19671 /* VSX/altivec have direct min/max insns. */
19672 if ((code == SMAX || code == SMIN)
19673 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
19674 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
19676 emit_insn (gen_rtx_SET (VOIDmode,
19677 dest,
19678 gen_rtx_fmt_ee (code, mode, op0, op1)));
19679 return;
19682 if (code == SMAX || code == SMIN)
19683 c = GE;
19684 else
19685 c = GEU;
19687 if (code == SMAX || code == UMAX)
19688 target = emit_conditional_move (dest, c, op0, op1, mode,
19689 op0, op1, mode, 0);
19690 else
19691 target = emit_conditional_move (dest, c, op0, op1, mode,
19692 op1, op0, mode, 0);
19693 gcc_assert (target);
19694 if (target != dest)
19695 emit_move_insn (dest, target);
19698 /* A subroutine of the atomic operation splitters. Jump to LABEL if
19699 COND is true. Mark the jump as unlikely to be taken. */
19701 static void
19702 emit_unlikely_jump (rtx cond, rtx label)
19704 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
19705 rtx x;
19707 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
19708 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
19709 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
19712 /* A subroutine of the atomic operation splitters. Emit a load-locked
19713 instruction in MODE.  For QI/HImode, possibly use a pattern that includes
19714 the zero_extend operation.  */
19716 static void
19717 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
19719 rtx (*fn) (rtx, rtx) = NULL;
19721 switch (mode)
19723 case QImode:
19724 fn = gen_load_lockedqi;
19725 break;
19726 case HImode:
19727 fn = gen_load_lockedhi;
19728 break;
19729 case SImode:
19730 if (GET_MODE (mem) == QImode)
19731 fn = gen_load_lockedqi_si;
19732 else if (GET_MODE (mem) == HImode)
19733 fn = gen_load_lockedhi_si;
19734 else
19735 fn = gen_load_lockedsi;
19736 break;
19737 case DImode:
19738 fn = gen_load_lockeddi;
19739 break;
19740 case TImode:
19741 fn = gen_load_lockedti;
19742 break;
19743 default:
19744 gcc_unreachable ();
19746 emit_insn (fn (reg, mem));
19749 /* A subroutine of the atomic operation splitters. Emit a store-conditional
19750 instruction in MODE. */
19752 static void
19753 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
19755 rtx (*fn) (rtx, rtx, rtx) = NULL;
19757 switch (mode)
19759 case QImode:
19760 fn = gen_store_conditionalqi;
19761 break;
19762 case HImode:
19763 fn = gen_store_conditionalhi;
19764 break;
19765 case SImode:
19766 fn = gen_store_conditionalsi;
19767 break;
19768 case DImode:
19769 fn = gen_store_conditionaldi;
19770 break;
19771 case TImode:
19772 fn = gen_store_conditionalti;
19773 break;
19774 default:
19775 gcc_unreachable ();
19778 /* Emit sync before stwcx. to address PPC405 Erratum. */
19779 if (PPC405_ERRATUM77)
19780 emit_insn (gen_hwsync ());
19782 emit_insn (fn (res, mem, val));
19785 /* Expand barriers before and after a load_locked/store_cond sequence. */
19787 static rtx
19788 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
19790 rtx addr = XEXP (mem, 0);
19791 int strict_p = (reload_in_progress || reload_completed);
19793 if (!legitimate_indirect_address_p (addr, strict_p)
19794 && !legitimate_indexed_address_p (addr, strict_p))
19796 addr = force_reg (Pmode, addr);
19797 mem = replace_equiv_address_nv (mem, addr);
19800 switch (model)
19802 case MEMMODEL_RELAXED:
19803 case MEMMODEL_CONSUME:
19804 case MEMMODEL_ACQUIRE:
19805 break;
19806 case MEMMODEL_RELEASE:
19807 case MEMMODEL_ACQ_REL:
19808 emit_insn (gen_lwsync ());
19809 break;
19810 case MEMMODEL_SEQ_CST:
19811 emit_insn (gen_hwsync ());
19812 break;
19813 default:
19814 gcc_unreachable ();
19816 return mem;
19819 static void
19820 rs6000_post_atomic_barrier (enum memmodel model)
19822 switch (model)
19824 case MEMMODEL_RELAXED:
19825 case MEMMODEL_CONSUME:
19826 case MEMMODEL_RELEASE:
19827 break;
19828 case MEMMODEL_ACQUIRE:
19829 case MEMMODEL_ACQ_REL:
19830 case MEMMODEL_SEQ_CST:
19831 emit_insn (gen_isync ());
19832 break;
19833 default:
19834 gcc_unreachable ();
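/* Summary of the mapping above (illustrative): release and acq_rel
   orderings get an lwsync before the load-locked/store-conditional
   sequence, seq_cst gets a full hwsync, and acquire-flavored
   orderings get an isync afterwards -- the usual PowerPC
   lwsync/isync acquire-release recipe.  */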
19838 /* A subroutine of the various atomic expanders. For sub-word operations,
19839 we must adjust things to operate on SImode. Given the original MEM,
19840 return a new aligned memory. Also build and return the quantities by
19841 which to shift and mask. */
19843 static rtx
19844 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
19846 rtx addr, align, shift, mask, mem;
19847 HOST_WIDE_INT shift_mask;
19848 enum machine_mode mode = GET_MODE (orig_mem);
19850 /* For smaller modes, we have to implement this via SImode. */
19851 shift_mask = (mode == QImode ? 0x18 : 0x10);
19853 addr = XEXP (orig_mem, 0);
19854 addr = force_reg (GET_MODE (addr), addr);
19856 /* Aligned memory containing subword. Generate a new memory. We
19857 do not want any of the existing MEM_ATTR data, as we're now
19858 accessing memory outside the original object. */
19859 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
19860 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19861 mem = gen_rtx_MEM (SImode, align);
19862 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
19863 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
19864 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
19866 /* Shift amount for subword relative to aligned word. */
19867 shift = gen_reg_rtx (SImode);
19868 addr = gen_lowpart (SImode, addr);
19869 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
19870 if (WORDS_BIG_ENDIAN)
19871 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
19872 shift, 1, OPTAB_LIB_WIDEN);
19873 *pshift = shift;
19875 /* Mask for insertion. */
19876 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
19877 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
19878 *pmask = mask;
19880 return mem;
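/* Worked example (illustrative, big-endian, QImode): for a byte at
   address A, the aligned word is at A & -4; the raw shift is
   (A & 3) * 8, which the XOR with 0x18 converts to a shift counted
   from the left, so byte 0 shifts by 24; the mask is 0xff << shift.  */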
19883 /* A subroutine of the various atomic expanders. For sub-word operands,
19884 combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
19886 static rtx
19887 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
19889 rtx x;
19891 x = gen_reg_rtx (SImode);
19892 emit_insn (gen_rtx_SET (VOIDmode, x,
19893 gen_rtx_AND (SImode,
19894 gen_rtx_NOT (SImode, mask),
19895 oldval)));
19897 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
19899 return x;
19902 /* A subroutine of the various atomic expanders. For sub-word operands,
19903 extract WIDE to NARROW via SHIFT. */
19905 static void
19906 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
19908 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
19909 wide, 1, OPTAB_LIB_WIDEN);
19910 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
19913 /* Expand an atomic compare and swap operation. */
19915 void
19916 rs6000_expand_atomic_compare_and_swap (rtx operands[])
19918 rtx boolval, retval, mem, oldval, newval, cond;
19919 rtx label1, label2, x, mask, shift;
19920 enum machine_mode mode, orig_mode;
19921 enum memmodel mod_s, mod_f;
19922 bool is_weak;
19924 boolval = operands[0];
19925 retval = operands[1];
19926 mem = operands[2];
19927 oldval = operands[3];
19928 newval = operands[4];
19929 is_weak = (INTVAL (operands[5]) != 0);
19930 mod_s = (enum memmodel) INTVAL (operands[6]);
19931 mod_f = (enum memmodel) INTVAL (operands[7]);
19932 orig_mode = mode = GET_MODE (mem);
19934 mask = shift = NULL_RTX;
19935 if (mode == QImode || mode == HImode)
19937 /* Before power8, we didn't have access to lbarx/lharx, so generate
19938 lwarx and shift/mask operations.  With power8, we need to do the
19939 comparison in SImode, but the store is still done in QI/HImode.  */
19940 oldval = convert_modes (SImode, mode, oldval, 1);
19942 if (!TARGET_SYNC_HI_QI)
19944 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
19946 /* Shift and mask OLDVAL into position within the word. */
19947 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
19948 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19950 /* Shift and mask NEWVAL into position within the word. */
19951 newval = convert_modes (SImode, mode, newval, 1);
19952 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
19953 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19956 /* Prepare to adjust the return value. */
19957 retval = gen_reg_rtx (SImode);
19958 mode = SImode;
19960 else if (reg_overlap_mentioned_p (retval, oldval))
19961 oldval = copy_to_reg (oldval);
19963 mem = rs6000_pre_atomic_barrier (mem, mod_s);
19965 label1 = NULL_RTX;
19966 if (!is_weak)
19968 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
19969 emit_label (XEXP (label1, 0));
19971 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
19973 emit_load_locked (mode, retval, mem);
19975 x = retval;
19976 if (mask)
19978 x = expand_simple_binop (SImode, AND, retval, mask,
19979 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19982 cond = gen_reg_rtx (CCmode);
19983 /* If we have TImode, synthesize a comparison. */
19984 if (mode != TImode)
19985 x = gen_rtx_COMPARE (CCmode, x, oldval);
19986 else
19988 rtx xor1_result = gen_reg_rtx (DImode);
19989 rtx xor2_result = gen_reg_rtx (DImode);
19990 rtx or_result = gen_reg_rtx (DImode);
19991 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
19992 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
19993 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
19994 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
19996 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
19997 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
19998 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
19999 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
20002 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
20004 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20005 emit_unlikely_jump (x, label2);
20007 x = newval;
20008 if (mask)
20009 x = rs6000_mask_atomic_subword (retval, newval, mask);
20011 emit_store_conditional (orig_mode, cond, mem, x);
20013 if (!is_weak)
20015 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20016 emit_unlikely_jump (x, label1);
20019 if (mod_f != MEMMODEL_RELAXED)
20020 emit_label (XEXP (label2, 0));
20022 rs6000_post_atomic_barrier (mod_s);
20024 if (mod_f == MEMMODEL_RELAXED)
20025 emit_label (XEXP (label2, 0));
20027 if (shift)
20028 rs6000_finish_atomic_subword (operands[1], retval, shift);
20029 else if (mode != GET_MODE (operands[1]))
20030 convert_move (operands[1], retval, 1);
20032 /* In all cases, CR0 contains EQ on success, and NE on failure. */
20033 x = gen_rtx_EQ (SImode, cond, const0_rtx);
20034 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
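/* An illustrative sketch (exposition only, not the exact RTL) of the
   sequence emitted above for a strong SImode compare-and-swap, with
   barriers depending on the memory model:

        <pre-barrier>
     1: lwarx   r,0,mem         load-reserve
        cmpw    cr0,r,oldval
        bne-    cr0,2f          values differ -> fail
        stwcx.  newval,0,mem    store-conditional
        bne-    cr0,1b          lost the reservation -> retry
     2: <post-barrier>

   A weak compare-and-swap omits the retry branch back to 1:, and CR0
   ends up with EQ on success and NE on failure either way.  */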
20037 /* Expand an atomic exchange operation. */
20039 void
20040 rs6000_expand_atomic_exchange (rtx operands[])
20042 rtx retval, mem, val, cond;
20043 enum machine_mode mode;
20044 enum memmodel model;
20045 rtx label, x, mask, shift;
20047 retval = operands[0];
20048 mem = operands[1];
20049 val = operands[2];
20050 model = (enum memmodel) INTVAL (operands[3]);
20051 mode = GET_MODE (mem);
20053 mask = shift = NULL_RTX;
20054 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
20056 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20058 /* Shift and mask VAL into position within the word. */
20059 val = convert_modes (SImode, mode, val, 1);
20060 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20061 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20063 /* Prepare to adjust the return value. */
20064 retval = gen_reg_rtx (SImode);
20065 mode = SImode;
20068 mem = rs6000_pre_atomic_barrier (mem, model);
20070 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20071 emit_label (XEXP (label, 0));
20073 emit_load_locked (mode, retval, mem);
20075 x = val;
20076 if (mask)
20077 x = rs6000_mask_atomic_subword (retval, val, mask);
20079 cond = gen_reg_rtx (CCmode);
20080 emit_store_conditional (mode, cond, mem, x);
20082 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20083 emit_unlikely_jump (x, label);
20085 rs6000_post_atomic_barrier (model);
20087 if (shift)
20088 rs6000_finish_atomic_subword (operands[0], retval, shift);
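/* An illustrative sketch (exposition only) of the exchange loop
   emitted above for SImode:

     1: lwarx   retval,0,mem
        stwcx.  val,0,mem
        bne-    cr0,1b

   For QImode/HImode without lbarx/lharx the same loop runs on the
   containing aligned word, with VAL merged in via the mask.  */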
20091 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
20092 to perform. MEM is the memory on which to operate. VAL is the second
20093 operand of the binary operator. BEFORE and AFTER are optional locations to
20094 return the value of MEM either before or after the operation. MODEL_RTX
20095 is a CONST_INT containing the memory model to use. */
20097 void
20098 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
20099 rtx orig_before, rtx orig_after, rtx model_rtx)
20101 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
20102 enum machine_mode mode = GET_MODE (mem);
20103 enum machine_mode store_mode = mode;
20104 rtx label, x, cond, mask, shift;
20105 rtx before = orig_before, after = orig_after;
20107 mask = shift = NULL_RTX;
20108 /* On power8, we want to use SImode for the operation. On previous systems,
20109 do the operation on the containing word and shift/mask to get the proper
20110 byte or halfword. */
20111 if (mode == QImode || mode == HImode)
20113 if (TARGET_SYNC_HI_QI)
20115 val = convert_modes (SImode, mode, val, 1);
20117 /* Prepare to adjust the return value. */
20118 before = gen_reg_rtx (SImode);
20119 if (after)
20120 after = gen_reg_rtx (SImode);
20121 mode = SImode;
20123 else
20125 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20127 /* Shift and mask VAL into position within the word. */
20128 val = convert_modes (SImode, mode, val, 1);
20129 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20130 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20132 switch (code)
20134 case IOR:
20135 case XOR:
20136 /* We've already zero-extended VAL. That is sufficient to
20137 make certain that it does not affect other bits. */
20138 mask = NULL;
20139 break;
20141 case AND:
20142 /* If we make certain that all of the other bits in VAL are
20143 set, that will be sufficient to not affect other bits. */
20144 x = gen_rtx_NOT (SImode, mask);
20145 x = gen_rtx_IOR (SImode, x, val);
20146 emit_insn (gen_rtx_SET (VOIDmode, val, x));
20147 mask = NULL;
20148 break;
20150 case NOT:
20151 case PLUS:
20152 case MINUS:
20153 /* These will all affect bits outside the field and need
20154 adjustment via MASK within the loop. */
20155 break;
20157 default:
20158 gcc_unreachable ();
20161 /* Prepare to adjust the return value. */
20162 before = gen_reg_rtx (SImode);
20163 if (after)
20164 after = gen_reg_rtx (SImode);
20165 store_mode = mode = SImode;
20169 mem = rs6000_pre_atomic_barrier (mem, model);
20171 label = gen_label_rtx ();
20172 emit_label (label);
20173 label = gen_rtx_LABEL_REF (VOIDmode, label);
20175 if (before == NULL_RTX)
20176 before = gen_reg_rtx (mode);
20178 emit_load_locked (mode, before, mem);
20180 if (code == NOT)
20182 x = expand_simple_binop (mode, AND, before, val,
20183 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20184 after = expand_simple_unop (mode, NOT, x, after, 1);
20186 else
20188 after = expand_simple_binop (mode, code, before, val,
20189 after, 1, OPTAB_LIB_WIDEN);
20192 x = after;
20193 if (mask)
20195 x = expand_simple_binop (SImode, AND, after, mask,
20196 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20197 x = rs6000_mask_atomic_subword (before, x, mask);
20199 else if (store_mode != mode)
20200 x = convert_modes (store_mode, mode, x, 1);
20202 cond = gen_reg_rtx (CCmode);
20203 emit_store_conditional (store_mode, cond, mem, x);
20205 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20206 emit_unlikely_jump (x, label);
20208 rs6000_post_atomic_barrier (model);
20210 if (shift)
20212 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
20213 then do the calculations in a SImode register. */
20214 if (orig_before)
20215 rs6000_finish_atomic_subword (orig_before, before, shift);
20216 if (orig_after)
20217 rs6000_finish_atomic_subword (orig_after, after, shift);
20219 else if (store_mode != mode)
20221 /* QImode/HImode on machines with lbarx/lharx where we do the native
20222 operation and then do the calculations in a SImode register. */
20223 if (orig_before)
20224 convert_move (orig_before, before, 1);
20225 if (orig_after)
20226 convert_move (orig_after, after, 1);
20228 else if (orig_after && after != orig_after)
20229 emit_move_insn (orig_after, after);
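/* An illustrative sketch (exposition only) of the loop emitted above
   for an SImode fetch-and-add:

     1: lwarx   before,0,mem
        add     after,before,val
        stwcx.  after,0,mem
        bne-    cr0,1b

   Note that CODE == NOT is handled as ~(before & val) above, which is
   the NAND semantics __atomic_fetch_nand expects.  */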
20232 /* Emit instructions to move SRC to DST. Called by splitters for
20233 multi-register moves. It will emit at most one instruction for
20234 each register that is accessed; that is, it won't emit li/lis pairs
20235 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20236 register. */
20238 void
20239 rs6000_split_multireg_move (rtx dst, rtx src)
20241 /* The register number of the first register being moved. */
20242 int reg;
20243 /* The mode that is to be moved. */
20244 enum machine_mode mode;
20245 /* The mode that the move is being done in, and its size. */
20246 enum machine_mode reg_mode;
20247 int reg_mode_size;
20248 /* The number of registers that will be moved. */
20249 int nregs;
20251 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20252 mode = GET_MODE (dst);
20253 nregs = hard_regno_nregs[reg][mode];
20254 if (FP_REGNO_P (reg))
20255 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20256 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20257 else if (ALTIVEC_REGNO_P (reg))
20258 reg_mode = V16QImode;
20259 else if (TARGET_E500_DOUBLE && mode == TFmode)
20260 reg_mode = DFmode;
20261 else
20262 reg_mode = word_mode;
20263 reg_mode_size = GET_MODE_SIZE (reg_mode);
20265 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20267 /* TDmode residing in FP registers is special, since the ISA requires that
20268 the lower-numbered word of a register pair is always the most significant
20269 word, even in little-endian mode. This does not match the usual subreg
20270 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20271 the appropriate constituent registers "by hand" in little-endian mode.
20273 Note we do not need to check for destructive overlap here since TDmode
20274 can only reside in even/odd register pairs. */
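/* For example (illustrative): a TDmode value in the FPR pair f2/f3
   keeps its most significant doubleword in f2 even on little-endian,
   while subreg word 0 names the least significant half; hence the
   loop below uses REGNO + nregs - 1 - i rather than REGNO + i.  */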
20275 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20277 rtx p_src, p_dst;
20278 int i;
20280 for (i = 0; i < nregs; i++)
20282 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
20283 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
20284 else
20285 p_src = simplify_gen_subreg (reg_mode, src, mode,
20286 i * reg_mode_size);
20288 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
20289 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
20290 else
20291 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
20292 i * reg_mode_size);
20294 emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
20297 return;
20300 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
20302 /* Move register range backwards, if we might have destructive
20303 overlap. */
20304 int i;
20305 for (i = nregs - 1; i >= 0; i--)
20306 emit_insn (gen_rtx_SET (VOIDmode,
20307 simplify_gen_subreg (reg_mode, dst, mode,
20308 i * reg_mode_size),
20309 simplify_gen_subreg (reg_mode, src, mode,
20310 i * reg_mode_size)));
20312 else
20314 int i;
20315 int j = -1;
20316 bool used_update = false;
20317 rtx restore_basereg = NULL_RTX;
20319 if (MEM_P (src) && INT_REGNO_P (reg))
20321 rtx breg;
20323 if (GET_CODE (XEXP (src, 0)) == PRE_INC
20324 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
20326 rtx delta_rtx;
20327 breg = XEXP (XEXP (src, 0), 0);
20328 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
20329 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
20330 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
20331 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20332 src = replace_equiv_address (src, breg);
20334 else if (! rs6000_offsettable_memref_p (src, reg_mode))
20336 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
20338 rtx basereg = XEXP (XEXP (src, 0), 0);
20339 if (TARGET_UPDATE)
20341 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
20342 emit_insn (gen_rtx_SET (VOIDmode, ndst,
20343 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
20344 used_update = true;
20346 else
20347 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20348 XEXP (XEXP (src, 0), 1)));
20349 src = replace_equiv_address (src, basereg);
20351 else
20353 rtx basereg = gen_rtx_REG (Pmode, reg);
20354 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
20355 src = replace_equiv_address (src, basereg);
20359 breg = XEXP (src, 0);
20360 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
20361 breg = XEXP (breg, 0);
20363 /* If the base register we are using to address memory is
20364 also a destination reg, then change that register last. */
20365 if (REG_P (breg)
20366 && REGNO (breg) >= REGNO (dst)
20367 && REGNO (breg) < REGNO (dst) + nregs)
20368 j = REGNO (breg) - REGNO (dst);
20370 else if (MEM_P (dst) && INT_REGNO_P (reg))
20372 rtx breg;
20374 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
20375 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
20377 rtx delta_rtx;
20378 breg = XEXP (XEXP (dst, 0), 0);
20379 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
20380 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
20381 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
20383 /* We have to update the breg before doing the store.
20384 Use store with update, if available. */
20386 if (TARGET_UPDATE)
20388 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20389 emit_insn (TARGET_32BIT
20390 ? (TARGET_POWERPC64
20391 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
20392 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
20393 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
20394 used_update = true;
20396 else
20397 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20398 dst = replace_equiv_address (dst, breg);
20400 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
20401 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
20403 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
20405 rtx basereg = XEXP (XEXP (dst, 0), 0);
20406 if (TARGET_UPDATE)
20408 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20409 emit_insn (gen_rtx_SET (VOIDmode,
20410 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
20411 used_update = true;
20413 else
20414 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20415 XEXP (XEXP (dst, 0), 1)));
20416 dst = replace_equiv_address (dst, basereg);
20418 else
20420 rtx basereg = XEXP (XEXP (dst, 0), 0);
20421 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
20422 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
20423 && REG_P (basereg)
20424 && REG_P (offsetreg)
20425 && REGNO (basereg) != REGNO (offsetreg));
20426 if (REGNO (basereg) == 0)
20428 rtx tmp = offsetreg;
20429 offsetreg = basereg;
20430 basereg = tmp;
20432 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
20433 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
20434 dst = replace_equiv_address (dst, basereg);
20437 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
20438 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
20441 for (i = 0; i < nregs; i++)
20443 /* Calculate index to next subword. */
20444 ++j;
20445 if (j == nregs)
20446 j = 0;
20448 /* If the compiler already emitted the move of the first word by
20449 store with update, there is no need to do anything. */
20450 if (j == 0 && used_update)
20451 continue;
20453 emit_insn (gen_rtx_SET (VOIDmode,
20454 simplify_gen_subreg (reg_mode, dst, mode,
20455 j * reg_mode_size),
20456 simplify_gen_subreg (reg_mode, src, mode,
20457 j * reg_mode_size)));
20459 if (restore_basereg != NULL_RTX)
20460 emit_insn (restore_basereg);
20465 /* This page contains routines that are used to determine what the
20466 function prologue and epilogue code will do and write them out. */
20468 static inline bool
20469 save_reg_p (int r)
20471 return !call_used_regs[r] && df_regs_ever_live_p (r);
20474 /* Return the first fixed-point register that is required to be
20475 saved. 32 if none. */
20477 static int
20478 first_reg_to_save (void)
20480 int first_reg;
20482 /* Find lowest numbered live register. */
20483 for (first_reg = 13; first_reg <= 31; first_reg++)
20484 if (save_reg_p (first_reg))
20485 break;
20487 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
20488 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20489 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
20490 || (TARGET_TOC && TARGET_MINIMAL_TOC))
20491 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20492 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
20494 #if TARGET_MACHO
20495 if (flag_pic
20496 && crtl->uses_pic_offset_table
20497 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
20498 return RS6000_PIC_OFFSET_TABLE_REGNUM;
20499 #endif
20501 return first_reg;
20504 /* Similar, for FP regs. */
20506 static int
20507 first_fp_reg_to_save (void)
20509 int first_reg;
20511 /* Find lowest numbered live register. */
20512 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
20513 if (save_reg_p (first_reg))
20514 break;
20516 return first_reg;
20519 /* Similar, for AltiVec regs. */
20521 static int
20522 first_altivec_reg_to_save (void)
20524 int i;
20526 /* Stack frame remains as is unless we are in AltiVec ABI. */
20527 if (! TARGET_ALTIVEC_ABI)
20528 return LAST_ALTIVEC_REGNO + 1;
20530 /* On Darwin, the unwind routines are compiled without
20531 TARGET_ALTIVEC, and use save_world to save/restore the
20532 altivec registers when necessary. */
20533 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20534 && ! TARGET_ALTIVEC)
20535 return FIRST_ALTIVEC_REGNO + 20;
20537 /* Find lowest numbered live register. */
20538 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
20539 if (save_reg_p (i))
20540 break;
20542 return i;
20545 /* Return a 32-bit mask of the AltiVec registers we need to set in
20546 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
20547 the 32-bit word is 0. */
20549 static unsigned int
20550 compute_vrsave_mask (void)
20552 unsigned int i, mask = 0;
20554 /* On Darwin, the unwind routines are compiled without
20555 TARGET_ALTIVEC, and use save_world to save/restore the
20556 call-saved altivec registers when necessary. */
20557 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20558 && ! TARGET_ALTIVEC)
20559 mask |= 0xFFF;
20561 /* First, find out if we use _any_ altivec registers. */
20562 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20563 if (df_regs_ever_live_p (i))
20564 mask |= ALTIVEC_REG_BIT (i);
20566 if (mask == 0)
20567 return mask;
20569 /* Next, remove the argument registers from the set. These must
20570 be in the VRSAVE mask set by the caller, so we don't need to add
20571 them in again. More importantly, the mask we compute here is
20572 used to generate CLOBBERs in the set_vrsave insn, and we do not
20573 wish the argument registers to die. */
20574 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
20575 mask &= ~ALTIVEC_REG_BIT (i);
20577 /* Similarly, remove the return value from the set. */
20579 bool yes = false;
20580 diddle_return_value (is_altivec_return_reg, &yes);
20581 if (yes)
20582 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
20585 return mask;
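/* A worked example (illustrative): if only V20 and V31 survive the
   pruning above, the result is
     (1 << (31 - 20)) | (1 << (31 - 31)) == 0x00000801,
   matching the VRSAVE convention of V0 in the most significant bit.  */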
20588 /* For a very restricted set of circumstances, we can cut down the
20589 size of prologues/epilogues by calling our own save/restore-the-world
20590 routines. */
20592 static void
20593 compute_save_world_info (rs6000_stack_t *info_ptr)
20595 info_ptr->world_save_p = 1;
20596 info_ptr->world_save_p
20597 = (WORLD_SAVE_P (info_ptr)
20598 && DEFAULT_ABI == ABI_DARWIN
20599 && !cfun->has_nonlocal_label
20600 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
20601 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
20602 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
20603 && info_ptr->cr_save_p);
20605 /* This will not work in conjunction with sibcalls. Make sure there
20606 are none. (This check is expensive, but seldom executed.) */
20607 if (WORLD_SAVE_P (info_ptr))
20609 rtx insn;
20610 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
20611 if (CALL_P (insn) && SIBLING_CALL_P (insn))
20613 info_ptr->world_save_p = 0;
20614 break;
20618 if (WORLD_SAVE_P (info_ptr))
20620 /* Even if we're not touching VRsave, make sure there's room on the
20621 stack for it, if it looks like we're calling SAVE_WORLD, which
20622 will attempt to save it. */
20623 info_ptr->vrsave_size = 4;
20625 /* If we are going to save the world, we need to save the link register too. */
20626 info_ptr->lr_save_p = 1;
20628 /* "Save" the VRsave register too if we're saving the world. */
20629 if (info_ptr->vrsave_mask == 0)
20630 info_ptr->vrsave_mask = compute_vrsave_mask ();
20632 /* Because the Darwin register save/restore routines only handle
20633 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
20634 check. */
20635 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
20636 && (info_ptr->first_altivec_reg_save
20637 >= FIRST_SAVED_ALTIVEC_REGNO));
20639 return;
20643 static void
20644 is_altivec_return_reg (rtx reg, void *xyes)
20646 bool *yes = (bool *) xyes;
20647 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
20648 *yes = true;
20652 /* Look for user-defined global regs in the range FIRST to LAST-1.
20653 We should not restore these, and so cannot use lmw or out-of-line
20654 restore functions if there are any. We also can't save them
20655 (well, emit frame notes for them), because frame unwinding during
20656 exception handling will restore saved registers. */
20658 static bool
20659 global_regs_p (unsigned first, unsigned last)
20661 while (first < last)
20662 if (global_regs[first++])
20663 return true;
20664 return false;
20667 /* Determine the strategy for saving/restoring registers. */
20669 enum {
20670 SAVRES_MULTIPLE = 0x1,
20671 SAVE_INLINE_FPRS = 0x2,
20672 SAVE_INLINE_GPRS = 0x4,
20673 REST_INLINE_FPRS = 0x8,
20674 REST_INLINE_GPRS = 0x10,
20675 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
20676 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
20677 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
20678 SAVE_INLINE_VRS = 0x100,
20679 REST_INLINE_VRS = 0x200
20682 static int
20683 rs6000_savres_strategy (rs6000_stack_t *info,
20684 bool using_static_chain_p)
20686 int strategy = 0;
20687 bool lr_save_p;
20689 if (TARGET_MULTIPLE
20690 && !TARGET_POWERPC64
20691 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
20692 && info->first_gp_reg_save < 31
20693 && !global_regs_p (info->first_gp_reg_save, 32))
20694 strategy |= SAVRES_MULTIPLE;
20696 if (crtl->calls_eh_return
20697 || cfun->machine->ra_need_lr)
20698 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
20699 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
20700 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20702 if (info->first_fp_reg_save == 64
20703 /* The out-of-line FP routines use double-precision stores;
20704 we can't use those routines if we don't have such stores. */
20705 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
20706 || global_regs_p (info->first_fp_reg_save, 64))
20707 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20709 if (info->first_gp_reg_save == 32
20710 || (!(strategy & SAVRES_MULTIPLE)
20711 && global_regs_p (info->first_gp_reg_save, 32)))
20712 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20714 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
20715 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
20716 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20718 /* Define cutoff for using out-of-line functions to save registers. */
20719 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
20721 if (!optimize_size)
20723 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20724 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20725 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20727 else
20729 /* Prefer out-of-line restore if it will exit. */
20730 if (info->first_fp_reg_save > 61)
20731 strategy |= SAVE_INLINE_FPRS;
20732 if (info->first_gp_reg_save > 29)
20734 if (info->first_fp_reg_save == 64)
20735 strategy |= SAVE_INLINE_GPRS;
20736 else
20737 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20739 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
20740 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20743 else if (DEFAULT_ABI == ABI_DARWIN)
20745 if (info->first_fp_reg_save > 60)
20746 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20747 if (info->first_gp_reg_save > 29)
20748 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20749 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20751 else
20753 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
20754 if (info->first_fp_reg_save > 61)
20755 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20756 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20757 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20760 /* Don't bother to try to save things out-of-line if r11 is occupied
20761 by the static chain. It would require too much fiddling and the
20762 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
20763 pointer on Darwin, and AIX uses r1 or r12. */
20764 if (using_static_chain_p
20765 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
20766 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
20767 | SAVE_INLINE_GPRS
20768 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20770 /* We can only use the out-of-line routines to restore if we've
20771 saved all the registers from first_fp_reg_save in the prologue.
20772 Otherwise, we risk loading garbage. */
20773 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
20775 int i;
20777 for (i = info->first_fp_reg_save; i < 64; i++)
20778 if (!save_reg_p (i))
20780 strategy |= REST_INLINE_FPRS;
20781 break;
20785 /* If we are going to use store multiple, then don't even bother
20786 with the out-of-line routines, since the store-multiple
20787 instruction will always be smaller. */
20788 if ((strategy & SAVRES_MULTIPLE))
20789 strategy |= SAVE_INLINE_GPRS;
20791 /* info->lr_save_p isn't yet set if the only reason lr needs to be
20792 saved is an out-of-line save or restore. Set up the value for
20793 the next test (excluding out-of-line gpr restore). */
20794 lr_save_p = (info->lr_save_p
20795 || !(strategy & SAVE_INLINE_GPRS)
20796 || !(strategy & SAVE_INLINE_FPRS)
20797 || !(strategy & SAVE_INLINE_VRS)
20798 || !(strategy & REST_INLINE_FPRS)
20799 || !(strategy & REST_INLINE_VRS));
20801 /* The situation is more complicated with load multiple. We'd
20802 prefer to use the out-of-line routines for restores, since the
20803 "exit" out-of-line routines can handle the restore of LR and the
20804 frame teardown. However, it doesn't make sense to use the
20805 out-of-line routine if that is the only reason we'd need to save
20806 LR, and we can't use the "exit" out-of-line gpr restore if we
20807 have saved some fprs; in those cases it is advantageous to use
20808 load multiple when available. */
20809 if ((strategy & SAVRES_MULTIPLE)
20810 && (!lr_save_p
20811 || info->first_fp_reg_save != 64))
20812 strategy |= REST_INLINE_GPRS;
20814 /* Saving CR interferes with the exit routines used on the SPE, so
20815 just punt here. */
20816 if (TARGET_SPE_ABI
20817 && info->spe_64bit_regs_used
20818 && info->cr_save_p)
20819 strategy |= REST_INLINE_GPRS;
20821 /* We can only use load multiple or the out-of-line routines to
20822 restore if we've used store multiple or out-of-line routines
20823 in the prologue, i.e. if we've saved all the registers from
20824 first_gp_reg_save. Otherwise, we risk loading garbage. */
20825 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
20826 == SAVE_INLINE_GPRS)
20828 int i;
20830 for (i = info->first_gp_reg_save; i < 32; i++)
20831 if (!save_reg_p (i))
20833 strategy |= REST_INLINE_GPRS;
20834 break;
20838 if (TARGET_ELF && TARGET_64BIT)
20840 if (!(strategy & SAVE_INLINE_FPRS))
20841 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20842 else if (!(strategy & SAVE_INLINE_GPRS)
20843 && info->first_fp_reg_save == 64)
20844 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
20846 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
20847 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
20849 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
20850 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
20852 return strategy;
20855 /* Calculate the stack information for the current function. This is
20856 complicated by having two separate calling sequences, the AIX calling
20857 sequence and the V.4 calling sequence.
20859 AIX (and Darwin/Mac OS X) stack frames look like:
20860 32-bit 64-bit
20861 SP----> +---------------------------------------+
20862 | back chain to caller | 0 0
20863 +---------------------------------------+
20864 | saved CR | 4 8 (8-11)
20865 +---------------------------------------+
20866 | saved LR | 8 16
20867 +---------------------------------------+
20868 | reserved for compilers | 12 24
20869 +---------------------------------------+
20870 | reserved for binders | 16 32
20871 +---------------------------------------+
20872 | saved TOC pointer | 20 40
20873 +---------------------------------------+
20874 | Parameter save area (P) | 24 48
20875 +---------------------------------------+
20876 | Alloca space (A) | 24+P etc.
20877 +---------------------------------------+
20878 | Local variable space (L) | 24+P+A
20879 +---------------------------------------+
20880 | Float/int conversion temporary (X) | 24+P+A+L
20881 +---------------------------------------+
20882 | Save area for AltiVec registers (W) | 24+P+A+L+X
20883 +---------------------------------------+
20884 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
20885 +---------------------------------------+
20886 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
20887 +---------------------------------------+
20888 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
20889 +---------------------------------------+
20890 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
20891 +---------------------------------------+
20892 old SP->| back chain to caller's caller |
20893 +---------------------------------------+
20895 The required alignment for AIX configurations is two words (i.e., 8
20896 or 16 bytes).
20898 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
20900 SP----> +---------------------------------------+
20901 | Back chain to caller | 0
20902 +---------------------------------------+
20903 | Save area for CR | 8
20904 +---------------------------------------+
20905 | Saved LR | 16
20906 +---------------------------------------+
20907 | Saved TOC pointer | 24
20908 +---------------------------------------+
20909 | Parameter save area (P) | 32
20910 +---------------------------------------+
20911 | Alloca space (A) | 32+P
20912 +---------------------------------------+
20913 | Local variable space (L) | 32+P+A
20914 +---------------------------------------+
20915 | Save area for AltiVec registers (W) | 32+P+A+L
20916 +---------------------------------------+
20917 | AltiVec alignment padding (Y) | 32+P+A+L+W
20918 +---------------------------------------+
20919 | Save area for GP registers (G) | 32+P+A+L+W+Y
20920 +---------------------------------------+
20921 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
20922 +---------------------------------------+
20923 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
20924 +---------------------------------------+
20927 V.4 stack frames look like:
20929 SP----> +---------------------------------------+
20930 | back chain to caller | 0
20931 +---------------------------------------+
20932 | caller's saved LR | 4
20933 +---------------------------------------+
20934 | Parameter save area (P) | 8
20935 +---------------------------------------+
20936 | Alloca space (A) | 8+P
20937 +---------------------------------------+
20938 | Varargs save area (V) | 8+P+A
20939 +---------------------------------------+
20940 | Local variable space (L) | 8+P+A+V
20941 +---------------------------------------+
20942 | Float/int conversion temporary (X) | 8+P+A+V+L
20943 +---------------------------------------+
20944 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
20945 +---------------------------------------+
20946 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
20947 +---------------------------------------+
20948 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
20949 +---------------------------------------+
20950 | SPE: area for 64-bit GP registers |
20951 +---------------------------------------+
20952 | SPE alignment padding |
20953 +---------------------------------------+
20954 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
20955 +---------------------------------------+
20956 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
20957 +---------------------------------------+
20958 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
20959 +---------------------------------------+
20960 old SP->| back chain to caller's caller |
20961 +---------------------------------------+
20963 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
20964 given. (But note below and in sysv4.h that we require only 8 and
20965 may round up the size of our stack frame anyway. The historical
20966 reason is early versions of powerpc-linux which didn't properly
20967 align the stack at program startup. A happy side-effect is that
20968 -mno-eabi libraries can be used with -meabi programs.)
20970 The EABI configuration defaults to the V.4 layout. However,
20971 the stack alignment requirements may differ. If -mno-eabi is not
20972 given, the required stack alignment is 8 bytes; if -mno-eabi is
20973 given, the required alignment is 16 bytes. (But see V.4 comment
20974 above.) */
20976 #ifndef ABI_STACK_BOUNDARY
20977 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
20978 #endif
20980 static rs6000_stack_t *
20981 rs6000_stack_info (void)
20983 rs6000_stack_t *info_ptr = &stack_info;
20984 int reg_size = TARGET_32BIT ? 4 : 8;
20985 int ehrd_size;
20986 int ehcr_size;
20987 int save_align;
20988 int first_gp;
20989 HOST_WIDE_INT non_fixed_size;
20990 bool using_static_chain_p;
20992 if (reload_completed && info_ptr->reload_completed)
20993 return info_ptr;
20995 memset (info_ptr, 0, sizeof (*info_ptr));
20996 info_ptr->reload_completed = reload_completed;
20998 if (TARGET_SPE)
21000 /* Cache value so we don't rescan instruction chain over and over. */
21001 if (cfun->machine->insn_chain_scanned_p == 0)
21002 cfun->machine->insn_chain_scanned_p
21003 = spe_func_has_64bit_regs_p () + 1;
21004 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
21007 /* Select which calling sequence. */
21008 info_ptr->abi = DEFAULT_ABI;
21010 /* Calculate which registers need to be saved & save area size. */
21011 info_ptr->first_gp_reg_save = first_reg_to_save ();
21012 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
21013 even if it currently looks like we won't. Reload may need it to
21014 get at a constant; if so, it will have already created a constant
21015 pool entry for it. */
21016 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
21017 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
21018 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
21019 && crtl->uses_const_pool
21020 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
21021 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
21022 else
21023 first_gp = info_ptr->first_gp_reg_save;
21025 info_ptr->gp_size = reg_size * (32 - first_gp);
21027 /* For the SPE, we have an additional upper 32-bits on each GPR.
21028 Ideally we should save the entire 64-bits only when the upper
21029 half is used in SIMD instructions. Since we only record
21030 registers live (not the size they are used in), this proves
21031 difficult because we'd have to traverse the instruction chain at
21032 the right time, taking reload into account. This is a real pain,
21033 so we opt to save the GPRs in 64-bits always if even one register
21034 gets used in 64-bits. Otherwise, all the registers in the frame
21035 get saved in 32-bits.
21037 So... when we save all GPRs (except the SP) in 64-bits, the
21038 traditional GP save area will be empty. */
21039 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21040 info_ptr->gp_size = 0;
21042 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
21043 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
21045 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
21046 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
21047 - info_ptr->first_altivec_reg_save);
21049 /* Does this function call anything? */
21050 info_ptr->calls_p = (! crtl->is_leaf
21051 || cfun->machine->ra_needs_full_frame);
21053 /* Determine if we need to save the condition code registers. */
21054 if (df_regs_ever_live_p (CR2_REGNO)
21055 || df_regs_ever_live_p (CR3_REGNO)
21056 || df_regs_ever_live_p (CR4_REGNO))
21058 info_ptr->cr_save_p = 1;
21059 if (DEFAULT_ABI == ABI_V4)
21060 info_ptr->cr_size = reg_size;
21063 /* If the current function calls __builtin_eh_return, then we need
21064 to allocate stack space for registers that will hold data for
21065 the exception handler. */
21066 if (crtl->calls_eh_return)
21068 unsigned int i;
21069 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
21070 continue;
21072 /* SPE saves EH registers in 64-bits. */
21073 ehrd_size = i * (TARGET_SPE_ABI
21074 && info_ptr->spe_64bit_regs_used != 0
21075 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
21077 else
21078 ehrd_size = 0;
21080 /* In the ELFv2 ABI, we also need to allocate space for separate
21081 CR field save areas if the function calls __builtin_eh_return. */
21082 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
21084 /* This hard-codes that we have three call-saved CR fields. */
21085 ehcr_size = 3 * reg_size;
21086 /* We do *not* use the regular CR save mechanism. */
21087 info_ptr->cr_save_p = 0;
21089 else
21090 ehcr_size = 0;
21092 /* Determine various sizes. */
21093 info_ptr->reg_size = reg_size;
21094 info_ptr->fixed_size = RS6000_SAVE_AREA;
21095 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
21096 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
21097 TARGET_ALTIVEC ? 16 : 8);
21098 if (FRAME_GROWS_DOWNWARD)
21099 info_ptr->vars_size
21100 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
21101 + info_ptr->parm_size,
21102 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
21103 - (info_ptr->fixed_size + info_ptr->vars_size
21104 + info_ptr->parm_size);
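/* A worked example (illustrative): if fixed_size + vars_size
   + parm_size were 100 bytes with a 16-byte ABI_STACK_BOUNDARY,
   RS6000_ALIGN rounds that up to 112, so vars_size grows by 12
   bytes and everything below the locals stays 16-byte aligned.  */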
21106 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21107 info_ptr->spe_gp_size = 8 * (32 - first_gp);
21108 else
21109 info_ptr->spe_gp_size = 0;
21111 if (TARGET_ALTIVEC_ABI)
21112 info_ptr->vrsave_mask = compute_vrsave_mask ();
21113 else
21114 info_ptr->vrsave_mask = 0;
21116 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
21117 info_ptr->vrsave_size = 4;
21118 else
21119 info_ptr->vrsave_size = 0;
21121 compute_save_world_info (info_ptr);
21123 /* Calculate the offsets. */
21124 switch (DEFAULT_ABI)
21126 case ABI_NONE:
21127 default:
21128 gcc_unreachable ();
21130 case ABI_AIX:
21131 case ABI_ELFv2:
21132 case ABI_DARWIN:
21133 info_ptr->fp_save_offset = - info_ptr->fp_size;
21134 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21136 if (TARGET_ALTIVEC_ABI)
21138 info_ptr->vrsave_save_offset
21139 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
21141 /* Align stack so vector save area is on a quadword boundary.
21142 The padding goes above the vectors. */
21143 if (info_ptr->altivec_size != 0)
21144 info_ptr->altivec_padding_size
21145 = info_ptr->vrsave_save_offset & 0xF;
21146 else
21147 info_ptr->altivec_padding_size = 0;
21149 info_ptr->altivec_save_offset
21150 = info_ptr->vrsave_save_offset
21151 - info_ptr->altivec_padding_size
21152 - info_ptr->altivec_size;
21153 gcc_assert (info_ptr->altivec_size == 0
21154 || info_ptr->altivec_save_offset % 16 == 0);
21156 /* Adjust for AltiVec case. */
21157 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21159 else
21160 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21162 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21163 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21164 info_ptr->lr_save_offset = 2*reg_size;
21165 break;
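/* A worked example (illustrative): a 64-bit ELFv2 function saving
   only f31 and r30-r31 gets fp_size = 8 and gp_size = 16, hence
   fp_save_offset = -8 and gp_save_offset = -24 relative to the
   frame top, while the CR and LR save slots sit at +8 and +16,
   matching the ELFv2 diagram above.  */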
21167 case ABI_V4:
21168 info_ptr->fp_save_offset = - info_ptr->fp_size;
21169 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21170 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21172 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21174 /* Align stack so SPE GPR save area is aligned on a
21175 double-word boundary. */
21176 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21177 info_ptr->spe_padding_size
21178 = 8 - (-info_ptr->cr_save_offset % 8);
21179 else
21180 info_ptr->spe_padding_size = 0;
21182 info_ptr->spe_gp_save_offset
21183 = info_ptr->cr_save_offset
21184 - info_ptr->spe_padding_size
21185 - info_ptr->spe_gp_size;
21187 /* Adjust for SPE case. */
21188 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21190 else if (TARGET_ALTIVEC_ABI)
21192 info_ptr->vrsave_save_offset
21193 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21195 /* Align stack so vector save area is on a quadword boundary. */
21196 if (info_ptr->altivec_size != 0)
21197 info_ptr->altivec_padding_size
21198 = 16 - (-info_ptr->vrsave_save_offset % 16);
21199 else
21200 info_ptr->altivec_padding_size = 0;
21202 info_ptr->altivec_save_offset
21203 = info_ptr->vrsave_save_offset
21204 - info_ptr->altivec_padding_size
21205 - info_ptr->altivec_size;
21207 /* Adjust for AltiVec case. */
21208 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21210 else
21211 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21212 info_ptr->ehrd_offset -= ehrd_size;
21213 info_ptr->lr_save_offset = reg_size;
21214 break;
21217 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21218 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21219 + info_ptr->gp_size
21220 + info_ptr->altivec_size
21221 + info_ptr->altivec_padding_size
21222 + info_ptr->spe_gp_size
21223 + info_ptr->spe_padding_size
21224 + ehrd_size
21225 + ehcr_size
21226 + info_ptr->cr_size
21227 + info_ptr->vrsave_size,
21228 save_align);
21230 non_fixed_size = (info_ptr->vars_size
21231 + info_ptr->parm_size
21232 + info_ptr->save_size);
21234 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21235 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
21237 /* Determine if we need to save the link register. */
21238 if (info_ptr->calls_p
21239 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21240 && crtl->profile
21241 && !TARGET_PROFILE_KERNEL)
21242 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21243 #ifdef TARGET_RELOCATABLE
21244 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21245 #endif
21246 || rs6000_ra_ever_killed ())
21247 info_ptr->lr_save_p = 1;
21249 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21250 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21251 && call_used_regs[STATIC_CHAIN_REGNUM]);
21252 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21253 using_static_chain_p);
21255 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21256 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21257 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21258 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21259 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21260 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21261 info_ptr->lr_save_p = 1;
21263 if (info_ptr->lr_save_p)
21264 df_set_regs_ever_live (LR_REGNO, true);
21266 /* Determine if we need to allocate any stack frame:
21268 For AIX we need to push the stack if a frame pointer is needed
21269 (because the stack might be dynamically adjusted), if we are
21270 debugging, if we make calls, or if the sum of fp_save, gp_save,
21271 and local variables are more than the space needed to save all
21272 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21273 + 18*8 = 288 (GPR13 reserved).
21275 For V.4 we don't have the stack cushion that AIX uses, but assume
21276 that the debugger can handle stackless frames. */
21278 if (info_ptr->calls_p)
21279 info_ptr->push_p = 1;
21281 else if (DEFAULT_ABI == ABI_V4)
21282 info_ptr->push_p = non_fixed_size != 0;
21284 else if (frame_pointer_needed)
21285 info_ptr->push_p = 1;
21287 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
21288 info_ptr->push_p = 1;
21290 else
21291 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
21293 /* Zero offsets if we're not saving those registers. */
21294 if (info_ptr->fp_size == 0)
21295 info_ptr->fp_save_offset = 0;
21297 if (info_ptr->gp_size == 0)
21298 info_ptr->gp_save_offset = 0;
21300 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
21301 info_ptr->altivec_save_offset = 0;
21303 /* Zero VRSAVE offset if not saved and restored. */
21304 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
21305 info_ptr->vrsave_save_offset = 0;
21307 if (! TARGET_SPE_ABI
21308 || info_ptr->spe_64bit_regs_used == 0
21309 || info_ptr->spe_gp_size == 0)
21310 info_ptr->spe_gp_save_offset = 0;
21312 if (! info_ptr->lr_save_p)
21313 info_ptr->lr_save_offset = 0;
21315 if (! info_ptr->cr_save_p)
21316 info_ptr->cr_save_offset = 0;
21318 return info_ptr;
21321 /* Return true if the current function uses any GPRs in 64-bit SIMD
21322 mode. */
21324 static bool
21325 spe_func_has_64bit_regs_p (void)
21327 rtx insns, insn;
21329 /* Functions that save and restore all the call-saved registers will
21330 need to save/restore the registers in 64-bits. */
21331 if (crtl->calls_eh_return
21332 || cfun->calls_setjmp
21333 || crtl->has_nonlocal_goto)
21334 return true;
21336 insns = get_insns ();
21338 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
21340 if (INSN_P (insn))
21342 rtx i;
21344 /* FIXME: This should be implemented with attributes...
21346 (set_attr "spe64" "true")....then,
21347 if (get_spe64(insn)) return true;
21349 It's the only reliable way to do the stuff below. */
21351 i = PATTERN (insn);
21352 if (GET_CODE (i) == SET)
21354 enum machine_mode mode = GET_MODE (SET_SRC (i));
21356 if (SPE_VECTOR_MODE (mode))
21357 return true;
21358 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
21359 return true;
21364 return false;
21367 static void
21368 debug_stack_info (rs6000_stack_t *info)
21370 const char *abi_string;
21372 if (! info)
21373 info = rs6000_stack_info ();
21375 fprintf (stderr, "\nStack information for function %s:\n",
21376 ((current_function_decl && DECL_NAME (current_function_decl))
21377 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
21378 : "<unknown>"));
21380 switch (info->abi)
21382 default: abi_string = "Unknown"; break;
21383 case ABI_NONE: abi_string = "NONE"; break;
21384 case ABI_AIX: abi_string = "AIX"; break;
21385 case ABI_ELFv2: abi_string = "ELFv2"; break;
21386 case ABI_DARWIN: abi_string = "Darwin"; break;
21387 case ABI_V4: abi_string = "V.4"; break;
21390 fprintf (stderr, "\tABI = %5s\n", abi_string);
21392 if (TARGET_ALTIVEC_ABI)
21393 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
21395 if (TARGET_SPE_ABI)
21396 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
21398 if (info->first_gp_reg_save != 32)
21399 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
21401 if (info->first_fp_reg_save != 64)
21402 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
21404 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
21405 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
21406 info->first_altivec_reg_save);
21408 if (info->lr_save_p)
21409 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
21411 if (info->cr_save_p)
21412 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
21414 if (info->vrsave_mask)
21415 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
21417 if (info->push_p)
21418 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
21420 if (info->calls_p)
21421 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
21423 if (info->gp_save_offset)
21424 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
21426 if (info->fp_save_offset)
21427 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
21429 if (info->altivec_save_offset)
21430 fprintf (stderr, "\taltivec_save_offset = %5d\n",
21431 info->altivec_save_offset);
21433 if (info->spe_gp_save_offset)
21434 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
21435 info->spe_gp_save_offset);
21437 if (info->vrsave_save_offset)
21438 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
21439 info->vrsave_save_offset);
21441 if (info->lr_save_offset)
21442 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
21444 if (info->cr_save_offset)
21445 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
21447 if (info->varargs_save_offset)
21448 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
21450 if (info->total_size)
21451 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21452 info->total_size);
21454 if (info->vars_size)
21455 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21456 info->vars_size);
21458 if (info->parm_size)
21459 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
21461 if (info->fixed_size)
21462 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
21464 if (info->gp_size)
21465 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
21467 if (info->spe_gp_size)
21468 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
21470 if (info->fp_size)
21471 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
21473 if (info->altivec_size)
21474 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
21476 if (info->vrsave_size)
21477 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
21479 if (info->altivec_padding_size)
21480 fprintf (stderr, "\taltivec_padding_size= %5d\n",
21481 info->altivec_padding_size);
21483 if (info->spe_padding_size)
21484 fprintf (stderr, "\tspe_padding_size = %5d\n",
21485 info->spe_padding_size);
21487 if (info->cr_size)
21488 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
21490 if (info->save_size)
21491 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
21493 if (info->reg_size != 4)
21494 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
21496 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
21498 fprintf (stderr, "\n");
21501 rtx
21502 rs6000_return_addr (int count, rtx frame)
21504 /* Currently we don't optimize very well between prolog and body
21505 code, and for PIC code the generated code can actually be quite bad, so
21506 don't try to be too clever here. */
21507 if (count != 0
21508 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
21510 cfun->machine->ra_needs_full_frame = 1;
21512 return
21513 gen_rtx_MEM
21514 (Pmode,
21515 memory_address
21516 (Pmode,
21517 plus_constant (Pmode,
21518 copy_to_reg
21519 (gen_rtx_MEM (Pmode,
21520 memory_address (Pmode, frame))),
21521 RETURN_ADDRESS_OFFSET)));
21524 cfun->machine->ra_need_lr = 1;
21525 return get_hard_reg_initial_val (Pmode, LR_REGNO);
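/* Illustrative usage (not from the original source):

     void *ra0 = __builtin_return_address (0);  LR value on entry
     void *ra1 = __builtin_return_address (1);  walks the back chain

   COUNT == 0 without PIC resolves to the pseudo holding LR's initial
   value; anything else forces a full frame and loads the slot at
   RETURN_ADDRESS_OFFSET off the caller's back chain word.  */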
21528 /* Say whether a function is a candidate for sibcall handling or not. */
21530 static bool
21531 rs6000_function_ok_for_sibcall (tree decl, tree exp)
21533 tree fntype;
21535 if (decl)
21536 fntype = TREE_TYPE (decl);
21537 else
21538 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
21540 /* We can't do it if the called function has more vector parameters
21541 than the current function; there's nowhere to put the VRsave code. */
21542 if (TARGET_ALTIVEC_ABI
21543 && TARGET_ALTIVEC_VRSAVE
21544 && !(decl && decl == current_function_decl))
21546 function_args_iterator args_iter;
21547 tree type;
21548 int nvreg = 0;
21550 /* Functions with vector parameters are required to have a
21551 prototype, so the argument type info must be available
21552 here. */
21553 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
21554 if (TREE_CODE (type) == VECTOR_TYPE
21555 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21556 nvreg++;
21558 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
21559 if (TREE_CODE (type) == VECTOR_TYPE
21560 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21561 nvreg--;
21563 if (nvreg > 0)
21564 return false;
21567 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
21568 functions, because the callee may have a different TOC pointer from
21569 the caller and there's no way to ensure we restore the TOC when
21570 we return. With the secure-plt SYSV ABI we can't make non-local
21571 calls when -fpic/PIC because the plt call stubs use r30. */
21572 if (DEFAULT_ABI == ABI_DARWIN
21573 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21574 && decl
21575 && !DECL_EXTERNAL (decl)
21576 && (*targetm.binds_local_p) (decl))
21577 || (DEFAULT_ABI == ABI_V4
21578 && (!TARGET_SECURE_PLT
21579 || !flag_pic
21580 || (decl
21581 && (*targetm.binds_local_p) (decl)))))
21583 tree attr_list = TYPE_ATTRIBUTES (fntype);
21585 if (!lookup_attribute ("longcall", attr_list)
21586 || lookup_attribute ("shortcall", attr_list))
21587 return true;
21590 return false;
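/* An illustrative example (not from the original source): under AIX
   or ELFv2,

     static int bar (int);
     int foo (int x) { return bar (x); }

   can use a sibling branch because bar binds locally and shares foo's
   TOC, whereas the same tail call to an external function is rejected
   above, and a "longcall" attribute disables it as well.  */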
21593 static int
21594 rs6000_ra_ever_killed (void)
21596 rtx top;
21597 rtx reg;
21598 rtx insn;
21600 if (cfun->is_thunk)
21601 return 0;
21603 if (cfun->machine->lr_save_state)
21604 return cfun->machine->lr_save_state - 1;
21606 /* regs_ever_live has LR marked as used if any sibcalls are present,
21607 but this should not force saving and restoring in the
21608 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
21609 clobbers LR, so that is inappropriate. */
21611 /* Also, the prologue can generate a store into LR that
21612 doesn't really count, like this:
21614 move LR->R0
21615 bcl to set PIC register
21616 move LR->R31
21617 move R0->LR
21619 When we're called from the epilogue, we need to avoid counting
21620 this as a store. */
21622 push_topmost_sequence ();
21623 top = get_insns ();
21624 pop_topmost_sequence ();
21625 reg = gen_rtx_REG (Pmode, LR_REGNO);
21627 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
21629 if (INSN_P (insn))
21631 if (CALL_P (insn))
21633 if (!SIBLING_CALL_P (insn))
21634 return 1;
21636 else if (find_regno_note (insn, REG_INC, LR_REGNO))
21637 return 1;
21638 else if (set_of (reg, insn) != NULL_RTX
21639 && !prologue_epilogue_contains (insn))
21640 return 1;
21643 return 0;
21646 /* Emit instructions needed to load the TOC register.
21647 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
21648 a constant pool; or for SVR4 -fpic. */
21650 void
21651 rs6000_emit_load_toc_table (int fromprolog)
21653 rtx dest;
21654 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
21656 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
21658 char buf[30];
21659 rtx lab, tmp1, tmp2, got;
21661 lab = gen_label_rtx ();
21662 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
21663 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21664 if (flag_pic == 2)
21665 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21666 else
21667 got = rs6000_got_sym ();
21668 tmp1 = tmp2 = dest;
21669 if (!fromprolog)
21671 tmp1 = gen_reg_rtx (Pmode);
21672 tmp2 = gen_reg_rtx (Pmode);
21674 emit_insn (gen_load_toc_v4_PIC_1 (lab));
21675 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
21676 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
21677 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
21679 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
21681 emit_insn (gen_load_toc_v4_pic_si ());
21682 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21684 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
21686 char buf[30];
21687 rtx temp0 = (fromprolog
21688 ? gen_rtx_REG (Pmode, 0)
21689 : gen_reg_rtx (Pmode));
21691 if (fromprolog)
21693 rtx symF, symL;
21695 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21696 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21698 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
21699 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21701 emit_insn (gen_load_toc_v4_PIC_1 (symF));
21702 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21703 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
21705 else
21707 rtx tocsym, lab;
21709 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21710 lab = gen_label_rtx ();
21711 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
21712 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21713 if (TARGET_LINK_STACK)
21714 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
21715 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
21717 emit_insn (gen_addsi3 (dest, temp0, dest));
21719 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
21721 /* This is for AIX code running in non-PIC ELF32. */
21722 char buf[30];
21723 rtx realsym;
21724 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21725 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21727 emit_insn (gen_elf_high (dest, realsym));
21728 emit_insn (gen_elf_low (dest, dest, realsym));
21730 else
21732 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21734 if (TARGET_32BIT)
21735 emit_insn (gen_load_toc_aix_si (dest));
21736 else
21737 emit_insn (gen_load_toc_aix_di (dest));
21741 /* Emit instructions to restore the link register after determining where
21742 its value has been stored. */
21744 void
21745 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
21747 rs6000_stack_t *info = rs6000_stack_info ();
21748 rtx operands[2];
21750 operands[0] = source;
21751 operands[1] = scratch;
21753 if (info->lr_save_p)
21755 rtx frame_rtx = stack_pointer_rtx;
21756 HOST_WIDE_INT sp_offset = 0;
21757 rtx tmp;
21759 if (frame_pointer_needed
21760 || cfun->calls_alloca
21761 || info->total_size > 32767)
21763 tmp = gen_frame_mem (Pmode, frame_rtx);
21764 emit_move_insn (operands[1], tmp);
21765 frame_rtx = operands[1];
21767 else if (info->push_p)
21768 sp_offset = info->total_size;
21770 tmp = plus_constant (Pmode, frame_rtx,
21771 info->lr_save_offset + sp_offset);
21772 tmp = gen_frame_mem (Pmode, tmp);
21773 emit_move_insn (tmp, operands[0]);
21775 else
21776 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
21778 /* Freeze lr_save_p. We've just emitted rtl that depends on the
21779 state of lr_save_p so any change from here on would be a bug. In
21780 particular, stop rs6000_ra_ever_killed from considering the SET
21781 of lr we may have added just above. */
21782 cfun->machine->lr_save_state = info->lr_save_p + 1;
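/* Alias set used for TOC references; allocated lazily on first use.  */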
21785 static GTY(()) alias_set_type set = -1;
21787 alias_set_type
21788 get_TOC_alias_set (void)
21790 if (set == -1)
21791 set = new_alias_set ();
21792 return set;
21795 /* This returns nonzero if the current function uses the TOC. This is
21796 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
21797 is generated by the ABI_V4 load_toc_* patterns. */
21798 #if TARGET_ELF
21799 static int
21800 uses_TOC (void)
21802 rtx insn;
21804 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
21805 if (INSN_P (insn))
21807 rtx pat = PATTERN (insn);
21808 int i;
21810 if (GET_CODE (pat) == PARALLEL)
21811 for (i = 0; i < XVECLEN (pat, 0); i++)
21813 rtx sub = XVECEXP (pat, 0, i);
21814 if (GET_CODE (sub) == USE)
21816 sub = XEXP (sub, 0);
21817 if (GET_CODE (sub) == UNSPEC
21818 && XINT (sub, 1) == UNSPEC_TOC)
21819 return 1;
21823 return 0;
21825 #endif
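/* Build a TOC-relative reference to SYMBOL.  For the small code model
   (or while pseudos can still be created) this is a single
   UNSPEC_TOCREL; for larger code models the high part is split out
   into a HIGH/LO_SUM pair, held in LARGETOC_REG when one is given.  */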
21827 rtx
21828 create_TOC_reference (rtx symbol, rtx largetoc_reg)
21830 rtx tocrel, tocreg, hi;
21832 if (TARGET_DEBUG_ADDR)
21834 if (GET_CODE (symbol) == SYMBOL_REF)
21835 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
21836 XSTR (symbol, 0));
21837 else
21839 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
21840 GET_RTX_NAME (GET_CODE (symbol)));
21841 debug_rtx (symbol);
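/* Once pseudos can no longer be created, register allocation is done
   and dataflow will not discover this use, so mark the TOC register
   live explicitly.  */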
21845 if (!can_create_pseudo_p ())
21846 df_set_regs_ever_live (TOC_REGISTER, true);
21848 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
21849 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
21850 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
21851 return tocrel;
21853 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
21854 if (largetoc_reg != NULL)
21856 emit_move_insn (largetoc_reg, hi);
21857 hi = largetoc_reg;
21859 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
21862 /* Issue assembly directives that create a reference to the given DWARF
21863 FRAME_TABLE_LABEL from the current function section. */
21864 void
21865 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
21867 fprintf (asm_out_file, "\t.ref %s\n",
21868 (* targetm.strip_name_encoding) (frame_table_label));
21871 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
21872 and the change to the stack pointer. */
21874 static void
21875 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
21877 rtvec p;
21878 int i;
21879 rtx regs[3];
21881 i = 0;
21882 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
21883 if (hard_frame_needed)
21884 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
21885 if (!(REGNO (fp) == STACK_POINTER_REGNUM
21886 || (hard_frame_needed
21887 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
21888 regs[i++] = fp;
21890 p = rtvec_alloc (i);
21891 while (--i >= 0)
21893 rtx mem = gen_frame_mem (BLKmode, regs[i]);
21894 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
21897 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
21900 /* Emit the correct code for allocating stack space, as insns.
21901 If COPY_REG, leave a copy of the old stack pointer plus COPY_OFF in it.
21902 The generated code may use hard register 0 as a temporary. */
21904 static void
21905 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
21907 rtx insn;
21908 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
21909 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
21910 rtx todec = gen_int_mode (-size, Pmode);
21911 rtx par, set, mem;
21913 if (INTVAL (todec) != -size)
21915 warning (0, "stack frame too large");
21916 emit_insn (gen_trap ());
21917 return;
21920 if (crtl->limit_stack)
21922 if (REG_P (stack_limit_rtx)
21923 && REGNO (stack_limit_rtx) > 1
21924 && REGNO (stack_limit_rtx) <= 31)
21926 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
21927 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
21928 const0_rtx));
21930 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
21931 && TARGET_32BIT
21932 && DEFAULT_ABI == ABI_V4)
21934 rtx toload = gen_rtx_CONST (VOIDmode,
21935 gen_rtx_PLUS (Pmode,
21936 stack_limit_rtx,
21937 GEN_INT (size)));
21939 emit_insn (gen_elf_high (tmp_reg, toload));
21940 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
21941 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
21942 const0_rtx));
21944 else
21945 warning (0, "stack limit expression is not supported");
21948 if (copy_reg)
21950 if (copy_off != 0)
21951 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
21952 else
21953 emit_move_insn (copy_reg, stack_reg);
21956 if (size > 32767)
21958 /* Need a note here so that try_split doesn't get confused. */
21959 if (get_last_insn () == NULL_RTX)
21960 emit_note (NOTE_INSN_DELETED);
21961 insn = emit_move_insn (tmp_reg, todec);
21962 try_split (PATTERN (insn), insn, 0);
21963 todec = tmp_reg;
21966 insn = emit_insn (TARGET_32BIT
21967 ? gen_movsi_update_stack (stack_reg, stack_reg,
21968 todec, stack_reg)
21969 : gen_movdi_di_update_stack (stack_reg, stack_reg,
21970 todec, stack_reg));
21971 /* Since we didn't use gen_frame_mem to generate the MEM, grab
21972 it now and set the alias set/attributes. The above gen_*_update
21973 calls will generate a PARALLEL with the MEM set being the first
21974 operation. */
21975 par = PATTERN (insn);
21976 gcc_assert (GET_CODE (par) == PARALLEL);
21977 set = XVECEXP (par, 0, 0);
21978 gcc_assert (GET_CODE (set) == SET);
21979 mem = SET_DEST (set);
21980 gcc_assert (MEM_P (mem));
21981 MEM_NOTRAP_P (mem) = 1;
21982 set_mem_alias_set (mem, get_frame_alias_set ());
21984 RTX_FRAME_RELATED_P (insn) = 1;
21985 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
21986 gen_rtx_SET (VOIDmode, stack_reg,
21987 gen_rtx_PLUS (Pmode, stack_reg,
21988 GEN_INT (-size))));
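/* Distance between stack probes.  The #if below rejects intervals too
   large for the addressing modes used by the probe code.  */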
21991 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
21993 #if PROBE_INTERVAL > 32768
21994 #error Cannot use indexed addressing mode for stack probing
21995 #endif
21997 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
21998 inclusive. These are offsets from the current stack pointer. */
22000 static void
22001 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
22003 /* See if we have a constant small number of probes to generate. If so,
22004 that's the easy case. */
22005 if (first + size <= 32768)
22007 HOST_WIDE_INT i;
22009 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
22010 it exceeds SIZE. If only one probe is needed, this will not
22011 generate any code. Then probe at FIRST + SIZE. */
22012 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
22013 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22014 -(first + i)));
22016 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22017 -(first + size)));
22020 /* Otherwise, do the same as above, but in a loop. Note that we must be
22021 extra careful with variables wrapping around because we might be at
22022 the very top (or the very bottom) of the address space and we have
22023 to be able to handle this case properly; in particular, we use an
22024 equality test for the loop condition. */
22025 else
22027 HOST_WIDE_INT rounded_size;
22028 rtx r12 = gen_rtx_REG (Pmode, 12);
22029 rtx r0 = gen_rtx_REG (Pmode, 0);
22031 /* Sanity check for the addressing mode we're going to use. */
22032 gcc_assert (first <= 32768);
22034 /* Step 1: round SIZE to the previous multiple of the interval. */
22036 rounded_size = size & -PROBE_INTERVAL;
22039 /* Step 2: compute initial and final value of the loop counter. */
22041 /* TEST_ADDR = SP + FIRST. */
22042 emit_insn (gen_rtx_SET (VOIDmode, r12,
22043 plus_constant (Pmode, stack_pointer_rtx,
22044 -first)));
22046 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
22047 if (rounded_size > 32768)
22049 emit_move_insn (r0, GEN_INT (-rounded_size));
22050 emit_insn (gen_rtx_SET (VOIDmode, r0,
22051 gen_rtx_PLUS (Pmode, r12, r0)));
22053 else
22054 emit_insn (gen_rtx_SET (VOIDmode, r0,
22055 plus_constant (Pmode, r12, -rounded_size)));
22058 /* Step 3: the loop
22060 while (TEST_ADDR != LAST_ADDR)
22062 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
22063 probe at TEST_ADDR
22066 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
22067 until it is equal to ROUNDED_SIZE. */
22069 if (TARGET_64BIT)
22070 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
22071 else
22072 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
22075 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
22076 that SIZE is equal to ROUNDED_SIZE. */
22078 if (size != rounded_size)
22079 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
22083 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
22084 absolute addresses. */
22086 const char *
22087 output_probe_stack_range (rtx reg1, rtx reg2)
22089 static int labelno = 0;
22090 char loop_lab[32], end_lab[32];
22091 rtx xops[2];
22093 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
22094 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
22096 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
22098 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
22099 xops[0] = reg1;
22100 xops[1] = reg2;
22101 if (TARGET_64BIT)
22102 output_asm_insn ("cmpd 0,%0,%1", xops);
22103 else
22104 output_asm_insn ("cmpw 0,%0,%1", xops);
22106 fputs ("\tbeq 0,", asm_out_file);
22107 assemble_name_raw (asm_out_file, end_lab);
22108 fputc ('\n', asm_out_file);
22110 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
22111 xops[1] = GEN_INT (-PROBE_INTERVAL);
22112 output_asm_insn ("addi %0,%0,%1", xops);
22114 /* Probe at TEST_ADDR and branch. */
22115 xops[1] = gen_rtx_REG (Pmode, 0);
22116 output_asm_insn ("stw %1,0(%0)", xops);
22117 fprintf (asm_out_file, "\tb ");
22118 assemble_name_raw (asm_out_file, loop_lab);
22119 fputc ('\n', asm_out_file);
22121 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
22123 return "";
22126 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
22127 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
22128 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
22129 deduce these equivalences by itself so it wasn't necessary to hold
22130 its hand so much. Don't be tempted to always supply d2_f_d_e with
22131 the actual cfa register, i.e. r31 when we are using a hard frame
22132 pointer. That fails when saving regs off r1, and sched moves the
22133 r31 setup past the reg saves. */
22135 static rtx
22136 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
22137 rtx reg2, rtx rreg, rtx split_reg)
22139 rtx real, temp;
22141 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
22143 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
22144 int i;
22146 gcc_checking_assert (val == 0);
22147 real = PATTERN (insn);
22148 if (GET_CODE (real) == PARALLEL)
22149 for (i = 0; i < XVECLEN (real, 0); i++)
22150 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22152 rtx set = XVECEXP (real, 0, i);
22154 RTX_FRAME_RELATED_P (set) = 1;
22156 RTX_FRAME_RELATED_P (insn) = 1;
22157 return insn;
22160 /* copy_rtx will not make unique copies of registers, so we need to
22161 ensure we don't have unwanted sharing here. */
22162 if (reg == reg2)
22163 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22165 if (reg == rreg)
22166 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22168 real = copy_rtx (PATTERN (insn));
22170 if (reg2 != NULL_RTX)
22171 real = replace_rtx (real, reg2, rreg);
22173 if (REGNO (reg) == STACK_POINTER_REGNUM)
22174 gcc_checking_assert (val == 0);
22175 else
22176 real = replace_rtx (real, reg,
22177 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22178 STACK_POINTER_REGNUM),
22179 GEN_INT (val)));
22181 /* We expect that 'real' is either a SET or a PARALLEL containing
22182 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22183 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22185 if (GET_CODE (real) == SET)
22187 rtx set = real;
22189 temp = simplify_rtx (SET_SRC (set));
22190 if (temp)
22191 SET_SRC (set) = temp;
22192 temp = simplify_rtx (SET_DEST (set));
22193 if (temp)
22194 SET_DEST (set) = temp;
22195 if (GET_CODE (SET_DEST (set)) == MEM)
22197 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22198 if (temp)
22199 XEXP (SET_DEST (set), 0) = temp;
22202 else
22204 int i;
22206 gcc_assert (GET_CODE (real) == PARALLEL);
22207 for (i = 0; i < XVECLEN (real, 0); i++)
22208 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22210 rtx set = XVECEXP (real, 0, i);
22212 temp = simplify_rtx (SET_SRC (set));
22213 if (temp)
22214 SET_SRC (set) = temp;
22215 temp = simplify_rtx (SET_DEST (set));
22216 if (temp)
22217 SET_DEST (set) = temp;
22218 if (GET_CODE (SET_DEST (set)) == MEM)
22220 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22221 if (temp)
22222 XEXP (SET_DEST (set), 0) = temp;
22224 RTX_FRAME_RELATED_P (set) = 1;
22228 /* If a store insn has been split into multiple insns, the
22229 true source register is given by split_reg. */
22230 if (split_reg != NULL_RTX)
22231 real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);
22233 RTX_FRAME_RELATED_P (insn) = 1;
22234 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22236 return insn;
22239 /* Returns an insn that has a vrsave set operation with the
22240 appropriate CLOBBERs. */
22242 static rtx
22243 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22245 int nclobs, i;
22246 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22247 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22249 clobs[0]
22250 = gen_rtx_SET (VOIDmode,
22251 vrsave,
22252 gen_rtx_UNSPEC_VOLATILE (SImode,
22253 gen_rtvec (2, reg, vrsave),
22254 UNSPECV_SET_VRSAVE));
22256 nclobs = 1;
22258 /* We need to clobber the registers in the mask so the scheduler
22259 does not move sets to VRSAVE before sets of AltiVec registers.
22261 However, if the function receives nonlocal gotos, reload will set
22262 all call saved registers live. We will end up with:
22264 (set (reg 999) (mem))
22265 (parallel [ (set (reg vrsave) (unspec blah))
22266 (clobber (reg 999))])
22268 The clobber will cause the store into reg 999 to be dead, and
22269 flow will attempt to delete an epilogue insn. In this case, we
22270 need an unspec use/set of the register. */
22272 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22273 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22275 if (!epiloguep || call_used_regs [i])
22276 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22277 gen_rtx_REG (V4SImode, i));
22278 else
22280 rtx reg = gen_rtx_REG (V4SImode, i);
22282 clobs[nclobs++]
22283 = gen_rtx_SET (VOIDmode,
22284 reg,
22285 gen_rtx_UNSPEC (V4SImode,
22286 gen_rtvec (1, reg), 27));
22290 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
22292 for (i = 0; i < nclobs; ++i)
22293 XVECEXP (insn, 0, i) = clobs[i];
22295 return insn;
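/* Build the SET for a single frame slot: if STORE, then
   [FRAME_REG + OFFSET] = REG, otherwise REG = [FRAME_REG + OFFSET].  */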
22298 static rtx
22299 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
22301 rtx addr, mem;
22303 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
22304 mem = gen_frame_mem (GET_MODE (reg), addr);
22305 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
22308 static rtx
22309 gen_frame_load (rtx reg, rtx frame_reg, int offset)
22311 return gen_frame_set (reg, frame_reg, offset, false);
22314 static rtx
22315 gen_frame_store (rtx reg, rtx frame_reg, int offset)
22317 return gen_frame_set (reg, frame_reg, offset, true);
22320 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
22321 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
22323 static rtx
22324 emit_frame_save (rtx frame_reg, enum machine_mode mode,
22325 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
22327 rtx reg, insn;
22329 /* Some cases that need register indexed addressing. */
22330 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
22331 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
22332 || (TARGET_E500_DOUBLE && mode == DFmode)
22333 || (TARGET_SPE_ABI
22334 && SPE_VECTOR_MODE (mode)
22335 && !SPE_CONST_OFFSET_OK (offset))));
22337 reg = gen_rtx_REG (mode, regno);
22338 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
22339 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
22340 NULL_RTX, NULL_RTX, NULL_RTX);
22343 /* Emit an offset memory reference suitable for a frame store, while
22344 converting to a valid addressing mode. */
22346 static rtx
22347 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
22349 rtx int_rtx, offset_rtx;
22351 int_rtx = GEN_INT (offset);
22353 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
22354 || (TARGET_E500_DOUBLE && mode == DFmode))
22356 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
22357 emit_move_insn (offset_rtx, int_rtx);
22359 else
22360 offset_rtx = int_rtx;
22362 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
22365 #ifndef TARGET_FIX_AND_CONTINUE
22366 #define TARGET_FIX_AND_CONTINUE 0
22367 #endif
22369 /* The first saved register can be GPR 13 or 14, FPR 14, or VR 20;
22370 FIRST_SAVRES_REGISTER must be the smallest of these. */
22370 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
22371 #define LAST_SAVRES_REGISTER 31
22372 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
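/* Selector bits for the out-of-line save/restore routines: whether LR
   is handled, save vs. restore, and the register class (the SAVRES_REG
   field).  E.g. SAVRES_SAVE | SAVRES_FPR | SAVRES_LR names a routine
   that saves the FPRs and LR.  */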
22374 enum {
22375 SAVRES_LR = 0x1,
22376 SAVRES_SAVE = 0x2,
22377 SAVRES_REG = 0x0c,
22378 SAVRES_GPR = 0,
22379 SAVRES_FPR = 4,
22380 SAVRES_VR = 8
22383 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
22385 /* Temporary holding space for an out-of-line register save/restore
22386 routine name. */
22387 static char savres_routine_name[30];
22389 /* Return the name for an out-of-line register save/restore routine.
22390 SEL selects the register class, save vs. restore, and LR handling. */
22392 static char *
22393 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
22395 const char *prefix = "";
22396 const char *suffix = "";
22398 /* Different targets are supposed to define
22399 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
22400 routine name could be defined with:
22402 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
22404 This is a nice idea in theory, but in reality, things are
22405 complicated in several ways:
22407 - ELF targets have save/restore routines for GPRs.
22409 - SPE targets use different prefixes for 32/64-bit registers, and
22410 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
22412 - PPC64 ELF targets have routines for save/restore of GPRs that
22413 differ in what they do with the link register, so having a set
22414 prefix doesn't work. (We only use one of the save routines at
22415 the moment, though.)
22417 - PPC32 ELF targets have "exit" versions of the restore routines
22418 that restore the link register and can save some extra space.
22419 These require an extra suffix. (There are also "tail" versions
22420 of the restore routines and "GOT" versions of the save routines,
22421 but we don't generate those at present. Same problems apply,
22422 though.)
22424 We deal with all this by synthesizing our own prefix/suffix and
22425 using that for the simple sprintf call shown above. */
22426 if (TARGET_SPE)
22428 /* No floating point saves on the SPE. */
22429 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
22431 if ((sel & SAVRES_SAVE))
22432 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
22433 else
22434 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
22436 if ((sel & SAVRES_LR))
22437 suffix = "_x";
22439 else if (DEFAULT_ABI == ABI_V4)
22441 if (TARGET_64BIT)
22442 goto aix_names;
22444 if ((sel & SAVRES_REG) == SAVRES_GPR)
22445 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
22446 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22447 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
22448 else if ((sel & SAVRES_REG) == SAVRES_VR)
22449 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22450 else
22451 abort ();
22453 if ((sel & SAVRES_LR))
22454 suffix = "_x";
22456 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22458 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
22459 /* No out-of-line save/restore routines for GPRs on AIX. */
22460 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
22461 #endif
22463 aix_names:
22464 if ((sel & SAVRES_REG) == SAVRES_GPR)
22465 prefix = ((sel & SAVRES_SAVE)
22466 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
22467 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
22468 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22470 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
22471 if ((sel & SAVRES_LR))
22472 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
22473 else
22474 #endif
22476 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
22477 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
22480 else if ((sel & SAVRES_REG) == SAVRES_VR)
22481 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22482 else
22483 abort ();
22486 if (DEFAULT_ABI == ABI_DARWIN)
22488 /* The Darwin approach is (slightly) different, in order to be
22489 compatible with code generated by the system toolchain. There is a
22490 single symbol for the start of the save sequence, and the code here
22491 embeds an offset into that code on the basis of the first register
22492 to be saved. */
22493 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
22494 if ((sel & SAVRES_REG) == SAVRES_GPR)
22495 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
22496 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
22497 (regno - 13) * 4, prefix, regno);
22498 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22499 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
22500 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
22501 else if ((sel & SAVRES_REG) == SAVRES_VR)
22502 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
22503 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
22504 else
22505 abort ();
22507 else
22508 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
22510 return savres_routine_name;
22513 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
22514 SEL selects the register class, save vs. restore, and LR handling. */
22516 static rtx
22517 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
22519 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
22520 ? info->first_gp_reg_save
22521 : (sel & SAVRES_REG) == SAVRES_FPR
22522 ? info->first_fp_reg_save - 32
22523 : (sel & SAVRES_REG) == SAVRES_VR
22524 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
22525 : -1);
22526 rtx sym;
22527 int select = sel;
22529 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
22530 versions of the GPR routines. */
22531 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
22532 && info->spe_64bit_regs_used)
22533 select ^= SAVRES_FPR ^ SAVRES_GPR;
22535 /* Don't generate bogus routine names. */
22536 gcc_assert (FIRST_SAVRES_REGISTER <= regno
22537 && regno <= LAST_SAVRES_REGISTER
22538 && select >= 0 && select <= 12);
22540 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
22542 if (sym == NULL)
22544 char *name;
22546 name = rs6000_savres_routine_name (info, regno, sel);
22548 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
22549 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
22550 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
22553 return sym;
22556 /* Emit a sequence of insns, including a stack tie if needed, for
22557 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
22558 reset the stack pointer, but move the base of the frame into
22559 reg UPDT_REGNO for use by out-of-line register restore routines. */
22561 static rtx
22562 rs6000_emit_stack_reset (rs6000_stack_t *info,
22563 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
22564 unsigned updt_regno)
22566 rtx updt_reg_rtx;
22568 /* This blockage is needed so that sched doesn't decide to move
22569 the sp change before the register restores. */
22570 if (DEFAULT_ABI == ABI_V4
22571 || (TARGET_SPE_ABI
22572 && info->spe_64bit_regs_used != 0
22573 && info->first_gp_reg_save != 32))
22574 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
22576 /* If we are restoring registers out-of-line, we will be using the
22577 "exit" variants of the restore routines, which will reset the
22578 stack for us. But we do need to point updt_reg into the
22579 right place for those routines. */
22580 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
22582 if (frame_off != 0)
22583 return emit_insn (gen_add3_insn (updt_reg_rtx,
22584 frame_reg_rtx, GEN_INT (frame_off)));
22585 else if (REGNO (frame_reg_rtx) != updt_regno)
22586 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
22588 return NULL_RTX;
22591 /* Return the register number used as a pointer by out-of-line
22592 save/restore functions. */
22594 static inline unsigned
22595 ptr_regno_for_savres (int sel)
22597 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22598 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
22599 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
22602 /* Construct a parallel rtx describing the effect of a call to an
22603 out-of-line register save/restore routine, and emit the insn
22604 or jump_insn as appropriate. */
22606 static rtx
22607 rs6000_emit_savres_rtx (rs6000_stack_t *info,
22608 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
22609 enum machine_mode reg_mode, int sel)
22611 int i;
22612 int offset, start_reg, end_reg, n_regs, use_reg;
22613 int reg_size = GET_MODE_SIZE (reg_mode);
22614 rtx sym;
22615 rtvec p;
22616 rtx par, insn;
22618 offset = 0;
22619 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22620 ? info->first_gp_reg_save
22621 : (sel & SAVRES_REG) == SAVRES_FPR
22622 ? info->first_fp_reg_save
22623 : (sel & SAVRES_REG) == SAVRES_VR
22624 ? info->first_altivec_reg_save
22625 : -1);
22626 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22627 ? 32
22628 : (sel & SAVRES_REG) == SAVRES_FPR
22629 ? 64
22630 : (sel & SAVRES_REG) == SAVRES_VR
22631 ? LAST_ALTIVEC_REGNO + 1
22632 : -1);
22633 n_regs = end_reg - start_reg;
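/* The PARALLEL holds: an optional (return), a clobber of LR, a use of
   the routine's symbol, the pointer register (plus a use of r0 for VR
   routines), one SET per register, and an optional LR store.  */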
22634 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
22635 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
22636 + n_regs);
22638 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22639 RTVEC_ELT (p, offset++) = ret_rtx;
22641 RTVEC_ELT (p, offset++)
22642 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
22644 sym = rs6000_savres_routine_sym (info, sel);
22645 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
22647 use_reg = ptr_regno_for_savres (sel);
22648 if ((sel & SAVRES_REG) == SAVRES_VR)
22650 /* Vector regs are saved/restored using [reg+reg] addressing. */
22651 RTVEC_ELT (p, offset++)
22652 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22653 RTVEC_ELT (p, offset++)
22654 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
22656 else
22657 RTVEC_ELT (p, offset++)
22658 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22660 for (i = 0; i < end_reg - start_reg; i++)
22661 RTVEC_ELT (p, i + offset)
22662 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
22663 frame_reg_rtx, save_area_offset + reg_size * i,
22664 (sel & SAVRES_SAVE) != 0);
22666 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22667 RTVEC_ELT (p, i + offset)
22668 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
22670 par = gen_rtx_PARALLEL (VOIDmode, p);
22672 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22674 insn = emit_jump_insn (par);
22675 JUMP_LABEL (insn) = ret_rtx;
22677 else
22678 insn = emit_insn (par);
22679 return insn;
22682 /* Emit code to store into REG the CR fields that need to be saved. */
22684 static void
22685 rs6000_emit_move_from_cr (rtx reg)
22687 /* Only the ELFv2 ABI allows saving a subset of the CR fields. */
22688 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
22690 int i, cr_reg[8], count = 0;
22692 /* Collect CR fields that must be saved. */
22693 for (i = 0; i < 8; i++)
22694 if (save_reg_p (CR0_REGNO + i))
22695 cr_reg[count++] = i;
22697 /* If it's just a single one, use mfcrf. */
22698 if (count == 1)
22700 rtvec p = rtvec_alloc (1);
22701 rtvec r = rtvec_alloc (2);
22702 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
22703 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
22704 RTVEC_ELT (p, 0)
22705 = gen_rtx_SET (VOIDmode, reg,
22706 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
22708 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22709 return;
22712 /* ??? It might be better to handle count == 2 / 3 cases here
22713 as well, using logical operations to combine the values. */
22716 emit_insn (gen_movesi_from_cr (reg));
22719 /* Determine whether the GP register REG is really used. */
22721 static bool
22722 rs6000_reg_live_or_pic_offset_p (int reg)
22724 /* If the function calls eh_return, claim that all the registers that would
22725 otherwise be checked for liveness are used. This is required for the PIC offset
22726 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
22727 register allocation purposes in this case. */
22729 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
22730 && (!call_used_regs[reg]
22731 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22732 && !TARGET_SINGLE_PIC_BASE
22733 && TARGET_TOC && TARGET_MINIMAL_TOC)))
22734 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22735 && !TARGET_SINGLE_PIC_BASE
22736 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
22737 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
22740 /* Emit function prologue as insns. */
22742 void
22743 rs6000_emit_prologue (void)
22745 rs6000_stack_t *info = rs6000_stack_info ();
22746 enum machine_mode reg_mode = Pmode;
22747 int reg_size = TARGET_32BIT ? 4 : 8;
22748 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22749 rtx frame_reg_rtx = sp_reg_rtx;
22750 unsigned int cr_save_regno;
22751 rtx cr_save_rtx = NULL_RTX;
22752 rtx insn;
22753 int strategy;
22754 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
22755 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
22756 && call_used_regs[STATIC_CHAIN_REGNUM]);
22757 /* Offset to top of frame for frame_reg and sp respectively. */
22758 HOST_WIDE_INT frame_off = 0;
22759 HOST_WIDE_INT sp_off = 0;
22761 #ifdef ENABLE_CHECKING
22762 /* Track and check usage of r0, r11, r12. */
22763 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
22764 #define START_USE(R) do \
22766 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22767 reg_inuse |= 1 << (R); \
22768 } while (0)
22769 #define END_USE(R) do \
22771 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
22772 reg_inuse &= ~(1 << (R)); \
22773 } while (0)
22774 #define NOT_INUSE(R) do \
22776 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
22777 } while (0)
22778 #else
22779 #define START_USE(R) do {} while (0)
22780 #define END_USE(R) do {} while (0)
22781 #define NOT_INUSE(R) do {} while (0)
22782 #endif
22784 if (DEFAULT_ABI == ABI_ELFv2)
22786 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
22788 /* With -mminimal-toc we may generate an extra use of r2 below. */
22789 if (!TARGET_SINGLE_PIC_BASE
22790 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
22791 cfun->machine->r2_setup_needed = true;
22795 if (flag_stack_usage_info)
22796 current_function_static_stack_size = info->total_size;
22798 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
22800 HOST_WIDE_INT size = info->total_size;
22802 if (crtl->is_leaf && !cfun->calls_alloca)
22804 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
22805 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
22806 size - STACK_CHECK_PROTECT);
22808 else if (size > 0)
22809 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
22812 if (TARGET_FIX_AND_CONTINUE)
22814 /* GDB on Darwin arranges to forward a function from the old
22815 address by modifying the first 5 instructions of the function
22816 to branch to the overriding function. This is necessary to
22817 permit function pointers that point to the old function to
22818 actually forward to the new function. */
22819 emit_insn (gen_nop ());
22820 emit_insn (gen_nop ());
22821 emit_insn (gen_nop ());
22822 emit_insn (gen_nop ());
22823 emit_insn (gen_nop ());
22826 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
22828 reg_mode = V2SImode;
22829 reg_size = 8;
22832 /* Handle world saves specially here. */
22833 if (WORLD_SAVE_P (info))
22835 int i, j, sz;
22836 rtx treg;
22837 rtvec p;
22838 rtx reg0;
22840 /* save_world expects lr in r0. */
22841 reg0 = gen_rtx_REG (Pmode, 0);
22842 if (info->lr_save_p)
22844 insn = emit_move_insn (reg0,
22845 gen_rtx_REG (Pmode, LR_REGNO));
22846 RTX_FRAME_RELATED_P (insn) = 1;
22849 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
22850 assumptions about the offsets of various bits of the stack
22851 frame. */
22852 gcc_assert (info->gp_save_offset == -220
22853 && info->fp_save_offset == -144
22854 && info->lr_save_offset == 8
22855 && info->cr_save_offset == 4
22856 && info->push_p
22857 && info->lr_save_p
22858 && (!crtl->calls_eh_return
22859 || info->ehrd_offset == -432)
22860 && info->vrsave_save_offset == -224
22861 && info->altivec_save_offset == -416);
22863 treg = gen_rtx_REG (SImode, 11);
22864 emit_move_insn (treg, GEN_INT (-info->total_size));
22866 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
22867 in R11. It also clobbers R12, so beware! */
22869 /* Preserve CR2 for save_world prologues. */
22870 sz = 5;
22871 sz += 32 - info->first_gp_reg_save;
22872 sz += 64 - info->first_fp_reg_save;
22873 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
22874 p = rtvec_alloc (sz);
22875 j = 0;
22876 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
22877 gen_rtx_REG (SImode,
22878 LR_REGNO));
22879 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
22880 gen_rtx_SYMBOL_REF (Pmode,
22881 "*save_world"));
22882 /* We do floats first so that the instruction pattern matches
22883 properly. */
22884 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
22885 RTVEC_ELT (p, j++)
22886 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
22887 ? DFmode : SFmode,
22888 info->first_fp_reg_save + i),
22889 frame_reg_rtx,
22890 info->fp_save_offset + frame_off + 8 * i);
22891 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
22892 RTVEC_ELT (p, j++)
22893 = gen_frame_store (gen_rtx_REG (V4SImode,
22894 info->first_altivec_reg_save + i),
22895 frame_reg_rtx,
22896 info->altivec_save_offset + frame_off + 16 * i);
22897 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
22898 RTVEC_ELT (p, j++)
22899 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
22900 frame_reg_rtx,
22901 info->gp_save_offset + frame_off + reg_size * i);
22903 /* CR register traditionally saved as CR2. */
22904 RTVEC_ELT (p, j++)
22905 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
22906 frame_reg_rtx, info->cr_save_offset + frame_off);
22907 /* Explain the use of R0. */
22908 if (info->lr_save_p)
22909 RTVEC_ELT (p, j++)
22910 = gen_frame_store (reg0,
22911 frame_reg_rtx, info->lr_save_offset + frame_off);
22912 /* Explain what happens to the stack pointer. */
22914 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
22915 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
22918 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22919 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
22920 treg, GEN_INT (-info->total_size), NULL_RTX);
22921 sp_off = frame_off = info->total_size;
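/* The save strategy, computed by rs6000_savres_strategy, chooses among
   inline saves, out-of-line routines, and store-multiple.  */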
22924 strategy = info->savres_strategy;
22926 /* For V.4, update stack before we do any saving and set back pointer. */
22927 if (! WORLD_SAVE_P (info)
22928 && info->push_p
22929 && (DEFAULT_ABI == ABI_V4
22930 || crtl->calls_eh_return))
22932 bool need_r11 = (TARGET_SPE
22933 ? (!(strategy & SAVE_INLINE_GPRS)
22934 && info->spe_64bit_regs_used == 0)
22935 : (!(strategy & SAVE_INLINE_FPRS)
22936 || !(strategy & SAVE_INLINE_GPRS)
22937 || !(strategy & SAVE_INLINE_VRS)));
22938 int ptr_regno = -1;
22939 rtx ptr_reg = NULL_RTX;
22940 int ptr_off = 0;
22942 if (info->total_size < 32767)
22943 frame_off = info->total_size;
22944 else if (need_r11)
22945 ptr_regno = 11;
22946 else if (info->cr_save_p
22947 || info->lr_save_p
22948 || info->first_fp_reg_save < 64
22949 || info->first_gp_reg_save < 32
22950 || info->altivec_size != 0
22951 || info->vrsave_mask != 0
22952 || crtl->calls_eh_return)
22953 ptr_regno = 12;
22954 else
22956 /* The prologue won't be saving any regs so there is no need
22957 to set up a frame register to access any frame save area.
22958 We also won't be using frame_off anywhere below, but set
22959 the correct value anyway to protect against future
22960 changes to this function. */
22961 frame_off = info->total_size;
22963 if (ptr_regno != -1)
22965 /* Set up the frame offset to that needed by the first
22966 out-of-line save function. */
22967 START_USE (ptr_regno);
22968 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
22969 frame_reg_rtx = ptr_reg;
22970 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
22971 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
22972 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
22973 ptr_off = info->gp_save_offset + info->gp_size;
22974 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
22975 ptr_off = info->altivec_save_offset + info->altivec_size;
22976 frame_off = -ptr_off;
22978 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
22979 sp_off = info->total_size;
22980 if (frame_reg_rtx != sp_reg_rtx)
22981 rs6000_emit_stack_tie (frame_reg_rtx, false);
22984 /* If we use the link register, get it into r0. */
22985 if (!WORLD_SAVE_P (info) && info->lr_save_p)
22987 rtx addr, reg, mem;
22989 reg = gen_rtx_REG (Pmode, 0);
22990 START_USE (0);
22991 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
22992 RTX_FRAME_RELATED_P (insn) = 1;
22994 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
22995 | SAVE_NOINLINE_FPRS_SAVES_LR)))
22997 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
22998 GEN_INT (info->lr_save_offset + frame_off));
22999 mem = gen_rtx_MEM (Pmode, addr);
23000 /* This should not be of rs6000_sr_alias_set, because of
23001 __builtin_return_address. */
23003 insn = emit_move_insn (mem, reg);
23004 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23005 NULL_RTX, NULL_RTX, NULL_RTX);
23006 END_USE (0);
23010 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
23011 r12 will be needed by out-of-line gpr save. */
23012 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23013 && !(strategy & (SAVE_INLINE_GPRS
23014 | SAVE_NOINLINE_GPRS_SAVES_LR))
23015 ? 11 : 12);
23016 if (!WORLD_SAVE_P (info)
23017 && info->cr_save_p
23018 && REGNO (frame_reg_rtx) != cr_save_regno
23019 && !(using_static_chain_p && cr_save_regno == 11))
23021 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
23022 START_USE (cr_save_regno);
23023 rs6000_emit_move_from_cr (cr_save_rtx);
23026 /* Do any required saving of FPRs. If only one or two to save, do
23027 it ourselves. Otherwise, call an out-of-line save function. */
23028 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
23030 int i;
23031 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23032 if (save_reg_p (info->first_fp_reg_save + i))
23033 emit_frame_save (frame_reg_rtx,
23034 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23035 ? DFmode : SFmode),
23036 info->first_fp_reg_save + i,
23037 info->fp_save_offset + frame_off + 8 * i,
23038 sp_off - frame_off);
23040 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
23042 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23043 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23044 unsigned ptr_regno = ptr_regno_for_savres (sel);
23045 rtx ptr_reg = frame_reg_rtx;
23047 if (REGNO (frame_reg_rtx) == ptr_regno)
23048 gcc_checking_assert (frame_off == 0);
23049 else
23051 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23052 NOT_INUSE (ptr_regno);
23053 emit_insn (gen_add3_insn (ptr_reg,
23054 frame_reg_rtx, GEN_INT (frame_off)));
23056 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23057 info->fp_save_offset,
23058 info->lr_save_offset,
23059 DFmode, sel);
23060 rs6000_frame_related (insn, ptr_reg, sp_off,
23061 NULL_RTX, NULL_RTX, NULL_RTX);
23062 if (lr)
23063 END_USE (0);
23066 /* Save GPRs. This is done as a PARALLEL if we are using
23067 the store-multiple instructions. */
23068 if (!WORLD_SAVE_P (info)
23069 && TARGET_SPE_ABI
23070 && info->spe_64bit_regs_used != 0
23071 && info->first_gp_reg_save != 32)
23073 int i;
23074 rtx spe_save_area_ptr;
23075 HOST_WIDE_INT save_off;
23076 int ool_adjust = 0;
23078 /* Determine whether we can address all of the registers that need
23079 to be saved with an offset from frame_reg_rtx that fits in
23080 the small const field for SPE memory instructions. */
23081 int spe_regs_addressable
23082 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
23083 + reg_size * (32 - info->first_gp_reg_save - 1))
23084 && (strategy & SAVE_INLINE_GPRS));
23086 if (spe_regs_addressable)
23088 spe_save_area_ptr = frame_reg_rtx;
23089 save_off = frame_off;
23091 else
23093 /* Make r11 point to the start of the SPE save area. We need
23094 to be careful here if r11 is holding the static chain. If
23095 it is, then temporarily save it in r0. */
23096 HOST_WIDE_INT offset;
23098 if (!(strategy & SAVE_INLINE_GPRS))
23099 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
23100 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
23101 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
23102 save_off = frame_off - offset;
23104 if (using_static_chain_p)
23106 rtx r0 = gen_rtx_REG (Pmode, 0);
23108 START_USE (0);
23109 gcc_assert (info->first_gp_reg_save > 11);
23111 emit_move_insn (r0, spe_save_area_ptr);
23113 else if (REGNO (frame_reg_rtx) != 11)
23114 START_USE (11);
23116 emit_insn (gen_addsi3 (spe_save_area_ptr,
23117 frame_reg_rtx, GEN_INT (offset)));
23118 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
23119 frame_off = -info->spe_gp_save_offset + ool_adjust;
23122 if ((strategy & SAVE_INLINE_GPRS))
23124 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23125 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23126 emit_frame_save (spe_save_area_ptr, reg_mode,
23127 info->first_gp_reg_save + i,
23128 (info->spe_gp_save_offset + save_off
23129 + reg_size * i),
23130 sp_off - save_off);
23132 else
23134 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
23135 info->spe_gp_save_offset + save_off,
23136 0, reg_mode,
23137 SAVRES_SAVE | SAVRES_GPR);
23139 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
23140 NULL_RTX, NULL_RTX, NULL_RTX);
23143 /* Move the static chain pointer back. */
23144 if (!spe_regs_addressable)
23146 if (using_static_chain_p)
23148 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
23149 END_USE (0);
23151 else if (REGNO (frame_reg_rtx) != 11)
23152 END_USE (11);
23155 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
23157 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23158 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23159 unsigned ptr_regno = ptr_regno_for_savres (sel);
23160 rtx ptr_reg = frame_reg_rtx;
23161 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23162 int end_save = info->gp_save_offset + info->gp_size;
23163 int ptr_off;
23165 if (!ptr_set_up)
23166 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23168 /* Need to adjust r11 (r12) if we saved any FPRs. */
23169 if (end_save + frame_off != 0)
23171 rtx offset = GEN_INT (end_save + frame_off);
23173 if (ptr_set_up)
23174 frame_off = -end_save;
23175 else
23176 NOT_INUSE (ptr_regno);
23177 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23179 else if (!ptr_set_up)
23181 NOT_INUSE (ptr_regno);
23182 emit_move_insn (ptr_reg, frame_reg_rtx);
23184 ptr_off = -end_save;
23185 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23186 info->gp_save_offset + ptr_off,
23187 info->lr_save_offset + ptr_off,
23188 reg_mode, sel);
23189 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23190 NULL_RTX, NULL_RTX, NULL_RTX);
23191 if (lr)
23192 END_USE (0);
23194 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23196 rtvec p;
23197 int i;
23198 p = rtvec_alloc (32 - info->first_gp_reg_save);
23199 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23200 RTVEC_ELT (p, i)
23201 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23202 frame_reg_rtx,
23203 info->gp_save_offset + frame_off + reg_size * i);
23204 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23205 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23206 NULL_RTX, NULL_RTX, NULL_RTX);
23208 else if (!WORLD_SAVE_P (info))
23210 int i;
23211 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23212 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23213 emit_frame_save (frame_reg_rtx, reg_mode,
23214 info->first_gp_reg_save + i,
23215 info->gp_save_offset + frame_off + reg_size * i,
23216 sp_off - frame_off);
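/* Save the EH return data registers, and describe the whole block to
   DWARF via a single blockage insn.  */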
23219 if (crtl->calls_eh_return)
23221 unsigned int i;
23222 rtvec p;
23224 for (i = 0; ; ++i)
23226 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23227 if (regno == INVALID_REGNUM)
23228 break;
23231 p = rtvec_alloc (i);
23233 for (i = 0; ; ++i)
23235 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23236 if (regno == INVALID_REGNUM)
23237 break;
23239 insn
23240 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23241 sp_reg_rtx,
23242 info->ehrd_offset + sp_off + reg_size * (int) i);
23243 RTVEC_ELT (p, i) = insn;
23244 RTX_FRAME_RELATED_P (insn) = 1;
23247 insn = emit_insn (gen_blockage ());
23248 RTX_FRAME_RELATED_P (insn) = 1;
23249 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23252 /* In the AIX ABI we need to make sure r2 is really saved. */
23253 if (TARGET_AIX && crtl->calls_eh_return)
23255 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23256 rtx save_insn, join_insn, note;
23257 long toc_restore_insn;
23259 tmp_reg = gen_rtx_REG (Pmode, 11);
23260 tmp_reg_si = gen_rtx_REG (SImode, 11);
23261 if (using_static_chain_p)
23263 START_USE (0);
23264 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23266 else
23267 START_USE (11);
23268 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23269 /* Peek at the instruction to which this function returns. If it's
23270 restoring r2, then we know we've already saved r2. We can't
23271 unconditionally save r2 because the value we have will already
23272 be updated if we arrived at this function via a PLT call or
23273 TOC-adjusting stub. */
23274 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23275 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23276 + RS6000_TOC_SAVE_SLOT);
23277 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23278 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23279 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23280 validate_condition_mode (EQ, CCUNSmode);
23281 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23282 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
23283 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
23284 toc_save_done = gen_label_rtx ();
23285 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
23286 gen_rtx_EQ (VOIDmode, compare_result,
23287 const0_rtx),
23288 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
23289 pc_rtx);
23290 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
23291 JUMP_LABEL (jump) = toc_save_done;
23292 LABEL_NUSES (toc_save_done) += 1;
23294 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
23295 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
23296 sp_off - frame_off);
23298 emit_label (toc_save_done);
23300 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
23301 have a CFG that has different saves along different paths.
23302 Move the note to a dummy blockage insn, which describes that
23303 R2 is unconditionally saved after the label. */
23304 /* ??? An alternate representation might be a special insn pattern
23305 containing both the branch and the store. That might give the
23306 code that minimizes the number of DW_CFA_advance opcodes more
23307 freedom in placing the annotations. */
23308 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
23309 if (note)
23310 remove_note (save_insn, note);
23311 else
23312 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
23313 copy_rtx (PATTERN (save_insn)), NULL_RTX);
23314 RTX_FRAME_RELATED_P (save_insn) = 0;
23316 join_insn = emit_insn (gen_blockage ());
23317 REG_NOTES (join_insn) = note;
23318 RTX_FRAME_RELATED_P (join_insn) = 1;
23320 if (using_static_chain_p)
23322 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
23323 END_USE (0);
23325 else
23326 END_USE (11);
23329 /* Save CR if we use any CR fields that must be preserved. */
23330 if (!WORLD_SAVE_P (info) && info->cr_save_p)
23332 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23333 GEN_INT (info->cr_save_offset + frame_off));
23334 rtx mem = gen_frame_mem (SImode, addr);
23336 /* If we didn't copy CR before, do so now using r0. */
23337 if (cr_save_rtx == NULL_RTX)
23339 START_USE (0);
23340 cr_save_rtx = gen_rtx_REG (SImode, 0);
23341 rs6000_emit_move_from_cr (cr_save_rtx);
23344 /* Saving CR requires a two-instruction sequence: one instruction
23345 to move the CR to a general-purpose register, and a second
23346 instruction that stores the GPR to memory.
23348 We do not emit any DWARF CFI records for the first of these,
23349 because we cannot properly represent the fact that CR is saved in
23350 a register. One reason is that we cannot express that multiple
23351 CR fields are saved; another reason is that on 64-bit, the size
23352 of the CR register in DWARF (4 bytes) differs from the size of
23353 a general-purpose register.
23355 This means if any intervening instruction were to clobber one of
23356 the call-saved CR fields, we'd have incorrect CFI. To prevent
23357 this from happening, we mark the store to memory as a use of
23358 those CR fields, which prevents any such instruction from being
23359 scheduled in between the two instructions. */
23360 rtx crsave_v[9];
23361 int n_crsave = 0;
23362 int i;
23364 crsave_v[n_crsave++] = gen_rtx_SET (VOIDmode, mem, cr_save_rtx);
23365 for (i = 0; i < 8; i++)
23366 if (save_reg_p (CR0_REGNO + i))
23367 crsave_v[n_crsave++]
23368 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23370 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
23371 gen_rtvec_v (n_crsave, crsave_v)));
23372 END_USE (REGNO (cr_save_rtx));
23374 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23375 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23376 so we need to construct a frame expression manually. */
23377 RTX_FRAME_RELATED_P (insn) = 1;
23379 /* Update address to be stack-pointer relative, like
23380 rs6000_frame_related would do. */
23381 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
23382 GEN_INT (info->cr_save_offset + sp_off));
23383 mem = gen_frame_mem (SImode, addr);
23385 if (DEFAULT_ABI == ABI_ELFv2)
23387 /* In the ELFv2 ABI we generate separate CFI records for each
23388 CR field that was actually saved. They all point to the
23389 same 32-bit stack slot. */
23390 rtx crframe[8];
23391 int n_crframe = 0;
23393 for (i = 0; i < 8; i++)
23394 if (save_reg_p (CR0_REGNO + i))
23396 crframe[n_crframe]
23397 = gen_rtx_SET (VOIDmode, mem,
23398 gen_rtx_REG (SImode, CR0_REGNO + i));
23400 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
23401 n_crframe++;
23404 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23405 gen_rtx_PARALLEL (VOIDmode,
23406 gen_rtvec_v (n_crframe, crframe)));
23408 else
23410 /* In other ABIs, by convention, we use a single CR regnum to
23411 represent the fact that all call-saved CR fields are saved.
23412 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23413 rtx set = gen_rtx_SET (VOIDmode, mem,
23414 gen_rtx_REG (SImode, CR2_REGNO));
23415 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
23419 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23420 *separate* slots if the routine calls __builtin_eh_return, so
23421 that they can be independently restored by the unwinder. */
23422 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23424 int i, cr_off = info->ehcr_offset;
23425 rtx crsave;
23427 /* ??? We might get better performance by using multiple mfocrf
23428 instructions. */
23429 crsave = gen_rtx_REG (SImode, 0);
23430 emit_insn (gen_movesi_from_cr (crsave));
23432 for (i = 0; i < 8; i++)
23433 if (!call_used_regs[CR0_REGNO + i])
23435 rtvec p = rtvec_alloc (2);
23436 RTVEC_ELT (p, 0)
23437 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
23438 RTVEC_ELT (p, 1)
23439 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23441 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23443 RTX_FRAME_RELATED_P (insn) = 1;
23444 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23445 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
23446 sp_reg_rtx, cr_off + sp_off));
23448 cr_off += reg_size;
23452 /* Update stack and set back pointer unless this is V.4,
23453 for which it was done previously. */
23454 if (!WORLD_SAVE_P (info) && info->push_p
23455 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
23457 rtx ptr_reg = NULL;
23458 int ptr_off = 0;
23460 /* If saving AltiVec regs we need to be able to address all save
23461 locations using a 16-bit offset. */
23462 if ((strategy & SAVE_INLINE_VRS) == 0
23463 || (info->altivec_size != 0
23464 && (info->altivec_save_offset + info->altivec_size - 16
23465 + info->total_size - frame_off) > 32767)
23466 || (info->vrsave_size != 0
23467 && (info->vrsave_save_offset
23468 + info->total_size - frame_off) > 32767))
23470 int sel = SAVRES_SAVE | SAVRES_VR;
23471 unsigned ptr_regno = ptr_regno_for_savres (sel);
23473 if (using_static_chain_p
23474 && ptr_regno == STATIC_CHAIN_REGNUM)
23475 ptr_regno = 12;
23476 if (REGNO (frame_reg_rtx) != ptr_regno)
23477 START_USE (ptr_regno);
23478 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23479 frame_reg_rtx = ptr_reg;
23480 ptr_off = info->altivec_save_offset + info->altivec_size;
23481 frame_off = -ptr_off;
23483 else if (REGNO (frame_reg_rtx) == 1)
23484 frame_off = info->total_size;
23485 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23486 sp_off = info->total_size;
23487 if (frame_reg_rtx != sp_reg_rtx)
23488 rs6000_emit_stack_tie (frame_reg_rtx, false);
23491 /* Set frame pointer, if needed. */
23492 if (frame_pointer_needed)
23494 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
23495 sp_reg_rtx);
23496 RTX_FRAME_RELATED_P (insn) = 1;
23499 /* Save AltiVec registers if needed. Save here because the red zone does
23500 not always include AltiVec registers. */
23501 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23502 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
23504 int end_save = info->altivec_save_offset + info->altivec_size;
23505 int ptr_off;
23506 /* Oddly, the vector save/restore functions point r0 at the end
23507 of the save area, then use r11 or r12 to load offsets for
23508 [reg+reg] addressing. */
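     /* Schematically (a sketch only, not the exact out-of-line code):

	   r0  <- frame_reg + altivec_save_offset + altivec_size
	   r11 <- -16, -32, ...		(offsets back from r0)
	   stvx vN,r0,r11		([reg+reg] addressing)  */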
23509 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
23510 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
23511 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
23513 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23514 NOT_INUSE (0);
23515 if (end_save + frame_off != 0)
23517 rtx offset = GEN_INT (end_save + frame_off);
23519 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23521 else
23522 emit_move_insn (ptr_reg, frame_reg_rtx);
23524 ptr_off = -end_save;
23525 insn = rs6000_emit_savres_rtx (info, scratch_reg,
23526 info->altivec_save_offset + ptr_off,
23527 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
23528 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
23529 NULL_RTX, NULL_RTX, NULL_RTX);
23530 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
23532 /* The oddity mentioned above clobbered our frame reg. */
23533 emit_move_insn (frame_reg_rtx, ptr_reg);
23534 frame_off = ptr_off;
23537 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23538 && info->altivec_size != 0)
23540 int i;
23542 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
23543 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
23545 rtx areg, savereg, mem, split_reg;
23546 int offset;
23548 offset = (info->altivec_save_offset + frame_off
23549 + 16 * (i - info->first_altivec_reg_save));
23551 savereg = gen_rtx_REG (V4SImode, i);
23553 NOT_INUSE (0);
23554 areg = gen_rtx_REG (Pmode, 0);
23555 emit_move_insn (areg, GEN_INT (offset));
23557 /* AltiVec addressing mode is [reg+reg]. */
23558 mem = gen_frame_mem (V4SImode,
23559 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
23561 insn = emit_move_insn (mem, savereg);
23563 /* When we split a VSX store into two insns, we need to make
23564 sure the DWARF info knows which register we are storing.
23565 Pass it in to be used on the appropriate note. */
23566 if (!BYTES_BIG_ENDIAN
23567 && GET_CODE (PATTERN (insn)) == SET
23568 && GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
23569 split_reg = savereg;
23570 else
23571 split_reg = NULL_RTX;
23573 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23574 areg, GEN_INT (offset), split_reg);
23578 /* VRSAVE is a bit vector representing which AltiVec registers
23579 are used. The OS uses this to determine which vector
23580 registers to save on a context switch. We need to save
23581 VRSAVE on the stack frame, add whatever AltiVec registers we
23582 used in this function, and do the corresponding magic in the
23583 epilogue. */
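   /* For example, a function touching only v20 and v21 would have,
      assuming the usual MSB-is-v0 VRSAVE bit layout,

	 info->vrsave_mask == (0x80000000 >> 20) | (0x80000000 >> 21)
			   == 0x00000c00  */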
23585 if (!WORLD_SAVE_P (info)
23586 && TARGET_ALTIVEC
23587 && TARGET_ALTIVEC_VRSAVE
23588 && info->vrsave_mask != 0)
23590 rtx reg, vrsave;
23591 int offset;
23592 int save_regno;
23594 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
23595 be using r12 as frame_reg_rtx and r11 as the static chain
23596 pointer for nested functions. */
23597 save_regno = 12;
23598 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23599 && !using_static_chain_p)
23600 save_regno = 11;
23601 else if (REGNO (frame_reg_rtx) == 12)
23603 save_regno = 11;
23604 if (using_static_chain_p)
23605 save_regno = 0;
23608 NOT_INUSE (save_regno);
23609 reg = gen_rtx_REG (SImode, save_regno);
23610 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
23611 if (TARGET_MACHO)
23612 emit_insn (gen_get_vrsave_internal (reg));
23613 else
23614 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
23616 /* Save VRSAVE. */
23617 offset = info->vrsave_save_offset + frame_off;
23618 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
23620 /* Include the registers in the mask. */
23621 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
23623 insn = emit_insn (generate_set_vrsave (reg, info, 0));
23626 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
23627 if (!TARGET_SINGLE_PIC_BASE
23628 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23629 || (DEFAULT_ABI == ABI_V4
23630 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23631 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
23633 /* If emit_load_toc_table will use the link register, we need to save
23634 it. We use R12 for this purpose because emit_load_toc_table
23635 can use register 0. This allows us to use a plain 'blr' to return
23636 from the procedure more often. */
23637 int save_LR_around_toc_setup = (TARGET_ELF
23638 && DEFAULT_ABI == ABI_V4
23639 && flag_pic
23640 && ! info->lr_save_p
23641 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
23642 if (save_LR_around_toc_setup)
23644 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23645 rtx tmp = gen_rtx_REG (Pmode, 12);
23647 insn = emit_move_insn (tmp, lr);
23648 RTX_FRAME_RELATED_P (insn) = 1;
23650 rs6000_emit_load_toc_table (TRUE);
23652 insn = emit_move_insn (lr, tmp);
23653 add_reg_note (insn, REG_CFA_RESTORE, lr);
23654 RTX_FRAME_RELATED_P (insn) = 1;
23656 else
23657 rs6000_emit_load_toc_table (TRUE);
23660 #if TARGET_MACHO
23661 if (!TARGET_SINGLE_PIC_BASE
23662 && DEFAULT_ABI == ABI_DARWIN
23663 && flag_pic && crtl->uses_pic_offset_table)
23665 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23666 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
23668 /* Save and restore LR locally around this call (in R0). */
23669 if (!info->lr_save_p)
23670 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
23672 emit_insn (gen_load_macho_picbase (src));
23674 emit_move_insn (gen_rtx_REG (Pmode,
23675 RS6000_PIC_OFFSET_TABLE_REGNUM),
23676 lr);
23678 if (!info->lr_save_p)
23679 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
23681 #endif
23683 /* If we need to, save the TOC register after doing the stack setup.
23684 Do not emit eh frame info for this save. The unwinder wants info,
23685 conceptually attached to instructions in this function, about
23686 register values in the caller of this function. This R2 may have
23687 already been changed from the value in the caller.
23688 We don't attempt to write accurate DWARF EH frame info for R2
23689 because code emitted by gcc for a (non-pointer) function call
23690 doesn't save and restore R2. Instead, R2 is managed out-of-line
23691 by a linker generated plt call stub when the function resides in
23692 a shared library. This behaviour is costly to describe in DWARF,
23693 both in terms of the size of DWARF info and the time taken in the
23694 unwinder to interpret it. R2 changes, apart from the
23695 calls_eh_return case earlier in this function, are handled by
23696 linux-unwind.h frob_update_context. */
23697 if (rs6000_save_toc_in_prologue_p ())
23699 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
23700 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
23704 /* Write function prologue. */
23706 static void
23707 rs6000_output_function_prologue (FILE *file,
23708 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23710 rs6000_stack_t *info = rs6000_stack_info ();
23712 if (TARGET_DEBUG_STACK)
23713 debug_stack_info (info);
23715 /* Write .extern for any function we will call to save and restore
23716 fp values. */
23717 if (info->first_fp_reg_save < 64
23718 && !TARGET_MACHO
23719 && !TARGET_ELF)
23721 char *name;
23722 int regno = info->first_fp_reg_save - 32;
23724 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
23726 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23727 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23728 name = rs6000_savres_routine_name (info, regno, sel);
23729 fprintf (file, "\t.extern %s\n", name);
23731 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
23733 bool lr = (info->savres_strategy
23734 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
23735 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
23736 name = rs6000_savres_routine_name (info, regno, sel);
23737 fprintf (file, "\t.extern %s\n", name);
23741 /* ELFv2 ABI r2 setup code and local entry point. This must follow
23742 immediately after the global entry point label. */
23743 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
23745 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
23747 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
23748 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
23750 fputs ("\t.localentry\t", file);
23751 assemble_name (file, name);
23752 fputs (",.-", file);
23753 assemble_name (file, name);
23754 fputs ("\n", file);
23757 /* Output -mprofile-kernel code. This needs to be done here instead of
23758 in output_function_profile since it must go after the ELFv2 ABI
23759 local entry point. */
23760 if (TARGET_PROFILE_KERNEL && crtl->profile)
23762 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23763 gcc_assert (!TARGET_32BIT);
23765 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
23766 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
23768 /* In the ELFv2 ABI we have no compiler stack word. It must be
23769 the responsibility of _mcount to preserve the static chain
23770 register if required. */
23771 if (DEFAULT_ABI != ABI_ELFv2
23772 && cfun->static_chain_decl != NULL)
23774 asm_fprintf (file, "\tstd %s,24(%s)\n",
23775 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23776 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23777 asm_fprintf (file, "\tld %s,24(%s)\n",
23778 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
23780 else
23781 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
23784 rs6000_pic_labelno++;
23787 /* Non-zero if vmx regs are restored before the frame pop, zero if
23788 we restore after the pop when possible. */
23789 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
23791 /* Restoring cr is a two step process: loading a reg from the frame
23792 save, then moving the reg to cr. For ABI_V4 we must let the
23793 unwinder know that the stack location is no longer valid at or
23794 before the stack deallocation, but we can't emit a cfa_restore for
23795 cr at the stack deallocation like we do for other registers.
23796 The trouble is that it is possible for the move to cr to be
23797 scheduled after the stack deallocation. So say exactly where cr
23798 is located on each of the two insns. */
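/* Roughly, the pair looks like this (a sketch; register and offset
   are illustrative only):

	lwz 12,8(1)	load_cr_save, carries a REG_CFA_REGISTER note
	mtocrf 32,12	restore_saved_cr, carries the REG_CFA_RESTORE

   so the unwinder can locate cr however the two insns end up being
   scheduled relative to the stack deallocation.  */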
23800 static rtx
23801 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
23803 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
23804 rtx reg = gen_rtx_REG (SImode, regno);
23805 rtx insn = emit_move_insn (reg, mem);
23807 if (!exit_func && DEFAULT_ABI == ABI_V4)
23809 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
23810 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
23812 add_reg_note (insn, REG_CFA_REGISTER, set);
23813 RTX_FRAME_RELATED_P (insn) = 1;
23815 return reg;
23818 /* Reload CR from REG. */
23820 static void
23821 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
23823 int count = 0;
23824 int i;
23826 if (using_mfcr_multiple)
23828 for (i = 0; i < 8; i++)
23829 if (save_reg_p (CR0_REGNO + i))
23830 count++;
23831 gcc_assert (count);
23834 if (using_mfcr_multiple && count > 1)
23836 rtx insn;
23837 rtvec p;
23838 int ndx;
23840 p = rtvec_alloc (count);
23842 ndx = 0;
23843 for (i = 0; i < 8; i++)
23844 if (save_reg_p (CR0_REGNO + i))
23846 rtvec r = rtvec_alloc (2);
23847 RTVEC_ELT (r, 0) = reg;
23848 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
23849 RTVEC_ELT (p, ndx) =
23850 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
23851 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
23852 ndx++;
23854 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23855 gcc_assert (ndx == count);
23857 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23858 CR field separately. */
23859 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
23861 for (i = 0; i < 8; i++)
23862 if (save_reg_p (CR0_REGNO + i))
23863 add_reg_note (insn, REG_CFA_RESTORE,
23864 gen_rtx_REG (SImode, CR0_REGNO + i));
23866 RTX_FRAME_RELATED_P (insn) = 1;
23869 else
23870 for (i = 0; i < 8; i++)
23871 if (save_reg_p (CR0_REGNO + i))
23873 rtx insn = emit_insn (gen_movsi_to_cr_one
23874 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
23876 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23877 CR field separately, attached to the insn that in fact
23878 restores this particular CR field. */
23879 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
23881 add_reg_note (insn, REG_CFA_RESTORE,
23882 gen_rtx_REG (SImode, CR0_REGNO + i));
23884 RTX_FRAME_RELATED_P (insn) = 1;
23888 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
23889 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
23890 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
23892 rtx insn = get_last_insn ();
23893 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
23895 add_reg_note (insn, REG_CFA_RESTORE, cr);
23896 RTX_FRAME_RELATED_P (insn) = 1;
23900 /* Like cr, the move to lr instruction can be scheduled after the
23901 stack deallocation, but unlike cr, its stack frame save is still
23902 valid. So we only need to emit the cfa_restore on the correct
23903 instruction. */
23905 static void
23906 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
23908 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
23909 rtx reg = gen_rtx_REG (Pmode, regno);
23911 emit_move_insn (reg, mem);
23914 static void
23915 restore_saved_lr (int regno, bool exit_func)
23917 rtx reg = gen_rtx_REG (Pmode, regno);
23918 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23919 rtx insn = emit_move_insn (lr, reg);
23921 if (!exit_func && flag_shrink_wrap)
23923 add_reg_note (insn, REG_CFA_RESTORE, lr);
23924 RTX_FRAME_RELATED_P (insn) = 1;
23928 static rtx
23929 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
23931 if (DEFAULT_ABI == ABI_ELFv2)
23933 int i;
23934 for (i = 0; i < 8; i++)
23935 if (save_reg_p (CR0_REGNO + i))
23937 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
23938 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
23939 cfa_restores);
23942 else if (info->cr_save_p)
23943 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
23944 gen_rtx_REG (SImode, CR2_REGNO),
23945 cfa_restores);
23947 if (info->lr_save_p)
23948 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
23949 gen_rtx_REG (Pmode, LR_REGNO),
23950 cfa_restores);
23951 return cfa_restores;
23954 /* Return true if OFFSET from stack pointer can be clobbered by signals.
23955 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
23956 below the stack pointer that are not clobbered by signals. */
23958 static inline bool
23959 offset_below_red_zone_p (HOST_WIDE_INT offset)
23961 return offset < (DEFAULT_ABI == ABI_V4
23962 ? 0
23963 : TARGET_32BIT ? -220 : -288);
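/* For example, on 64-bit AIX/ELF, offset_below_red_zone_p (-16) is
   false (the slot lies within the 288-byte cushion), whereas under
   ABI_V4 any negative offset yields true since there is no cushion
   at all.  */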
23966 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
23968 static void
23969 emit_cfa_restores (rtx cfa_restores)
23971 rtx insn = get_last_insn ();
23972 rtx *loc = &REG_NOTES (insn);
23974 while (*loc)
23975 loc = &XEXP (*loc, 1);
23976 *loc = cfa_restores;
23977 RTX_FRAME_RELATED_P (insn) = 1;
23980 /* Emit function epilogue as insns. */
23982 void
23983 rs6000_emit_epilogue (int sibcall)
23985 rs6000_stack_t *info;
23986 int restoring_GPRs_inline;
23987 int restoring_FPRs_inline;
23988 int using_load_multiple;
23989 int using_mtcr_multiple;
23990 int use_backchain_to_restore_sp;
23991 int restore_lr;
23992 int strategy;
23993 HOST_WIDE_INT frame_off = 0;
23994 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
23995 rtx frame_reg_rtx = sp_reg_rtx;
23996 rtx cfa_restores = NULL_RTX;
23997 rtx insn;
23998 rtx cr_save_reg = NULL_RTX;
23999 enum machine_mode reg_mode = Pmode;
24000 int reg_size = TARGET_32BIT ? 4 : 8;
24001 int i;
24002 bool exit_func;
24003 unsigned ptr_regno;
24005 info = rs6000_stack_info ();
24007 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24009 reg_mode = V2SImode;
24010 reg_size = 8;
24013 strategy = info->savres_strategy;
24014 using_load_multiple = strategy & SAVRES_MULTIPLE;
24015 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
24016 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
24017 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
24018 || rs6000_cpu == PROCESSOR_PPC603
24019 || rs6000_cpu == PROCESSOR_PPC750
24020 || optimize_size);
24021 /* Restore via the backchain when we have a large frame, since this
24022 is more efficient than an addis, addi pair. The second condition
24023 here will not trigger at the moment; we don't actually need a
24024 frame pointer for alloca, but the generic parts of the compiler
24025 give us one anyway. */
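   /* That is, for a frame larger than 32k we prefer the single load

	  ld 1,0(1)		(the backchain word lives at offset 0)

      over a sequence like

	  addis 1,1,size@ha
	  addi 1,1,size@l

      (illustrative assembly only).  */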
24026 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
24027 || (cfun->calls_alloca
24028 && !frame_pointer_needed));
24029 restore_lr = (info->lr_save_p
24030 && (restoring_FPRs_inline
24031 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
24032 && (restoring_GPRs_inline
24033 || info->first_fp_reg_save < 64));
24035 if (WORLD_SAVE_P (info))
24037 int i, j;
24038 char rname[30];
24039 const char *alloc_rname;
24040 rtvec p;
24042 /* eh_rest_world_r10 will return to the location saved in the LR
24043 stack slot (which is not likely to be our caller).
24044 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24045 rest_world is similar, except any R10 parameter is ignored.
24046 The exception-handling stuff that was here in 2.95 is no
24047 longer necessary. */
24049 p = rtvec_alloc (9
24051 + 32 - info->first_gp_reg_save
24052 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
24053 + 63 + 1 - info->first_fp_reg_save);
24055 strcpy (rname, ((crtl->calls_eh_return) ?
24056 "*eh_rest_world_r10" : "*rest_world"));
24057 alloc_rname = ggc_strdup (rname);
24059 j = 0;
24060 RTVEC_ELT (p, j++) = ret_rtx;
24061 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
24062 gen_rtx_REG (Pmode,
24063 LR_REGNO));
24064 RTVEC_ELT (p, j++)
24065 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
24066 /* The instruction pattern requires a clobber here;
24067 it is shared with the restVEC helper. */
24068 RTVEC_ELT (p, j++)
24069 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
24072 /* CR register traditionally saved as CR2. */
24073 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
24074 RTVEC_ELT (p, j++)
24075 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
24076 if (flag_shrink_wrap)
24078 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24079 gen_rtx_REG (Pmode, LR_REGNO),
24080 cfa_restores);
24081 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24085 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24087 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24088 RTVEC_ELT (p, j++)
24089 = gen_frame_load (reg,
24090 frame_reg_rtx, info->gp_save_offset + reg_size * i);
24091 if (flag_shrink_wrap)
24092 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24094 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
24096 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
24097 RTVEC_ELT (p, j++)
24098 = gen_frame_load (reg,
24099 frame_reg_rtx, info->altivec_save_offset + 16 * i);
24100 if (flag_shrink_wrap)
24101 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24103 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
24105 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24106 ? DFmode : SFmode),
24107 info->first_fp_reg_save + i);
24108 RTVEC_ELT (p, j++)
24109 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
24110 if (flag_shrink_wrap)
24111 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24113 RTVEC_ELT (p, j++)
24114 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
24115 RTVEC_ELT (p, j++)
24116 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
24117 RTVEC_ELT (p, j++)
24118 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
24119 RTVEC_ELT (p, j++)
24120 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
24121 RTVEC_ELT (p, j++)
24122 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
24123 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24125 if (flag_shrink_wrap)
24127 REG_NOTES (insn) = cfa_restores;
24128 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24129 RTX_FRAME_RELATED_P (insn) = 1;
24131 return;
24134 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24135 if (info->push_p)
24136 frame_off = info->total_size;
24138 /* Restore AltiVec registers if we must do so before adjusting the
24139 stack. */
24140 if (TARGET_ALTIVEC_ABI
24141 && info->altivec_size != 0
24142 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24143 || (DEFAULT_ABI != ABI_V4
24144 && offset_below_red_zone_p (info->altivec_save_offset))))
24146 int i;
24147 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24149 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24150 if (use_backchain_to_restore_sp)
24152 int frame_regno = 11;
24154 if ((strategy & REST_INLINE_VRS) == 0)
24156 /* Of r11 and r12, select the one not clobbered by an
24157 out-of-line restore function for the frame register. */
24158 frame_regno = 11 + 12 - scratch_regno;
24160 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24161 emit_move_insn (frame_reg_rtx,
24162 gen_rtx_MEM (Pmode, sp_reg_rtx));
24163 frame_off = 0;
24165 else if (frame_pointer_needed)
24166 frame_reg_rtx = hard_frame_pointer_rtx;
24168 if ((strategy & REST_INLINE_VRS) == 0)
24170 int end_save = info->altivec_save_offset + info->altivec_size;
24171 int ptr_off;
24172 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24173 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24175 if (end_save + frame_off != 0)
24177 rtx offset = GEN_INT (end_save + frame_off);
24179 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24181 else
24182 emit_move_insn (ptr_reg, frame_reg_rtx);
24184 ptr_off = -end_save;
24185 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24186 info->altivec_save_offset + ptr_off,
24187 0, V4SImode, SAVRES_VR);
24189 else
24191 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24192 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24194 rtx addr, areg, mem, reg;
24196 areg = gen_rtx_REG (Pmode, 0);
24197 emit_move_insn
24198 (areg, GEN_INT (info->altivec_save_offset
24199 + frame_off
24200 + 16 * (i - info->first_altivec_reg_save)));
24202 /* AltiVec addressing mode is [reg+reg]. */
24203 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24204 mem = gen_frame_mem (V4SImode, addr);
24206 reg = gen_rtx_REG (V4SImode, i);
24207 emit_move_insn (reg, mem);
24211 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24212 if (((strategy & REST_INLINE_VRS) == 0
24213 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24214 && (flag_shrink_wrap
24215 || (offset_below_red_zone_p
24216 (info->altivec_save_offset
24217 + 16 * (i - info->first_altivec_reg_save)))))
24219 rtx reg = gen_rtx_REG (V4SImode, i);
24220 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24224 /* Restore VRSAVE if we must do so before adjusting the stack. */
24225 if (TARGET_ALTIVEC
24226 && TARGET_ALTIVEC_VRSAVE
24227 && info->vrsave_mask != 0
24228 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24229 || (DEFAULT_ABI != ABI_V4
24230 && offset_below_red_zone_p (info->vrsave_save_offset))))
24232 rtx reg;
24234 if (frame_reg_rtx == sp_reg_rtx)
24236 if (use_backchain_to_restore_sp)
24238 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24239 emit_move_insn (frame_reg_rtx,
24240 gen_rtx_MEM (Pmode, sp_reg_rtx));
24241 frame_off = 0;
24243 else if (frame_pointer_needed)
24244 frame_reg_rtx = hard_frame_pointer_rtx;
24247 reg = gen_rtx_REG (SImode, 12);
24248 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24249 info->vrsave_save_offset + frame_off));
24251 emit_insn (generate_set_vrsave (reg, info, 1));
24254 insn = NULL_RTX;
24255 /* If we have a large stack frame, restore the old stack pointer
24256 using the backchain. */
24257 if (use_backchain_to_restore_sp)
24259 if (frame_reg_rtx == sp_reg_rtx)
24261 /* Under V.4, don't reset the stack pointer until after we're done
24262 loading the saved registers. */
24263 if (DEFAULT_ABI == ABI_V4)
24264 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24266 insn = emit_move_insn (frame_reg_rtx,
24267 gen_rtx_MEM (Pmode, sp_reg_rtx));
24268 frame_off = 0;
24270 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24271 && DEFAULT_ABI == ABI_V4)
24272 /* frame_reg_rtx has been set up by the altivec restore. */
24274 else
24276 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24277 frame_reg_rtx = sp_reg_rtx;
24280 /* If we have a frame pointer, we can restore the old stack pointer
24281 from it. */
24282 else if (frame_pointer_needed)
24284 frame_reg_rtx = sp_reg_rtx;
24285 if (DEFAULT_ABI == ABI_V4)
24286 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24287 /* Prevent reordering memory accesses against stack pointer restore. */
24288 else if (cfun->calls_alloca
24289 || offset_below_red_zone_p (-info->total_size))
24290 rs6000_emit_stack_tie (frame_reg_rtx, true);
24292 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
24293 GEN_INT (info->total_size)));
24294 frame_off = 0;
24296 else if (info->push_p
24297 && DEFAULT_ABI != ABI_V4
24298 && !crtl->calls_eh_return)
24300 /* Prevent reordering memory accesses against stack pointer restore. */
24301 if (cfun->calls_alloca
24302 || offset_below_red_zone_p (-info->total_size))
24303 rs6000_emit_stack_tie (frame_reg_rtx, false);
24304 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
24305 GEN_INT (info->total_size)));
24306 frame_off = 0;
24308 if (insn && frame_reg_rtx == sp_reg_rtx)
24310 if (cfa_restores)
24312 REG_NOTES (insn) = cfa_restores;
24313 cfa_restores = NULL_RTX;
24315 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24316 RTX_FRAME_RELATED_P (insn) = 1;
24319 /* Restore AltiVec registers if we have not done so already. */
24320 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24321 && TARGET_ALTIVEC_ABI
24322 && info->altivec_size != 0
24323 && (DEFAULT_ABI == ABI_V4
24324 || !offset_below_red_zone_p (info->altivec_save_offset)))
24326 int i;
24328 if ((strategy & REST_INLINE_VRS) == 0)
24330 int end_save = info->altivec_save_offset + info->altivec_size;
24331 int ptr_off;
24332 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24333 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24334 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24336 if (end_save + frame_off != 0)
24338 rtx offset = GEN_INT (end_save + frame_off);
24340 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24342 else
24343 emit_move_insn (ptr_reg, frame_reg_rtx);
24345 ptr_off = -end_save;
24346 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24347 info->altivec_save_offset + ptr_off,
24348 0, V4SImode, SAVRES_VR);
24349 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24351 /* Frame reg was clobbered by out-of-line save. Restore it
24352 from ptr_reg, and if we are calling an out-of-line gpr or
24353 fpr restore, set up the correct pointer and offset. */
24354 unsigned newptr_regno = 1;
24355 if (!restoring_GPRs_inline)
24357 bool lr = info->gp_save_offset + info->gp_size == 0;
24358 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24359 newptr_regno = ptr_regno_for_savres (sel);
24360 end_save = info->gp_save_offset + info->gp_size;
24362 else if (!restoring_FPRs_inline)
24364 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
24365 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24366 newptr_regno = ptr_regno_for_savres (sel);
24367 end_save = info->fp_save_offset + info->fp_size;
24370 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
24371 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
24373 if (end_save + ptr_off != 0)
24375 rtx offset = GEN_INT (end_save + ptr_off);
24377 frame_off = -end_save;
24378 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
24380 else
24382 frame_off = ptr_off;
24383 emit_move_insn (frame_reg_rtx, ptr_reg);
24387 else
24389 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24390 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24392 rtx addr, areg, mem, reg;
24394 areg = gen_rtx_REG (Pmode, 0);
24395 emit_move_insn
24396 (areg, GEN_INT (info->altivec_save_offset
24397 + frame_off
24398 + 16 * (i - info->first_altivec_reg_save)));
24400 /* AltiVec addressing mode is [reg+reg]. */
24401 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24402 mem = gen_frame_mem (V4SImode, addr);
24404 reg = gen_rtx_REG (V4SImode, i);
24405 emit_move_insn (reg, mem);
24409 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24410 if (((strategy & REST_INLINE_VRS) == 0
24411 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24412 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24414 rtx reg = gen_rtx_REG (V4SImode, i);
24415 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24419 /* Restore VRSAVE if we have not done so already. */
24420 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24421 && TARGET_ALTIVEC
24422 && TARGET_ALTIVEC_VRSAVE
24423 && info->vrsave_mask != 0
24424 && (DEFAULT_ABI == ABI_V4
24425 || !offset_below_red_zone_p (info->vrsave_save_offset)))
24427 rtx reg;
24429 reg = gen_rtx_REG (SImode, 12);
24430 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24431 info->vrsave_save_offset + frame_off));
24433 emit_insn (generate_set_vrsave (reg, info, 1));
24436 /* If we exit by an out-of-line restore function on ABI_V4 then that
24437 function will deallocate the stack, so we don't need to worry
24438 about the unwinder restoring cr from an invalid stack frame
24439 location. */
24440 exit_func = (!restoring_FPRs_inline
24441 || (!restoring_GPRs_inline
24442 && info->first_fp_reg_save == 64));
24444 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24445 *separate* slots if the routine calls __builtin_eh_return, so
24446 that they can be independently restored by the unwinder. */
24447 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24449 int i, cr_off = info->ehcr_offset;
24451 for (i = 0; i < 8; i++)
24452 if (!call_used_regs[CR0_REGNO + i])
24454 rtx reg = gen_rtx_REG (SImode, 0);
24455 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24456 cr_off + frame_off));
24458 insn = emit_insn (gen_movsi_to_cr_one
24459 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24461 if (!exit_func && flag_shrink_wrap)
24463 add_reg_note (insn, REG_CFA_RESTORE,
24464 gen_rtx_REG (SImode, CR0_REGNO + i));
24466 RTX_FRAME_RELATED_P (insn) = 1;
24469 cr_off += reg_size;
24473 /* Get the old lr if we saved it. If we are restoring registers
24474 out-of-line, then the out-of-line routines can do this for us. */
24475 if (restore_lr && restoring_GPRs_inline)
24476 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24478 /* Get the old cr if we saved it. */
24479 if (info->cr_save_p)
24481 unsigned cr_save_regno = 12;
24483 if (!restoring_GPRs_inline)
24485 /* Ensure we don't use the register used by the out-of-line
24486 gpr register restore below. */
24487 bool lr = info->gp_save_offset + info->gp_size == 0;
24488 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24489 int gpr_ptr_regno = ptr_regno_for_savres (sel);
24491 if (gpr_ptr_regno == 12)
24492 cr_save_regno = 11;
24493 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
24495 else if (REGNO (frame_reg_rtx) == 12)
24496 cr_save_regno = 11;
24498 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
24499 info->cr_save_offset + frame_off,
24500 exit_func);
24503 /* Set LR here to try to overlap restores below. */
24504 if (restore_lr && restoring_GPRs_inline)
24505 restore_saved_lr (0, exit_func);
24507 /* Load exception handler data registers, if needed. */
24508 if (crtl->calls_eh_return)
24510 unsigned int i, regno;
24512 if (TARGET_AIX)
24514 rtx reg = gen_rtx_REG (reg_mode, 2);
24515 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24516 frame_off + RS6000_TOC_SAVE_SLOT));
24519 for (i = 0; ; ++i)
24521 rtx mem;
24523 regno = EH_RETURN_DATA_REGNO (i);
24524 if (regno == INVALID_REGNUM)
24525 break;
24527 /* Note: possible use of r0 here to address SPE regs. */
24528 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
24529 info->ehrd_offset + frame_off
24530 + reg_size * (int) i);
24532 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
24536 /* Restore GPRs. This is done as a PARALLEL if we are using
24537 the load-multiple instructions. */
24538 if (TARGET_SPE_ABI
24539 && info->spe_64bit_regs_used
24540 && info->first_gp_reg_save != 32)
24542 /* Determine whether we can address all of the registers that need
24543 to be saved with an offset from frame_reg_rtx that fits in
24544 the small const field for SPE memory instructions. */
24545 int spe_regs_addressable
24546 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
24547 + reg_size * (32 - info->first_gp_reg_save - 1))
24548 && restoring_GPRs_inline);
24550 if (!spe_regs_addressable)
24552 int ool_adjust = 0;
24553 rtx old_frame_reg_rtx = frame_reg_rtx;
24554 /* Make r11 point to the start of the SPE save area. We worried about
24555 not clobbering it when we were saving registers in the prologue.
24556 There's no need to worry here because the static chain is passed
24557 anew to every function. */
24559 if (!restoring_GPRs_inline)
24560 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
24561 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24562 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
24563 GEN_INT (info->spe_gp_save_offset
24564 + frame_off
24565 - ool_adjust)));
24566 /* Keep the invariant that frame_reg_rtx + frame_off points
24567 at the top of the stack frame. */
24568 frame_off = -info->spe_gp_save_offset + ool_adjust;
24571 if (restoring_GPRs_inline)
24573 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
24575 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24576 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24578 rtx offset, addr, mem, reg;
24580 /* We're doing all this to ensure that the immediate offset
24581 fits into the immediate field of 'evldd'. */
24582 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
24584 offset = GEN_INT (spe_offset + reg_size * i);
24585 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
24586 mem = gen_rtx_MEM (V2SImode, addr);
24587 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24589 emit_move_insn (reg, mem);
24592 else
24593 rs6000_emit_savres_rtx (info, frame_reg_rtx,
24594 info->spe_gp_save_offset + frame_off,
24595 info->lr_save_offset + frame_off,
24596 reg_mode,
24597 SAVRES_GPR | SAVRES_LR);
24599 else if (!restoring_GPRs_inline)
24601 /* We are jumping to an out-of-line function. */
24602 rtx ptr_reg;
24603 int end_save = info->gp_save_offset + info->gp_size;
24604 bool can_use_exit = end_save == 0;
24605 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
24606 int ptr_off;
24608 /* Emit stack reset code if we need it. */
24609 ptr_regno = ptr_regno_for_savres (sel);
24610 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24611 if (can_use_exit)
24612 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24613 else if (end_save + frame_off != 0)
24614 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
24615 GEN_INT (end_save + frame_off)));
24616 else if (REGNO (frame_reg_rtx) != ptr_regno)
24617 emit_move_insn (ptr_reg, frame_reg_rtx);
24618 if (REGNO (frame_reg_rtx) == ptr_regno)
24619 frame_off = -end_save;
24621 if (can_use_exit && info->cr_save_p)
24622 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
24624 ptr_off = -end_save;
24625 rs6000_emit_savres_rtx (info, ptr_reg,
24626 info->gp_save_offset + ptr_off,
24627 info->lr_save_offset + ptr_off,
24628 reg_mode, sel);
24630 else if (using_load_multiple)
24632 rtvec p;
24633 p = rtvec_alloc (32 - info->first_gp_reg_save);
24634 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24635 RTVEC_ELT (p, i)
24636 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24637 frame_reg_rtx,
24638 info->gp_save_offset + frame_off + reg_size * i);
24639 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24641 else
24643 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24644 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24645 emit_insn (gen_frame_load
24646 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24647 frame_reg_rtx,
24648 info->gp_save_offset + frame_off + reg_size * i));
24651 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24653 /* If the frame pointer was used then we can't delay emitting
24654 a REG_CFA_DEF_CFA note. This must happen on the insn that
24655 restores the frame pointer, r31. We may have already emitted
24656 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
24657 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
24658 be harmless if emitted. */
24659 if (frame_pointer_needed)
24661 insn = get_last_insn ();
24662 add_reg_note (insn, REG_CFA_DEF_CFA,
24663 plus_constant (Pmode, frame_reg_rtx, frame_off));
24664 RTX_FRAME_RELATED_P (insn) = 1;
24667 /* Set up cfa_restores. We always need these when
24668 shrink-wrapping. If not shrink-wrapping then we only need
24669 the cfa_restore when the stack location is no longer valid.
24670 The cfa_restores must be emitted on or before the insn that
24671 invalidates the stack, and of course must not be emitted
24672 before the insn that actually does the restore. The latter
24673 is why it is a bad idea to emit the cfa_restores as a group
24674 on the last instruction here that actually does a restore:
24675 That insn may be reordered with respect to others doing
24676 restores. */
24677 if (flag_shrink_wrap
24678 && !restoring_GPRs_inline
24679 && info->first_fp_reg_save == 64)
24680 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24682 for (i = info->first_gp_reg_save; i < 32; i++)
24683 if (!restoring_GPRs_inline
24684 || using_load_multiple
24685 || rs6000_reg_live_or_pic_offset_p (i))
24687 rtx reg = gen_rtx_REG (reg_mode, i);
24689 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24693 if (!restoring_GPRs_inline
24694 && info->first_fp_reg_save == 64)
24696 /* We are jumping to an out-of-line function. */
24697 if (cfa_restores)
24698 emit_cfa_restores (cfa_restores);
24699 return;
24702 if (restore_lr && !restoring_GPRs_inline)
24704 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24705 restore_saved_lr (0, exit_func);
24708 /* Restore fpr's if we need to do it without calling a function. */
24709 if (restoring_FPRs_inline)
24710 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24711 if (save_reg_p (info->first_fp_reg_save + i))
24713 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24714 ? DFmode : SFmode),
24715 info->first_fp_reg_save + i);
24716 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24717 info->fp_save_offset + frame_off + 8 * i));
24718 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24719 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24722 /* If we saved cr, restore it here. Just those that were used. */
24723 if (info->cr_save_p)
24724 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
24726 /* If this is V.4, unwind the stack pointer after all of the loads
24727 have been done, or set up r11 if we are restoring fp out of line. */
24728 ptr_regno = 1;
24729 if (!restoring_FPRs_inline)
24731 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24732 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24733 ptr_regno = ptr_regno_for_savres (sel);
24736 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24737 if (REGNO (frame_reg_rtx) == ptr_regno)
24738 frame_off = 0;
24740 if (insn && restoring_FPRs_inline)
24742 if (cfa_restores)
24744 REG_NOTES (insn) = cfa_restores;
24745 cfa_restores = NULL_RTX;
24747 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24748 RTX_FRAME_RELATED_P (insn) = 1;
24751 if (crtl->calls_eh_return)
24753 rtx sa = EH_RETURN_STACKADJ_RTX;
24754 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
24757 if (!sibcall)
24759 rtvec p;
24760 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24761 if (! restoring_FPRs_inline)
24763 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
24764 RTVEC_ELT (p, 0) = ret_rtx;
24766 else
24768 if (cfa_restores)
24770 /* We can't hang the cfa_restores off a simple return,
24771 since the shrink-wrap code sometimes uses an existing
24772 return. This means there might be a path from
24773 pre-prologue code to this return, and dwarf2cfi code
24774 wants the eh_frame unwinder state to be the same on
24775 all paths to any point. So we need to emit the
24776 cfa_restores before the return. For -m64 we really
24777 don't need epilogue cfa_restores at all, except for
24778 this irritating dwarf2cfi with shrink-wrap
24779 requirement; The stack red-zone means eh_frame info
24780 from the prologue telling the unwinder to restore
24781 from the stack is perfectly good right to the end of
24782 the function. */
24783 emit_insn (gen_blockage ());
24784 emit_cfa_restores (cfa_restores);
24785 cfa_restores = NULL_RTX;
24787 p = rtvec_alloc (2);
24788 RTVEC_ELT (p, 0) = simple_return_rtx;
24791 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
24792 ? gen_rtx_USE (VOIDmode,
24793 gen_rtx_REG (Pmode, LR_REGNO))
24794 : gen_rtx_CLOBBER (VOIDmode,
24795 gen_rtx_REG (Pmode, LR_REGNO)));
24797 /* If we have to restore more than two FP registers, branch to the
24798 restore function. It will return to our caller. */
24799 if (! restoring_FPRs_inline)
24801 int i;
24802 int reg;
24803 rtx sym;
24805 if (flag_shrink_wrap)
24806 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24808 sym = rs6000_savres_routine_sym (info,
24809 SAVRES_FPR | (lr ? SAVRES_LR : 0));
24810 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
24811 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
24812 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
24814 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24816 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
24818 RTVEC_ELT (p, i + 4)
24819 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
24820 if (flag_shrink_wrap)
24821 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
24822 cfa_restores);
24826 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24829 if (cfa_restores)
24831 if (sibcall)
24832 /* Ensure the cfa_restores are hung off an insn that won't
24833 be reordered above other restores. */
24834 emit_insn (gen_blockage ());
24836 emit_cfa_restores (cfa_restores);
24840 /* Write function epilogue. */
24842 static void
24843 rs6000_output_function_epilogue (FILE *file,
24844 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
24846 #if TARGET_MACHO
24847 macho_branch_islands ();
24848 /* Mach-O doesn't support labels at the end of objects, so if
24849 it looks like we might want one, insert a NOP. */
24851 rtx insn = get_last_insn ();
24852 rtx deleted_debug_label = NULL_RTX;
24853 while (insn
24854 && NOTE_P (insn)
24855 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
24857 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
24858 notes; instead set their CODE_LABEL_NUMBER to -1 so that
24859 there are no code generation differences
24860 between -g and -g0. */
24861 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
24862 deleted_debug_label = insn;
24863 insn = PREV_INSN (insn);
24865 if (insn
24866 && (LABEL_P (insn)
24867 || (NOTE_P (insn)
24868 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
24869 fputs ("\tnop\n", file);
24870 else if (deleted_debug_label)
24871 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
24872 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
24873 CODE_LABEL_NUMBER (insn) = -1;
24875 #endif
24877 /* Output a traceback table here. See /usr/include/sys/debug.h for info
24878 on its format.
24880 We don't output a traceback table if -finhibit-size-directive was
24881 used. The documentation for -finhibit-size-directive reads
24882 ``don't output a @code{.size} assembler directive, or anything
24883 else that would cause trouble if the function is split in the
24884 middle, and the two halves are placed at locations far apart in
24885 memory.'' The traceback table has this property, since it
24886 includes the offset from the start of the function to the
24887 traceback table itself.
24889 System V.4 PowerPC (and the embedded ABI derived from it) uses a
24890 different traceback table. */
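   /* An illustrative sketch of the output for a small C function
      "foo" (field values are examples only):

	 LT..foo:
		 .long 0		start-of-tbtab marker
		 .byte 0,0		format type and language
					(flag and parameter bytes follow,
					then the name length and "foo")  */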
24891 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24892 && ! flag_inhibit_size_directive
24893 && rs6000_traceback != traceback_none && !cfun->is_thunk)
24895 const char *fname = NULL;
24896 const char *language_string = lang_hooks.name;
24897 int fixed_parms = 0, float_parms = 0, parm_info = 0;
24898 int i;
24899 int optional_tbtab;
24900 rs6000_stack_t *info = rs6000_stack_info ();
24902 if (rs6000_traceback == traceback_full)
24903 optional_tbtab = 1;
24904 else if (rs6000_traceback == traceback_part)
24905 optional_tbtab = 0;
24906 else
24907 optional_tbtab = !optimize_size && !TARGET_ELF;
24909 if (optional_tbtab)
24911 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
24912 while (*fname == '.') /* V.4 encodes . in the name */
24913 fname++;
24915 /* Need label immediately before tbtab, so we can compute
24916 its offset from the function start. */
24917 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
24918 ASM_OUTPUT_LABEL (file, fname);
24921 /* The .tbtab pseudo-op can only be used for the first eight
24922 expressions, since it can't handle the possibly variable
24923 length fields that follow. However, if you omit the optional
24924 fields, the assembler outputs zeros for all optional fields
24925 anyway, giving each variable length field its minimum length
24926 (as defined in sys/debug.h). Thus we cannot use the .tbtab
24927 pseudo-op at all. */
24929 /* An all-zero word flags the start of the tbtab, for debuggers
24930 that have to find it by searching forward from the entry
24931 point or from the current pc. */
24932 fputs ("\t.long 0\n", file);
24934 /* Tbtab format type. Use format type 0. */
24935 fputs ("\t.byte 0,", file);
24937 /* Language type. Unfortunately, there does not seem to be any
24938 official way to discover the language being compiled, so we
24939 use language_string.
24940 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
24941 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
24942 a number, so for now use 9. LTO and Go aren't assigned numbers
24943 either, so for now use 0. */
24944 if (! strcmp (language_string, "GNU C")
24945 || ! strcmp (language_string, "GNU GIMPLE")
24946 || ! strcmp (language_string, "GNU Go"))
24947 i = 0;
24948 else if (! strcmp (language_string, "GNU F77")
24949 || ! strcmp (language_string, "GNU Fortran"))
24950 i = 1;
24951 else if (! strcmp (language_string, "GNU Pascal"))
24952 i = 2;
24953 else if (! strcmp (language_string, "GNU Ada"))
24954 i = 3;
24955 else if (! strcmp (language_string, "GNU C++")
24956 || ! strcmp (language_string, "GNU Objective-C++"))
24957 i = 9;
24958 else if (! strcmp (language_string, "GNU Java"))
24959 i = 13;
24960 else if (! strcmp (language_string, "GNU Objective-C"))
24961 i = 14;
24962 else
24963 gcc_unreachable ();
24964 fprintf (file, "%d,", i);
24966 /* 8 single bit fields: global linkage (not set for C extern linkage,
24967 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
24968 from start of procedure stored in tbtab, internal function, function
24969 has controlled storage, function has no toc, function uses fp,
24970 function logs/aborts fp operations. */
24971 /* Assume that fp operations are used if any fp reg must be saved. */
24972 fprintf (file, "%d,",
24973 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
24975 /* 6 bitfields: function is interrupt handler, name present in
24976 proc table, function calls alloca, on condition directives
24977 (controls stack walks, 3 bits), saves condition reg, saves
24978 link reg. */
24979 /* The `function calls alloca' bit seems to be set whenever reg 31 is
24980 set up as a frame pointer, even when there is no alloca call. */
24981 fprintf (file, "%d,",
24982 ((optional_tbtab << 6)
24983 | ((optional_tbtab & frame_pointer_needed) << 5)
24984 | (info->cr_save_p << 1)
24985 | (info->lr_save_p)));
24987 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
24988 (6 bits). */
24989 fprintf (file, "%d,",
24990 (info->push_p << 7) | (64 - info->first_fp_reg_save));
24992 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
24993 fprintf (file, "%d,", (32 - first_reg_to_save ()));
24995 if (optional_tbtab)
24997 /* Compute the parameter info from the function decl argument
24998 list. */
24999 tree decl;
25000 int next_parm_info_bit = 31;
25002 for (decl = DECL_ARGUMENTS (current_function_decl);
25003 decl; decl = DECL_CHAIN (decl))
25005 rtx parameter = DECL_INCOMING_RTL (decl);
25006 enum machine_mode mode = GET_MODE (parameter);
25008 if (GET_CODE (parameter) == REG)
25010 if (SCALAR_FLOAT_MODE_P (mode))
25012 int bits;
25014 float_parms++;
25016 switch (mode)
25018 case SFmode:
25019 case SDmode:
25020 bits = 0x2;
25021 break;
25023 case DFmode:
25024 case DDmode:
25025 case TFmode:
25026 case TDmode:
25027 bits = 0x3;
25028 break;
25030 default:
25031 gcc_unreachable ();
25034 /* If only one bit will fit, don't OR in this entry. */
25035 if (next_parm_info_bit > 0)
25036 parm_info |= (bits << (next_parm_info_bit - 1));
25037 next_parm_info_bit -= 2;
25039 else
25041 fixed_parms += ((GET_MODE_SIZE (mode)
25042 + (UNITS_PER_WORD - 1))
25043 / UNITS_PER_WORD);
25044 next_parm_info_bit -= 1;
25050 /* Number of fixed point parameters. */
25051 /* This is actually the number of words of fixed point parameters; thus
25052 an 8 byte struct counts as 2; and thus the maximum value is 8. */
25053 fprintf (file, "%d,", fixed_parms);
25055 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25056 all on stack. */
25057 /* This is actually the number of fp registers that hold parameters;
25058 and thus the maximum value is 13. */
25059 /* Set parameters on stack bit if parameters are not in their original
25060 registers, regardless of whether they are on the stack? Xlc
25061 seems to set the bit when not optimizing. */
25062 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
25064 if (! optional_tbtab)
25065 return;
25067 /* Optional fields follow. Some are variable length. */
25069 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25070 11 double float. */
25071 /* There is an entry for each parameter in a register, in the order that
25072 they occur in the parameter list. Any intervening arguments on the
25073 stack are ignored. If the list overflows a long (max possible length
25074 34 bits) then completely leave off all elements that don't fit. */
25075 /* Only emit this long if there was at least one parameter. */
25076 if (fixed_parms || float_parms)
25077 fprintf (file, "\t.long %d\n", parm_info);
25079 /* Offset from start of code to tb table. */
25080 fputs ("\t.long ", file);
25081 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25082 RS6000_OUTPUT_BASENAME (file, fname);
25083 putc ('-', file);
25084 rs6000_output_function_entry (file, fname);
25085 putc ('\n', file);
25087 /* Interrupt handler mask. */
25088 /* Omit this long, since we never set the interrupt handler bit
25089 above. */
25091 /* Number of CTL (controlled storage) anchors. */
25092 /* Omit this long, since the has_ctl bit is never set above. */
25094 /* Displacement into stack of each CTL anchor. */
25095 /* Omit this list of longs, because there are no CTL anchors. */
25097 /* Length of function name. */
25098 if (*fname == '*')
25099 ++fname;
25100 fprintf (file, "\t.short %d\n", (int) strlen (fname));
25102 /* Function name. */
25103 assemble_string (fname, strlen (fname));
25105 /* Register for alloca automatic storage; this is always reg 31.
25106 Only emit this if the alloca bit was set above. */
25107 if (frame_pointer_needed)
25108 fputs ("\t.byte 31\n", file);
25110 fputs ("\t.align 2\n", file);
25114 /* A C compound statement that outputs the assembler code for a thunk
25115 function, used to implement C++ virtual function calls with
25116 multiple inheritance. The thunk acts as a wrapper around a virtual
25117 function, adjusting the implicit object parameter before handing
25118 control off to the real function.
25120 First, emit code to add the integer DELTA to the location that
25121 contains the incoming first argument. Assume that this argument
25122 contains a pointer, and is the one used to pass the `this' pointer
25123 in C++. This is the incoming argument *before* the function
25124 prologue, e.g. `%o0' on a sparc. The addition must preserve the
25125 values of all other incoming arguments.
25127 After the addition, emit code to jump to FUNCTION, which is a
25128 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
25129 not touch the return address. Hence returning from FUNCTION will
25130 return to whoever called the current `thunk'.
25132 The effect must be as if FUNCTION had been called directly with the
25133 adjusted first argument. This macro is responsible for emitting
25134 all of the code for a thunk function; output_function_prologue()
25135 and output_function_epilogue() are not invoked.
25137 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
25138 been extracted from it.) It might possibly be useful on some
25139 targets, but probably not.
25141 If you do not define this macro, the target-independent code in the
25142 C++ frontend will generate a less efficient heavyweight thunk that
25143 calls FUNCTION instead of jumping to it. The generic approach does
25144 not support varargs. */
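/* A hypothetical C++ illustration of when a nonzero DELTA arises:

     struct A { virtual void f (); int a; };
     struct B { virtual void g (); int b; };
     struct C : A, B { void g (); };

   Calling g() through a B* that really points into a C requires
   sliding `this' from the B subobject back to the enclosing C before
   jumping to C::g; that slide is the (possibly negative) DELTA
   applied below.  */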
25146 static void
25147 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
25148 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
25149 tree function)
25151 rtx this_rtx, insn, funexp;
25153 reload_completed = 1;
25154 epilogue_completed = 1;
25156 /* Mark the end of the (empty) prologue. */
25157 emit_note (NOTE_INSN_PROLOGUE_END);
25159 /* Find the "this" pointer. If the function returns a structure,
25160 the structure return pointer is in r3. */
25161 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25162 this_rtx = gen_rtx_REG (Pmode, 4);
25163 else
25164 this_rtx = gen_rtx_REG (Pmode, 3);
25166 /* Apply the constant offset, if required. */
25167 if (delta)
25168 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25170 /* Apply the offset from the vtable, if required. */
25171 if (vcall_offset)
25173 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25174 rtx tmp = gen_rtx_REG (Pmode, 12);
25176 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
25177 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25179 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25180 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25182 else
25184 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25186 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25188 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25191 /* Generate a tail call to the target function. */
25192 if (!TREE_USED (function))
25194 assemble_external (function);
25195 TREE_USED (function) = 1;
25197 funexp = XEXP (DECL_RTL (function), 0);
25198 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25200 #if TARGET_MACHO
25201 if (MACHOPIC_INDIRECT)
25202 funexp = machopic_indirect_call_target (funexp);
25203 #endif
25205 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25206 generate sibcall RTL explicitly. */
25207 insn = emit_call_insn (
25208 gen_rtx_PARALLEL (VOIDmode,
25209 gen_rtvec (4,
25210 gen_rtx_CALL (VOIDmode,
25211 funexp, const0_rtx),
25212 gen_rtx_USE (VOIDmode, const0_rtx),
25213 gen_rtx_USE (VOIDmode,
25214 gen_rtx_REG (SImode,
25215 LR_REGNO)),
25216 simple_return_rtx)));
25217 SIBLING_CALL_P (insn) = 1;
25218 emit_barrier ();
25220 /* Ensure we have a global entry point for the thunk. ??? We could
25221 avoid that if the target routine doesn't need a global entry point,
25222 but we do not know whether this is the case at this point. */
25223 if (DEFAULT_ABI == ABI_ELFv2)
25224 cfun->machine->r2_setup_needed = true;
25226 /* Run just enough of rest_of_compilation to get the insns emitted.
25227 There's not really enough bulk here to make other passes such as
25228 instruction scheduling worthwhile. Note that use_thunk calls
25229 assemble_start_function and assemble_end_function. */
25230 insn = get_insns ();
25231 shorten_branches (insn);
25232 final_start_function (insn, file, 1);
25233 final (insn, file, 1);
25234 final_end_function ();
25236 reload_completed = 0;
25237 epilogue_completed = 0;
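
/* Illustrative sketch (not part of the compiler): in C-like pseudocode,
   the thunk emitted by rs6000_output_mi_thunk above behaves as if it were

     this += delta;
     if (vcall_offset)
       this += *(ptrdiff_t *)(*(char **)this + vcall_offset);
     return function (this, ...);    /* emitted as a sibling call */

   where the vcall_offset load uses r12 as a temporary, and an offset
   outside the signed 16-bit range takes the add-then-load path rather
   than a single D-form load.  */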
25240 /* A quick summary of the various types of 'constant-pool tables'
25241 under PowerPC:
25243 Target       Flags            Name             One table per
25244 AIX          (none)           AIX TOC          object file
25245 AIX          -mfull-toc       AIX TOC          object file
25246 AIX          -mminimal-toc    AIX minimal TOC  translation unit
25247 SVR4/EABI    (none)           SVR4 SDATA       object file
25248 SVR4/EABI    -fpic            SVR4 pic         object file
25249 SVR4/EABI    -fPIC            SVR4 PIC         translation unit
25250 SVR4/EABI    -mrelocatable    EABI TOC         function
25251 SVR4/EABI    -maix            AIX TOC          object file
25252 SVR4/EABI    -maix -mminimal-toc
25253                               AIX minimal TOC  translation unit
25255 Name             Reg.  Reg. is   Entries   Entry contains:
25256                        set by    made by   addrs?   fp?      sum?
25258 AIX TOC          2     crt0      as        Y        option   option
25259 AIX minimal TOC  30    prolog    gcc       Y        Y        option
25260 SVR4 SDATA       13    crt0      gcc       N        Y        N
25261 SVR4 pic         30    prolog    ld        Y        not yet  N
25262 SVR4 PIC         30    prolog    gcc       Y        option   option
25263 EABI TOC         30    prolog    gcc       Y        option   option
25267 /* Hash functions for the hash table. */
25269 static unsigned
25270 rs6000_hash_constant (rtx k)
25272 enum rtx_code code = GET_CODE (k);
25273 enum machine_mode mode = GET_MODE (k);
25274 unsigned result = (code << 3) ^ mode;
25275 const char *format;
25276 int flen, fidx;
25278 format = GET_RTX_FORMAT (code);
25279 flen = strlen (format);
25280 fidx = 0;
25282 switch (code)
25284 case LABEL_REF:
25285 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
25287 case CONST_WIDE_INT:
25289 int i;
25290 flen = CONST_WIDE_INT_NUNITS (k);
25291 for (i = 0; i < flen; i++)
25292 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
25293 return result;
25296 case CONST_DOUBLE:
25297 if (mode != VOIDmode)
25298 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
25299 flen = 2;
25300 break;
25302 case CODE_LABEL:
25303 fidx = 3;
25304 break;
25306 default:
25307 break;
25310 for (; fidx < flen; fidx++)
25311 switch (format[fidx])
25313 case 's':
25315 unsigned i, len;
25316 const char *str = XSTR (k, fidx);
25317 len = strlen (str);
25318 result = result * 613 + len;
25319 for (i = 0; i < len; i++)
25320 result = result * 613 + (unsigned) str[i];
25321 break;
25323 case 'u':
25324 case 'e':
25325 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
25326 break;
25327 case 'i':
25328 case 'n':
25329 result = result * 613 + (unsigned) XINT (k, fidx);
25330 break;
25331 case 'w':
25332 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
25333 result = result * 613 + (unsigned) XWINT (k, fidx);
25334 else
25336 size_t i;
25337 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
25338 result = result * 613 + (unsigned) (XWINT (k, fidx)
25339 >> CHAR_BIT * i);
25341 break;
25342 case '0':
25343 break;
25344 default:
25345 gcc_unreachable ();
25348 return result;
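
/* Worked example (illustrative only): the hash proceeds by repeated
   multiply-and-add with the odd constants 613 and 1231, in the spirit of

     unsigned h = (code << 3) ^ mode;
     h = h * 613 + byte0;
     h = h * 613 + byte1;
     ...

   so for the 2-byte string "ab" the 's' case above produces
   ((h * 613 + 2) * 613 + 'a') * 613 + 'b' -- the string length is
   mixed in before the individual characters.  */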
25351 static unsigned
25352 toc_hash_function (const void *hash_entry)
25354 const struct toc_hash_struct *thc =
25355 (const struct toc_hash_struct *) hash_entry;
25356 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
25359 /* Compare H1 and H2 for equivalence. */
25361 static int
25362 toc_hash_eq (const void *h1, const void *h2)
25364 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
25365 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
25367 if (((const struct toc_hash_struct *) h1)->key_mode
25368 != ((const struct toc_hash_struct *) h2)->key_mode)
25369 return 0;
25371 return rtx_equal_p (r1, r2);
25374 /* These are the names given by the C++ front-end to vtables, and
25375 vtable-like objects. Ideally, this logic should not be here;
25376 instead, there should be some programmatic way of inquiring as
25377 to whether or not an object is a vtable. */
25379 #define VTABLE_NAME_P(NAME) \
25380 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
25381 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
25382 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
25383 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
25384 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
25386 #ifdef NO_DOLLAR_IN_LABEL
25387 /* Return a GGC-allocated character string translating dollar signs in
25388 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
25390 const char *
25391 rs6000_xcoff_strip_dollar (const char *name)
25393 char *strip, *p;
25394 const char *q;
25395 size_t len;
25397 q = (const char *) strchr (name, '$');
25399 if (q == 0 || q == name)
25400 return name;
25402 len = strlen (name);
25403 strip = XALLOCAVEC (char, len + 1);
25404 strcpy (strip, name);
25405 p = strip + (q - name);
25406 while (p)
25408 *p = '_';
25409 p = strchr (p + 1, '$');
25412 return ggc_alloc_string (strip, len);
25414 #endif
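
/* Usage example (hypothetical input): on a NO_DOLLAR_IN_LABEL target,
   rs6000_xcoff_strip_dollar ("foo$bar$baz") returns "foo_bar_baz",
   while a name containing no '$', or one whose first character is '$',
   is returned unchanged.  */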
25416 void
25417 rs6000_output_symbol_ref (FILE *file, rtx x)
25419 /* Currently C++ toc references to vtables can be emitted before it
25420 is decided whether the vtable is public or private. If this is
25421 the case, then the linker will eventually complain that there is
25422 a reference to an unknown section. Thus, for vtables only,
25423 we emit the TOC reference to reference the symbol and not the
25424 section. */
25425 const char *name = XSTR (x, 0);
25427 if (VTABLE_NAME_P (name))
25429 RS6000_OUTPUT_BASENAME (file, name);
25431 else
25432 assemble_name (file, name);
25435 /* Output a TOC entry. We derive the entry name from what is being
25436 written. */
25438 void
25439 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
25441 char buf[256];
25442 const char *name = buf;
25443 rtx base = x;
25444 HOST_WIDE_INT offset = 0;
25446 gcc_assert (!TARGET_NO_TOC);
25448 /* When the linker won't eliminate them, don't output duplicate
25449 TOC entries (this happens on AIX if there is any kind of TOC,
25450 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
25451 CODE_LABELs. */
25452 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
25454 struct toc_hash_struct *h;
25455 void * * found;
25457 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
25458 time because GGC is not initialized at that point. */
25459 if (toc_hash_table == NULL)
25460 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
25461 toc_hash_eq, NULL);
25463 h = ggc_alloc<toc_hash_struct> ();
25464 h->key = x;
25465 h->key_mode = mode;
25466 h->labelno = labelno;
25468 found = htab_find_slot (toc_hash_table, h, INSERT);
25469 if (*found == NULL)
25470 *found = h;
25471 else /* This is indeed a duplicate.
25472 Set this label equal to that label. */
25474 fputs ("\t.set ", file);
25475 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25476 fprintf (file, "%d,", labelno);
25477 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25478 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25479 found)->labelno));
25481 #ifdef HAVE_AS_TLS
25482 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
25483 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
25484 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
25486 fputs ("\t.set ", file);
25487 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25488 fprintf (file, "%d,", labelno);
25489 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25490 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25491 found)->labelno));
25493 #endif
25494 return;
25498 /* If we're going to put a double constant in the TOC, make sure it's
25499 aligned properly when strict alignment is on. */
25500 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
25501 && STRICT_ALIGNMENT
25502 && GET_MODE_BITSIZE (mode) >= 64
25503 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
25504 ASM_OUTPUT_ALIGN (file, 3);
25507 (*targetm.asm_out.internal_label) (file, "LC", labelno);
25509 /* Handle FP constants specially. Note that if we have a minimal
25510 TOC, things we put here aren't actually in the TOC, so we can allow
25511 FP constants. */
25512 if (GET_CODE (x) == CONST_DOUBLE &&
25513 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
25515 REAL_VALUE_TYPE rv;
25516 long k[4];
25518 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25519 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25520 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
25521 else
25522 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
25524 if (TARGET_64BIT)
25526 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25527 fputs (DOUBLE_INT_ASM_OP, file);
25528 else
25529 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25530 k[0] & 0xffffffff, k[1] & 0xffffffff,
25531 k[2] & 0xffffffff, k[3] & 0xffffffff);
25532 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
25533 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25534 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
25535 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
25536 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
25537 return;
25539 else
25541 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25542 fputs ("\t.long ", file);
25543 else
25544 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25545 k[0] & 0xffffffff, k[1] & 0xffffffff,
25546 k[2] & 0xffffffff, k[3] & 0xffffffff);
25547 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
25548 k[0] & 0xffffffff, k[1] & 0xffffffff,
25549 k[2] & 0xffffffff, k[3] & 0xffffffff);
25550 return;
25553 else if (GET_CODE (x) == CONST_DOUBLE &&
25554 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
25556 REAL_VALUE_TYPE rv;
25557 long k[2];
25559 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25561 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25562 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
25563 else
25564 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
25566 if (TARGET_64BIT)
25568 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25569 fputs (DOUBLE_INT_ASM_OP, file);
25570 else
25571 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25572 k[0] & 0xffffffff, k[1] & 0xffffffff);
25573 fprintf (file, "0x%lx%08lx\n",
25574 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25575 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
25576 return;
25578 else
25580 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25581 fputs ("\t.long ", file);
25582 else
25583 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25584 k[0] & 0xffffffff, k[1] & 0xffffffff);
25585 fprintf (file, "0x%lx,0x%lx\n",
25586 k[0] & 0xffffffff, k[1] & 0xffffffff);
25587 return;
25590 else if (GET_CODE (x) == CONST_DOUBLE &&
25591 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
25593 REAL_VALUE_TYPE rv;
25594 long l;
25596 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25597 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25598 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
25599 else
25600 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
25602 if (TARGET_64BIT)
25604 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25605 fputs (DOUBLE_INT_ASM_OP, file);
25606 else
25607 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25608 if (WORDS_BIG_ENDIAN)
25609 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
25610 else
25611 fprintf (file, "0x%lx\n", l & 0xffffffff);
25612 return;
25614 else
25616 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25617 fputs ("\t.long ", file);
25618 else
25619 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25620 fprintf (file, "0x%lx\n", l & 0xffffffff);
25621 return;
25624 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
25626 unsigned HOST_WIDE_INT low;
25627 HOST_WIDE_INT high;
25629 low = INTVAL (x) & 0xffffffff;
25630 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
25632 /* TOC entries are always Pmode-sized, so when big-endian
25633 smaller integer constants in the TOC need to be padded.
25634 (This is still a win over putting the constants in
25635 a separate constant pool, because then we'd have
25636 to have both a TOC entry _and_ the actual constant.)
25638 For a 32-bit target, CONST_INT values are loaded and shifted
25639 entirely within `low' and can be stored in one TOC entry. */
25641 /* It would be easy to make this work, but it doesn't now. */
25642 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
25644 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
25646 low |= high << 32;
25647 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
25648 high = (HOST_WIDE_INT) low >> 32;
25649 low &= 0xffffffff;
25652 if (TARGET_64BIT)
25654 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25655 fputs (DOUBLE_INT_ASM_OP, file);
25656 else
25657 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25658 (long) high & 0xffffffff, (long) low & 0xffffffff);
25659 fprintf (file, "0x%lx%08lx\n",
25660 (long) high & 0xffffffff, (long) low & 0xffffffff);
25661 return;
25663 else
25665 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
25667 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25668 fputs ("\t.long ", file);
25669 else
25670 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25671 (long) high & 0xffffffff, (long) low & 0xffffffff);
25672 fprintf (file, "0x%lx,0x%lx\n",
25673 (long) high & 0xffffffff, (long) low & 0xffffffff);
25675 else
25677 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25678 fputs ("\t.long ", file);
25679 else
25680 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
25681 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
25683 return;
25687 if (GET_CODE (x) == CONST)
25689 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
25690 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
25692 base = XEXP (XEXP (x, 0), 0);
25693 offset = INTVAL (XEXP (XEXP (x, 0), 1));
25696 switch (GET_CODE (base))
25698 case SYMBOL_REF:
25699 name = XSTR (base, 0);
25700 break;
25702 case LABEL_REF:
25703 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
25704 CODE_LABEL_NUMBER (XEXP (base, 0)));
25705 break;
25707 case CODE_LABEL:
25708 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
25709 break;
25711 default:
25712 gcc_unreachable ();
25715 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25716 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
25717 else
25719 fputs ("\t.tc ", file);
25720 RS6000_OUTPUT_BASENAME (file, name);
25722 if (offset < 0)
25723 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
25724 else if (offset)
25725 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
25727 /* Mark large TOC symbols on AIX with [TE] so they are mapped
25728 after other TOC symbols, reducing overflow of small TOC access
25729 to [TC] symbols. */
25730 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
25731 ? "[TE]," : "[TC],", file);
25734 /* Currently C++ toc references to vtables can be emitted before it
25735 is decided whether the vtable is public or private. If this is
25736 the case, then the linker will eventually complain that there is
25737 a TOC reference to an unknown section. Thus, for vtables only,
25738 we emit the TOC reference to reference the symbol and not the
25739 section. */
25740 if (VTABLE_NAME_P (name))
25742 RS6000_OUTPUT_BASENAME (file, name);
25743 if (offset < 0)
25744 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
25745 else if (offset > 0)
25746 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
25748 else
25749 output_addr_const (file, x);
25751 #if HAVE_AS_TLS
25752 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
25753 && SYMBOL_REF_TLS_MODEL (base) != 0)
25755 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
25756 fputs ("@le", file);
25757 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
25758 fputs ("@ie", file);
25759 /* Use global-dynamic for local-dynamic. */
25760 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
25761 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
25763 putc ('\n', file);
25764 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
25765 fputs ("\t.tc .", file);
25766 RS6000_OUTPUT_BASENAME (file, name);
25767 fputs ("[TC],", file);
25768 output_addr_const (file, x);
25769 fputs ("@m", file);
25772 #endif
25774 putc ('\n', file);
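
/* Example of the emitted entries (illustrative): for the 64-bit AIX
   case, a DFmode constant 1.0 (target words 0x3ff00000, 0x0) comes
   out as

       .tc FD_3ff00000_0[TC],0x3ff0000000000000

   while on 64-bit ELF or with -mminimal-toc the same constant is
   emitted without a .tc name, e.g. as a bare doubleword such as

       .quad 0x3ff0000000000000

   (assuming DOUBLE_INT_ASM_OP expands to a .quad directive).  */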
25777 /* Output an assembler pseudo-op to write an ASCII string of N characters
25778 starting at P to FILE.
25780 On the RS/6000, we have to do this using the .byte operation and
25781 write out special characters outside the quoted string.
25782 Also, the assembler is broken; very long strings are truncated,
25783 so we must artificially break them up early. */
25785 void
25786 output_ascii (FILE *file, const char *p, int n)
25788 char c;
25789 int i, count_string;
25790 const char *for_string = "\t.byte \"";
25791 const char *for_decimal = "\t.byte ";
25792 const char *to_close = NULL;
25794 count_string = 0;
25795 for (i = 0; i < n; i++)
25797 c = *p++;
25798 if (c >= ' ' && c < 0177)
25800 if (for_string)
25801 fputs (for_string, file);
25802 putc (c, file);
25804 /* Write two quotes to get one. */
25805 if (c == '"')
25807 putc (c, file);
25808 ++count_string;
25811 for_string = NULL;
25812 for_decimal = "\"\n\t.byte ";
25813 to_close = "\"\n";
25814 ++count_string;
25816 if (count_string >= 512)
25818 fputs (to_close, file);
25820 for_string = "\t.byte \"";
25821 for_decimal = "\t.byte ";
25822 to_close = NULL;
25823 count_string = 0;
25826 else
25828 if (for_decimal)
25829 fputs (for_decimal, file);
25830 fprintf (file, "%d", c);
25832 for_string = "\n\t.byte \"";
25833 for_decimal = ", ";
25834 to_close = "\n";
25835 count_string = 0;
25839 /* Now close the string if we have written one. Then end the line. */
25840 if (to_close)
25841 fputs (to_close, file);
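
/* Example (illustrative): for the 4-byte input "a\"b\n", the loop above
   emits

       .byte "a""b"
       .byte 10

   -- consecutive printable characters are grouped into one quoted
   .byte string with '"' doubled, and each non-printable byte is
   emitted as a decimal .byte value of its own.  */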
25844 /* Generate a unique section name for FILENAME for a section type
25845 represented by SECTION_DESC. Output goes into BUF.
25847 SECTION_DESC can be any string, as long as it is different for each
25848 possible section type.
25850 We name the section in the same manner as xlc. The name begins with an
25851 underscore followed by the filename (after stripping any leading directory
25852 names) with the last period replaced by the string SECTION_DESC. If
25853 FILENAME does not contain a period, SECTION_DESC is appended to the end of
25854 the name. */
25856 void
25857 rs6000_gen_section_name (char **buf, const char *filename,
25858 const char *section_desc)
25860 const char *q, *after_last_slash, *last_period = 0;
25861 char *p;
25862 int len;
25864 after_last_slash = filename;
25865 for (q = filename; *q; q++)
25867 if (*q == '/')
25868 after_last_slash = q + 1;
25869 else if (*q == '.')
25870 last_period = q;
25873 len = strlen (after_last_slash) + strlen (section_desc) + 2;
25874 *buf = (char *) xmalloc (len);
25876 p = *buf;
25877 *p++ = '_';
25879 for (q = after_last_slash; *q; q++)
25881 if (q == last_period)
25883 strcpy (p, section_desc);
25884 p += strlen (section_desc);
25885 break;
25888 else if (ISALNUM (*q))
25889 *p++ = *q;
25892 if (last_period == 0)
25893 strcpy (p, section_desc);
25894 else
25895 *p = '\0';
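
/* Usage example (hypothetical arguments): for FILENAME "src/rs6000.c"
   and SECTION_DESC "ro_", the buffer receives "_rs6000ro_" -- leading
   directories are stripped, non-alphanumeric characters are dropped,
   and everything from the last period on is replaced by the
   descriptor.  */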
25898 /* Emit profile function. */
25900 void
25901 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
25903 /* Non-standard profiling for kernels, which just saves LR then calls
25904 _mcount without worrying about arg saves. The idea is to change
25905 the function prologue as little as possible as it isn't easy to
25906 account for arg save/restore code added just for _mcount. */
25907 if (TARGET_PROFILE_KERNEL)
25908 return;
25910 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25912 #ifndef NO_PROFILE_COUNTERS
25913 # define NO_PROFILE_COUNTERS 0
25914 #endif
25915 if (NO_PROFILE_COUNTERS)
25916 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
25917 LCT_NORMAL, VOIDmode, 0);
25918 else
25920 char buf[30];
25921 const char *label_name;
25922 rtx fun;
25924 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
25925 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
25926 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
25928 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
25929 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
25932 else if (DEFAULT_ABI == ABI_DARWIN)
25934 const char *mcount_name = RS6000_MCOUNT;
25935 int caller_addr_regno = LR_REGNO;
25937 /* Be conservative and always set this, at least for now. */
25938 crtl->uses_pic_offset_table = 1;
25940 #if TARGET_MACHO
25941 /* For PIC code, set up a stub and collect the caller's address
25942 from r0, which is where the prologue puts it. */
25943 if (MACHOPIC_INDIRECT
25944 && crtl->uses_pic_offset_table)
25945 caller_addr_regno = 0;
25946 #endif
25947 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
25948 LCT_NORMAL, VOIDmode, 1,
25949 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
25953 /* Write function profiler code. */
25955 void
25956 output_function_profiler (FILE *file, int labelno)
25958 char buf[100];
25960 switch (DEFAULT_ABI)
25962 default:
25963 gcc_unreachable ();
25965 case ABI_V4:
25966 if (!TARGET_32BIT)
25968 warning (0, "no profiling of 64-bit code for this ABI");
25969 return;
25971 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
25972 fprintf (file, "\tmflr %s\n", reg_names[0]);
25973 if (NO_PROFILE_COUNTERS)
25975 asm_fprintf (file, "\tstw %s,4(%s)\n",
25976 reg_names[0], reg_names[1]);
25978 else if (TARGET_SECURE_PLT && flag_pic)
25980 if (TARGET_LINK_STACK)
25982 char name[32];
25983 get_ppc476_thunk_name (name);
25984 asm_fprintf (file, "\tbl %s\n", name);
25986 else
25987 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
25988 asm_fprintf (file, "\tstw %s,4(%s)\n",
25989 reg_names[0], reg_names[1]);
25990 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
25991 asm_fprintf (file, "\taddis %s,%s,",
25992 reg_names[12], reg_names[12]);
25993 assemble_name (file, buf);
25994 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
25995 assemble_name (file, buf);
25996 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
25998 else if (flag_pic == 1)
26000 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
26001 asm_fprintf (file, "\tstw %s,4(%s)\n",
26002 reg_names[0], reg_names[1]);
26003 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26004 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
26005 assemble_name (file, buf);
26006 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
26008 else if (flag_pic > 1)
26010 asm_fprintf (file, "\tstw %s,4(%s)\n",
26011 reg_names[0], reg_names[1]);
26012 /* Now, we need to get the address of the label. */
26013 if (TARGET_LINK_STACK)
26015 char name[32];
26016 get_ppc476_thunk_name (name);
26017 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
26018 assemble_name (file, buf);
26019 fputs ("-.\n1:", file);
26020 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26021 asm_fprintf (file, "\taddi %s,%s,4\n",
26022 reg_names[11], reg_names[11]);
26024 else
26026 fputs ("\tbcl 20,31,1f\n\t.long ", file);
26027 assemble_name (file, buf);
26028 fputs ("-.\n1:", file);
26029 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26031 asm_fprintf (file, "\tlwz %s,0(%s)\n",
26032 reg_names[0], reg_names[11]);
26033 asm_fprintf (file, "\tadd %s,%s,%s\n",
26034 reg_names[0], reg_names[0], reg_names[11]);
26036 else
26038 asm_fprintf (file, "\tlis %s,", reg_names[12]);
26039 assemble_name (file, buf);
26040 fputs ("@ha\n", file);
26041 asm_fprintf (file, "\tstw %s,4(%s)\n",
26042 reg_names[0], reg_names[1]);
26043 asm_fprintf (file, "\tla %s,", reg_names[0]);
26044 assemble_name (file, buf);
26045 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
26048 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
26049 fprintf (file, "\tbl %s%s\n",
26050 RS6000_MCOUNT, flag_pic ? "@plt" : "");
26051 break;
26053 case ABI_AIX:
26054 case ABI_ELFv2:
26055 case ABI_DARWIN:
26056 /* Don't do anything, done in output_profile_hook (). */
26057 break;
26063 /* The following variable holds the last issued insn. */
26065 static rtx last_scheduled_insn;
26067 /* The following variable helps to balance issuing of load and
26068 store instructions. */
26070 static int load_store_pendulum;
26072 /* Power4 load update and store update instructions are cracked into a
26073 load or store and an integer insn which are executed in the same cycle.
26074 Branches have their own dispatch slot which does not count against the
26075 GCC issue rate, but it changes the program flow so there are no other
26076 instructions to issue in this cycle. */
26078 static int
26079 rs6000_variable_issue_1 (rtx insn, int more)
26081 last_scheduled_insn = insn;
26082 if (GET_CODE (PATTERN (insn)) == USE
26083 || GET_CODE (PATTERN (insn)) == CLOBBER)
26085 cached_can_issue_more = more;
26086 return cached_can_issue_more;
26089 if (insn_terminates_group_p (insn, current_group))
26091 cached_can_issue_more = 0;
26092 return cached_can_issue_more;
26095 /* No reservation for this insn, but we reach here anyway; leave MORE unchanged. */
26096 if (recog_memoized (insn) < 0)
26097 return more;
26099 if (rs6000_sched_groups)
26101 if (is_microcoded_insn (insn))
26102 cached_can_issue_more = 0;
26103 else if (is_cracked_insn (insn))
26104 cached_can_issue_more = more > 2 ? more - 2 : 0;
26105 else
26106 cached_can_issue_more = more - 1;
26108 return cached_can_issue_more;
26111 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
26112 return 0;
26114 cached_can_issue_more = more - 1;
26115 return cached_can_issue_more;
26118 static int
26119 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
26121 int r = rs6000_variable_issue_1 (insn, more);
26122 if (verbose)
26123 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
26124 return r;
26127 /* Adjust the cost of a scheduling dependency. Return the new cost of
26128 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
26130 static int
26131 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
26133 enum attr_type attr_type;
26135 if (! recog_memoized (insn))
26136 return 0;
26138 switch (REG_NOTE_KIND (link))
26140 case REG_DEP_TRUE:
26142 /* Data dependency; DEP_INSN writes a register that INSN reads
26143 some cycles later. */
26145 /* Separate a load from a narrower, dependent store. */
26146 if (rs6000_sched_groups
26147 && GET_CODE (PATTERN (insn)) == SET
26148 && GET_CODE (PATTERN (dep_insn)) == SET
26149 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
26150 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
26151 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
26152 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
26153 return cost + 14;
26155 attr_type = get_attr_type (insn);
26157 switch (attr_type)
26159 case TYPE_JMPREG:
26160 /* Tell the first scheduling pass about the latency between
26161 a mtctr and bctr (and mtlr and br/blr). The first
26162 scheduling pass will not know about this latency since
26163 the mtctr instruction, which has the latency associated
26164 to it, will be generated by reload. */
26165 return 4;
26166 case TYPE_BRANCH:
26167 /* Leave some extra cycles between a compare and its
26168 dependent branch, to inhibit expensive mispredicts. */
26169 if ((rs6000_cpu_attr == CPU_PPC603
26170 || rs6000_cpu_attr == CPU_PPC604
26171 || rs6000_cpu_attr == CPU_PPC604E
26172 || rs6000_cpu_attr == CPU_PPC620
26173 || rs6000_cpu_attr == CPU_PPC630
26174 || rs6000_cpu_attr == CPU_PPC750
26175 || rs6000_cpu_attr == CPU_PPC7400
26176 || rs6000_cpu_attr == CPU_PPC7450
26177 || rs6000_cpu_attr == CPU_PPCE5500
26178 || rs6000_cpu_attr == CPU_PPCE6500
26179 || rs6000_cpu_attr == CPU_POWER4
26180 || rs6000_cpu_attr == CPU_POWER5
26181 || rs6000_cpu_attr == CPU_POWER7
26182 || rs6000_cpu_attr == CPU_POWER8
26183 || rs6000_cpu_attr == CPU_CELL)
26184 && recog_memoized (dep_insn)
26185 && (INSN_CODE (dep_insn) >= 0))
26187 switch (get_attr_type (dep_insn))
26189 case TYPE_CMP:
26190 case TYPE_COMPARE:
26191 case TYPE_DELAYED_COMPARE:
26192 case TYPE_FPCOMPARE:
26193 case TYPE_CR_LOGICAL:
26194 case TYPE_DELAYED_CR:
26195 return cost + 2;
26196 case TYPE_MUL:
26197 if (get_attr_dot (dep_insn) == DOT_YES)
26198 return cost + 2;
26199 else
26200 break;
26201 default:
26202 break;
26204 break;
26206 case TYPE_STORE:
26207 case TYPE_FPSTORE:
26208 if ((rs6000_cpu == PROCESSOR_POWER6)
26209 && recog_memoized (dep_insn)
26210 && (INSN_CODE (dep_insn) >= 0))
26213 if (GET_CODE (PATTERN (insn)) != SET)
26214 /* If this happens, we have to extend this to schedule
26215 optimally. Return default for now. */
26216 return cost;
26218 /* Adjust the cost for the case where the value written
26219 by a fixed point operation is used as the address
26220 gen value on a store. */
26221 switch (get_attr_type (dep_insn))
26223 case TYPE_LOAD:
26224 case TYPE_CNTLZ:
26226 if (! store_data_bypass_p (dep_insn, insn))
26227 return get_attr_sign_extend (dep_insn)
26228 == SIGN_EXTEND_YES ? 6 : 4;
26229 break;
26231 case TYPE_VAR_SHIFT_ROTATE:
26232 case TYPE_VAR_DELAYED_COMPARE:
26234 if (! store_data_bypass_p (dep_insn, insn))
26235 return 6;
26236 break;
26238 case TYPE_INTEGER:
26239 case TYPE_COMPARE:
26240 case TYPE_FAST_COMPARE:
26241 case TYPE_EXTS:
26242 case TYPE_SHIFT:
26243 case TYPE_INSERT:
26245 if (! store_data_bypass_p (dep_insn, insn))
26246 return 3;
26247 break;
26249 case TYPE_STORE:
26250 case TYPE_FPLOAD:
26251 case TYPE_FPSTORE:
26253 if (get_attr_update (dep_insn) == UPDATE_YES
26254 && ! store_data_bypass_p (dep_insn, insn))
26255 return 3;
26256 break;
26258 case TYPE_MUL:
26260 if (! store_data_bypass_p (dep_insn, insn))
26261 return 17;
26262 break;
26264 case TYPE_IDIV:
26266 if (! store_data_bypass_p (dep_insn, insn))
26267 return 45;
26268 break;
26270 case TYPE_LDIV:
26272 if (! store_data_bypass_p (dep_insn, insn))
26273 return 57;
26274 break;
26276 default:
26277 break;
26280 break;
26282 case TYPE_LOAD:
26283 if ((rs6000_cpu == PROCESSOR_POWER6)
26284 && recog_memoized (dep_insn)
26285 && (INSN_CODE (dep_insn) >= 0))
26288 /* Adjust the cost for the case where the value written
26289 by a fixed point instruction is used within the address
26290 gen portion of a subsequent load(u)(x) */
26291 switch (get_attr_type (dep_insn))
26293 case TYPE_LOAD:
26294 case TYPE_CNTLZ:
26296 if (set_to_load_agen (dep_insn, insn))
26297 return get_attr_sign_extend (dep_insn)
26298 == SIGN_EXTEND_YES ? 6 : 4;
26299 break;
26301 case TYPE_VAR_SHIFT_ROTATE:
26302 case TYPE_VAR_DELAYED_COMPARE:
26304 if (set_to_load_agen (dep_insn, insn))
26305 return 6;
26306 break;
26308 case TYPE_INTEGER:
26309 case TYPE_COMPARE:
26310 case TYPE_FAST_COMPARE:
26311 case TYPE_EXTS:
26312 case TYPE_SHIFT:
26313 case TYPE_INSERT:
26315 if (set_to_load_agen (dep_insn, insn))
26316 return 3;
26317 break;
26319 case TYPE_STORE:
26320 case TYPE_FPLOAD:
26321 case TYPE_FPSTORE:
26323 if (get_attr_update (dep_insn) == UPDATE_YES
26324 && set_to_load_agen (dep_insn, insn))
26325 return 3;
26326 break;
26328 case TYPE_MUL:
26330 if (set_to_load_agen (dep_insn, insn))
26331 return 17;
26332 break;
26334 case TYPE_IDIV:
26336 if (set_to_load_agen (dep_insn, insn))
26337 return 45;
26338 break;
26340 case TYPE_LDIV:
26342 if (set_to_load_agen (dep_insn, insn))
26343 return 57;
26344 break;
26346 default:
26347 break;
26350 break;
26352 case TYPE_FPLOAD:
26353 if ((rs6000_cpu == PROCESSOR_POWER6)
26354 && get_attr_update (insn) == UPDATE_NO
26355 && recog_memoized (dep_insn)
26356 && (INSN_CODE (dep_insn) >= 0)
26357 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
26358 return 2;
26360 default:
26361 break;
26364 /* Fall out to return default cost. */
26366 break;
26368 case REG_DEP_OUTPUT:
26369 /* Output dependency; DEP_INSN writes a register that INSN writes some
26370 cycles later. */
26371 if ((rs6000_cpu == PROCESSOR_POWER6)
26372 && recog_memoized (dep_insn)
26373 && (INSN_CODE (dep_insn) >= 0))
26375 attr_type = get_attr_type (insn);
26377 switch (attr_type)
26379 case TYPE_FP:
26380 if (get_attr_type (dep_insn) == TYPE_FP)
26381 return 1;
26382 break;
26383 case TYPE_FPLOAD:
26384 if (get_attr_update (insn) == UPDATE_NO
26385 && get_attr_type (dep_insn) == TYPE_MFFGPR)
26386 return 2;
26387 break;
26388 default:
26389 break;
26392 case REG_DEP_ANTI:
26393 /* Anti dependency; DEP_INSN reads a register that INSN writes some
26394 cycles later. */
26395 return 0;
26397 default:
26398 gcc_unreachable ();
26401 return cost;
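
/* Example (illustrative): on Power6, when a store INSN depends on a
   sign-extending load DEP_INSN whose result feeds the store's address
   generation, the TYPE_LOAD case above returns a cost of 6 (4 without
   the sign extension), provided the store-data bypass does not apply;
   an anti dependency, by contrast, is simply cost 0.  */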
26404 /* Debug version of rs6000_adjust_cost. */
26406 static int
26407 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
26409 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
26411 if (ret != cost)
26413 const char *dep;
26415 switch (REG_NOTE_KIND (link))
26417 default: dep = "unknown dependency"; break;
26418 case REG_DEP_TRUE: dep = "data dependency"; break;
26419 case REG_DEP_OUTPUT: dep = "output dependency"; break;
26420 case REG_DEP_ANTI: dep = "anti dependency"; break;
26423 fprintf (stderr,
26424 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
26425 "%s, insn:\n", ret, cost, dep);
26427 debug_rtx (insn);
26430 return ret;
26433 /* Return true if INSN is microcoded.
26434 Return false otherwise. */
26436 static bool
26437 is_microcoded_insn (rtx insn)
26439 if (!insn || !NONDEBUG_INSN_P (insn)
26440 || GET_CODE (PATTERN (insn)) == USE
26441 || GET_CODE (PATTERN (insn)) == CLOBBER)
26442 return false;
26444 if (rs6000_cpu_attr == CPU_CELL)
26445 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
26447 if (rs6000_sched_groups
26448 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26450 enum attr_type type = get_attr_type (insn);
26451 if ((type == TYPE_LOAD
26452 && get_attr_update (insn) == UPDATE_YES
26453 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
26454 || ((type == TYPE_LOAD || type == TYPE_STORE)
26455 && get_attr_update (insn) == UPDATE_YES
26456 && get_attr_indexed (insn) == INDEXED_YES)
26457 || type == TYPE_MFCR)
26458 return true;
26461 return false;
26464 /* The function returns true if INSN is cracked into 2 instructions
26465 by the processor (and therefore occupies 2 issue slots). */
26467 static bool
26468 is_cracked_insn (rtx insn)
26470 if (!insn || !NONDEBUG_INSN_P (insn)
26471 || GET_CODE (PATTERN (insn)) == USE
26472 || GET_CODE (PATTERN (insn)) == CLOBBER)
26473 return false;
26475 if (rs6000_sched_groups
26476 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26478 enum attr_type type = get_attr_type (insn);
26479 if ((type == TYPE_LOAD
26480 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26481 && get_attr_update (insn) == UPDATE_NO)
26482 || (type == TYPE_LOAD
26483 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
26484 && get_attr_update (insn) == UPDATE_YES
26485 && get_attr_indexed (insn) == INDEXED_NO)
26486 || (type == TYPE_STORE
26487 && get_attr_update (insn) == UPDATE_YES
26488 && get_attr_indexed (insn) == INDEXED_NO)
26489 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
26490 && get_attr_update (insn) == UPDATE_YES)
26491 || type == TYPE_DELAYED_CR
26492 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
26493 || (type == TYPE_MUL
26494 && get_attr_dot (insn) == DOT_YES)
26495 || type == TYPE_IDIV || type == TYPE_LDIV
26496 || (type == TYPE_INSERT
26497 && get_attr_size (insn) == SIZE_32))
26498 return true;
26501 return false;
26504 /* The function returns true if INSN can be issued only from
26505 the branch slot. */
26507 static bool
26508 is_branch_slot_insn (rtx insn)
26510 if (!insn || !NONDEBUG_INSN_P (insn)
26511 || GET_CODE (PATTERN (insn)) == USE
26512 || GET_CODE (PATTERN (insn)) == CLOBBER)
26513 return false;
26515 if (rs6000_sched_groups)
26517 enum attr_type type = get_attr_type (insn);
26518 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
26519 return true;
26520 return false;
26523 return false;
26526 /* Return true if OUT_INSN sets a value that is
26527 used in the address generation computation of IN_INSN. */
26528 static bool
26529 set_to_load_agen (rtx out_insn, rtx in_insn)
26531 rtx out_set, in_set;
26533 /* For performance reasons, only handle the simple case where
26534 both loads are a single_set. */
26535 out_set = single_set (out_insn);
26536 if (out_set)
26538 in_set = single_set (in_insn);
26539 if (in_set)
26540 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
26543 return false;
26546 /* Try to determine the base/offset/size parts of the given MEM.
26547 Return true on success, false if the values could not all
26548 be determined.
26550 This function only looks for REG or REG+CONST address forms.
26551 REG+REG address form will return false. */
26553 static bool
26554 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
26555 HOST_WIDE_INT *size)
26557 rtx addr_rtx;
26558 if (MEM_SIZE_KNOWN_P (mem))
26559 *size = MEM_SIZE (mem);
26560 else
26561 return false;
26563 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
26564 addr_rtx = XEXP (XEXP (mem, 0), 1);
26565 else
26566 addr_rtx = (XEXP (mem, 0));
26568 if (GET_CODE (addr_rtx) == REG)
26570 *base = addr_rtx;
26571 *offset = 0;
26573 else if (GET_CODE (addr_rtx) == PLUS
26574 && CONST_INT_P (XEXP (addr_rtx, 1)))
26576 *base = XEXP (addr_rtx, 0);
26577 *offset = INTVAL (XEXP (addr_rtx, 1));
26579 else
26580 return false;
26582 return true;
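
/* Example (illustrative): for a MEM of the form
   (mem:SI (plus (reg r9) (const_int 16))) with a known 4-byte size,
   the function stores BASE = r9, OFFSET = 16, SIZE = 4 and returns
   true; a REG+REG address such as (plus (reg r9) (reg r10)) makes it
   return false.  */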
26585 /* Return true if the target storage location of
26586 MEM1 is adjacent to the target storage location of MEM2. */
26589 static bool
26590 adjacent_mem_locations (rtx mem1, rtx mem2)
26592 rtx reg1, reg2;
26593 HOST_WIDE_INT off1, size1, off2, size2;
26595 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26596 && get_memref_parts (mem2, &reg2, &off2, &size2))
26597 return ((REGNO (reg1) == REGNO (reg2))
26598 && ((off1 + size1 == off2)
26599 || (off2 + size2 == off1)));
26601 return false;
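
/* Example (illustrative): two 4-byte stores off the same base register,
   one at offset 0 and one at offset 4, are adjacent (0 + 4 == 4); the
   same pair at offsets 0 and 8 is not, and neither are accesses off
   different base registers.  */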
26604 /* This function returns true if it can be determined that the two MEM
26605 locations overlap by at least 1 byte based on base reg/offset/size. */
26607 static bool
26608 mem_locations_overlap (rtx mem1, rtx mem2)
26610 rtx reg1, reg2;
26611 HOST_WIDE_INT off1, size1, off2, size2;
26613 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26614 && get_memref_parts (mem2, &reg2, &off2, &size2))
26615 return ((REGNO (reg1) == REGNO (reg2))
26616 && (((off1 <= off2) && (off1 + size1 > off2))
26617 || ((off2 <= off1) && (off2 + size2 > off1))));
26619 return false;
26622 /* A C statement (sans semicolon) to update the integer scheduling
26623 priority INSN_PRIORITY (INSN). Increase the priority to execute the
26624 INSN earlier, reduce the priority to execute INSN later. Do not
26625 define this macro if you do not need to adjust the scheduling
26626 priorities of insns. */
26628 static int
26629 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
26631 rtx load_mem, str_mem;
26632 /* On machines (like the 750) which have asymmetric integer units,
26633 where one integer unit can do multiply and divides and the other
26634 can't, reduce the priority of multiply/divide so it is scheduled
26635 before other integer operations. */
26637 #if 0
26638 if (! INSN_P (insn))
26639 return priority;
26641 if (GET_CODE (PATTERN (insn)) == USE)
26642 return priority;
26644 switch (rs6000_cpu_attr) {
26645 case CPU_PPC750:
26646 switch (get_attr_type (insn))
26648 default:
26649 break;
26651 case TYPE_MUL:
26652 case TYPE_IDIV:
26653 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
26654 priority, priority);
26655 if (priority >= 0 && priority < 0x01000000)
26656 priority >>= 3;
26657 break;
26660 #endif
26662 if (insn_must_be_first_in_group (insn)
26663 && reload_completed
26664 && current_sched_info->sched_max_insns_priority
26665 && rs6000_sched_restricted_insns_priority)
26668 /* Prioritize insns that can be dispatched only in the first
26669 dispatch slot. */
26670 if (rs6000_sched_restricted_insns_priority == 1)
26671 /* Attach highest priority to insn. This means that in
26672 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
26673 precede 'priority' (critical path) considerations. */
26674 return current_sched_info->sched_max_insns_priority;
26675 else if (rs6000_sched_restricted_insns_priority == 2)
26676 /* Increase priority of insn by a minimal amount. This means that in
26677 haifa-sched.c:ready_sort(), only 'priority' (critical path)
26678 considerations precede dispatch-slot restriction considerations. */
26679 return (priority + 1);
26682 if (rs6000_cpu == PROCESSOR_POWER6
26683 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
26684 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
26685 /* Attach highest priority to insn if the scheduler has just issued two
26686 stores and this instruction is a load, or two loads and this instruction
26687 is a store. Power6 wants loads and stores scheduled alternately
26688 when possible */
26689 return current_sched_info->sched_max_insns_priority;
26691 return priority;
26694 /* Return true if the instruction is nonpipelined on the Cell. */
26695 static bool
26696 is_nonpipeline_insn (rtx insn)
26698 enum attr_type type;
26699 if (!insn || !NONDEBUG_INSN_P (insn)
26700 || GET_CODE (PATTERN (insn)) == USE
26701 || GET_CODE (PATTERN (insn)) == CLOBBER)
26702 return false;
26704 type = get_attr_type (insn);
26705 if (type == TYPE_MUL
26706 || type == TYPE_IDIV
26707 || type == TYPE_LDIV
26708 || type == TYPE_SDIV
26709 || type == TYPE_DDIV
26710 || type == TYPE_SSQRT
26711 || type == TYPE_DSQRT
26712 || type == TYPE_MFCR
26713 || type == TYPE_MFCRF
26714 || type == TYPE_MFJMPR)
26716 return true;
26718 return false;
26722 /* Return how many instructions the machine can issue per cycle. */
26724 static int
26725 rs6000_issue_rate (void)
26727 /* Unless scheduling for register pressure, use issue rate of 1 for
26728 first scheduling pass to decrease degradation. */
26729 if (!reload_completed && !flag_sched_pressure)
26730 return 1;
26732 switch (rs6000_cpu_attr) {
26733 case CPU_RS64A:
26734 case CPU_PPC601: /* ? */
26735 case CPU_PPC7450:
26736 return 3;
26737 case CPU_PPC440:
26738 case CPU_PPC603:
26739 case CPU_PPC750:
26740 case CPU_PPC7400:
26741 case CPU_PPC8540:
26742 case CPU_PPC8548:
26743 case CPU_CELL:
26744 case CPU_PPCE300C2:
26745 case CPU_PPCE300C3:
26746 case CPU_PPCE500MC:
26747 case CPU_PPCE500MC64:
26748 case CPU_PPCE5500:
26749 case CPU_PPCE6500:
26750 case CPU_TITAN:
26751 return 2;
26752 case CPU_PPC476:
26753 case CPU_PPC604:
26754 case CPU_PPC604E:
26755 case CPU_PPC620:
26756 case CPU_PPC630:
26757 return 4;
26758 case CPU_POWER4:
26759 case CPU_POWER5:
26760 case CPU_POWER6:
26761 case CPU_POWER7:
26762 return 5;
26763 case CPU_POWER8:
26764 return 7;
26765 default:
26766 return 1;
26770 /* Return how many instructions to look ahead for better insn
26771 scheduling. */
26773 static int
26774 rs6000_use_sched_lookahead (void)
26776 switch (rs6000_cpu_attr)
26778 case CPU_PPC8540:
26779 case CPU_PPC8548:
26780 return 4;
26782 case CPU_CELL:
26783 return (reload_completed ? 8 : 0);
26785 default:
26786 return 0;
26790 /* We are choosing insn from the ready queue. Return zero if INSN can be
26791 chosen. */
26792 static int
26793 rs6000_use_sched_lookahead_guard (rtx insn, int ready_index)
26795 if (ready_index == 0)
26796 return 0;
26798 if (rs6000_cpu_attr != CPU_CELL)
26799 return 0;
26801 gcc_assert (insn != NULL_RTX && INSN_P (insn));
26803 if (!reload_completed
26804 || is_nonpipeline_insn (insn)
26805 || is_microcoded_insn (insn))
26806 return 1;
26808 return 0;
26811 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
26812 and return true. */
26814 static bool
26815 find_mem_ref (rtx pat, rtx *mem_ref)
26817 const char * fmt;
26818 int i, j;
26820 /* stack_tie does not produce any real memory traffic. */
26821 if (tie_operand (pat, VOIDmode))
26822 return false;
26824 if (GET_CODE (pat) == MEM)
26826 *mem_ref = pat;
26827 return true;
26830 /* Recursively process the pattern. */
26831 fmt = GET_RTX_FORMAT (GET_CODE (pat));
26833 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
26835 if (fmt[i] == 'e')
26837 if (find_mem_ref (XEXP (pat, i), mem_ref))
26838 return true;
26840 else if (fmt[i] == 'E')
26841 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
26843 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
26844 return true;
26848 return false;
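
/* Example (illustrative): for PAT = (set (reg r3) (plus (mem (reg r4))
   (reg r5))), the recursive walk finds the (mem (reg r4)) subexpression,
   stores it in *MEM_REF and returns true; a stack_tie pattern returns
   false up front even though it contains MEMs.  */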
26851 /* Determine if PAT is a PATTERN of a load insn. */
26853 static bool
26854 is_load_insn1 (rtx pat, rtx *load_mem)
26856 if (!pat || pat == NULL_RTX)
26857 return false;
26859 if (GET_CODE (pat) == SET)
26860 return find_mem_ref (SET_SRC (pat), load_mem);
26862 if (GET_CODE (pat) == PARALLEL)
26864 int i;
26866 for (i = 0; i < XVECLEN (pat, 0); i++)
26867 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
26868 return true;
26871 return false;
26874 /* Determine if INSN loads from memory. */
26876 static bool
26877 is_load_insn (rtx insn, rtx *load_mem)
26879 if (!insn || !INSN_P (insn))
26880 return false;
26882 if (CALL_P (insn))
26883 return false;
26885 return is_load_insn1 (PATTERN (insn), load_mem);
26888 /* Determine if PAT is a PATTERN of a store insn. */
26890 static bool
26891 is_store_insn1 (rtx pat, rtx *str_mem)
26893 if (!pat || pat == NULL_RTX)
26894 return false;
26896 if (GET_CODE (pat) == SET)
26897 return find_mem_ref (SET_DEST (pat), str_mem);
26899 if (GET_CODE (pat) == PARALLEL)
26901 int i;
26903 for (i = 0; i < XVECLEN (pat, 0); i++)
26904 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
26905 return true;
26908 return false;
26911 /* Determine if INSN stores to memory. */
26913 static bool
26914 is_store_insn (rtx insn, rtx *str_mem)
26916 if (!insn || !INSN_P (insn))
26917 return false;
26919 return is_store_insn1 (PATTERN (insn), str_mem);
26922 /* Returns whether the dependence between INSN and NEXT is considered
26923 costly by the given target. */
26925 static bool
26926 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
26928 rtx insn;
26929 rtx next;
26930 rtx load_mem, str_mem;
26932 /* If the flag is not enabled - no dependence is considered costly;
26933 allow all dependent insns in the same group.
26934 This is the most aggressive option. */
26935 if (rs6000_sched_costly_dep == no_dep_costly)
26936 return false;
26938 /* If the flag is set to 1 - a dependence is always considered costly;
26939 do not allow dependent instructions in the same group.
26940 This is the most conservative option. */
26941 if (rs6000_sched_costly_dep == all_deps_costly)
26942 return true;
26944 insn = DEP_PRO (dep);
26945 next = DEP_CON (dep);
26947 if (rs6000_sched_costly_dep == store_to_load_dep_costly
26948 && is_load_insn (next, &load_mem)
26949 && is_store_insn (insn, &str_mem))
26950 /* Prevent load after store in the same group. */
26951 return true;
26953 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
26954 && is_load_insn (next, &load_mem)
26955 && is_store_insn (insn, &str_mem)
26956 && DEP_TYPE (dep) == REG_DEP_TRUE
26957 && mem_locations_overlap(str_mem, load_mem))
26958 /* Prevent load after store in the same group if it is a true
26959 dependence. */
26960 return true;
26962 /* The flag is set to X; dependences with latency >= X are considered costly,
26963 and will not be scheduled in the same group. */
26964 if (rs6000_sched_costly_dep <= max_dep_latency
26965 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
26966 return true;
26968 return false;
26971 /* Return the next insn after INSN that is found before TAIL is reached,
26972 skipping any "non-active" insns - insns that will not actually occupy
26973 an issue slot. Return NULL_RTX if such an insn is not found. */
26975 static rtx
26976 get_next_active_insn (rtx insn, rtx tail)
26978 if (insn == NULL_RTX || insn == tail)
26979 return NULL_RTX;
26981 while (1)
26983 insn = NEXT_INSN (insn);
26984 if (insn == NULL_RTX || insn == tail)
26985 return NULL_RTX;
26987 if (CALL_P (insn)
26988 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
26989 || (NONJUMP_INSN_P (insn)
26990 && GET_CODE (PATTERN (insn)) != USE
26991 && GET_CODE (PATTERN (insn)) != CLOBBER
26992 && INSN_CODE (insn) != CODE_FOR_stack_tie))
26993 break;
26995 return insn;
26998 /* We are about to begin issuing insns for this clock cycle. */
27000 static int
27001 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
27002 rtx *ready ATTRIBUTE_UNUSED,
27003 int *pn_ready ATTRIBUTE_UNUSED,
27004 int clock_var ATTRIBUTE_UNUSED)
27006 int n_ready = *pn_ready;
27008 if (sched_verbose)
27009 fprintf (dump, "// rs6000_sched_reorder :\n");
27011 /* Reorder the ready list if the second-to-last ready insn
27012 is a nonpipeline insn. */
27013 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
27015 if (is_nonpipeline_insn (ready[n_ready - 1])
27016 && (recog_memoized (ready[n_ready - 2]) > 0))
27017 /* Simply swap first two insns. */
27019 rtx tmp = ready[n_ready - 1];
27020 ready[n_ready - 1] = ready[n_ready - 2];
27021 ready[n_ready - 2] = tmp;
27025 if (rs6000_cpu == PROCESSOR_POWER6)
27026 load_store_pendulum = 0;
27028 return rs6000_issue_rate ();
27031 /* Like rs6000_sched_reorder, but called after issuing each insn. */
27033 static int
27034 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
27035 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
27037 if (sched_verbose)
27038 fprintf (dump, "// rs6000_sched_reorder2 :\n");
27040 /* For Power6, we need to handle some special cases to try and keep the
27041 store queue from overflowing and triggering expensive flushes.
27043 This code monitors how load and store instructions are being issued
27044 and skews the ready list one way or the other to increase the likelihood
27045 that a desired instruction is issued at the proper time.
27047 A couple of things are done. First, we maintain a "load_store_pendulum"
27048 to track the current state of load/store issue.
27050 - If the pendulum is at zero, then no loads or stores have been
27051 issued in the current cycle so we do nothing.
27053 - If the pendulum is 1, then a single load has been issued in this
27054 cycle and we attempt to locate another load in the ready list to
27055 issue with it.
27057 - If the pendulum is -2, then two stores have already been
27058 issued in this cycle, so we increase the priority of the first load
27059 in the ready list to increase its likelihood of being chosen first
27060 in the next cycle.
27062 - If the pendulum is -1, then a single store has been issued in this
27063 cycle and we attempt to locate another store in the ready list to
27064 issue with it, preferring a store to an adjacent memory location to
27065 facilitate store pairing in the store queue.
27067 - If the pendulum is 2, then two loads have already been
27068 issued in this cycle, so we increase the priority of the first store
27069 in the ready list to increase its likelihood of being chosen first
27070 in the next cycle.
27072 - If the pendulum < -2 or > 2, then do nothing.
27074 Note: This code covers the most common scenarios. There exist non
27075 load/store instructions which make use of the LSU and which
27076 would need to be accounted for to strictly model the behavior
27077 of the machine. Those instructions are currently unaccounted
27078 for to help minimize compile time overhead of this code.
27080 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
27082 int pos;
27083 int i;
27084 rtx tmp, load_mem, str_mem;
27086 if (is_store_insn (last_scheduled_insn, &str_mem))
27087 /* Issuing a store, swing the load_store_pendulum to the left */
27088 load_store_pendulum--;
27089 else if (is_load_insn (last_scheduled_insn, &load_mem))
27090 /* Issuing a load, swing the load_store_pendulum to the right */
27091 load_store_pendulum++;
27092 else
27093 return cached_can_issue_more;
27095 /* If the pendulum is balanced, or there is only one instruction on
27096 the ready list, then all is well, so return. */
27097 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
27098 return cached_can_issue_more;
27100 if (load_store_pendulum == 1)
27102 /* A load has been issued in this cycle. Scan the ready list
27103 for another load to issue with it */
27104 pos = *pn_ready-1;
27106 while (pos >= 0)
27108 if (is_load_insn (ready[pos], &load_mem))
27110 /* Found a load. Move it to the head of the ready list,
27111 and adjust its priority so that it is more likely to
27112 stay there */
27113 tmp = ready[pos];
27114 for (i=pos; i<*pn_ready-1; i++)
27115 ready[i] = ready[i + 1];
27116 ready[*pn_ready-1] = tmp;
27118 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27119 INSN_PRIORITY (tmp)++;
27120 break;
27122 pos--;
27125 else if (load_store_pendulum == -2)
27127 /* Two stores have been issued in this cycle. Increase the
27128 priority of the first load in the ready list to favor it for
27129 issuing in the next cycle. */
27130 pos = *pn_ready-1;
27132 while (pos >= 0)
27134 if (is_load_insn (ready[pos], &load_mem)
27135 && !sel_sched_p ()
27136 && INSN_PRIORITY_KNOWN (ready[pos]))
27138 INSN_PRIORITY (ready[pos])++;
27140 /* Adjust the pendulum to account for the fact that a load
27141 was found and increased in priority. This is to prevent
27142 increasing the priority of multiple loads. */
27143 load_store_pendulum--;
27145 break;
27147 pos--;
27150 else if (load_store_pendulum == -1)
27152 /* A store has been issued in this cycle. Scan the ready list for
27153 another store to issue with it, preferring a store to an adjacent
27154 memory location */
27155 int first_store_pos = -1;
27157 pos = *pn_ready-1;
27159 while (pos >= 0)
27161 if (is_store_insn (ready[pos], &str_mem))
27163 rtx str_mem2;
27164 /* Maintain the index of the first store found on the
27165 list */
27166 if (first_store_pos == -1)
27167 first_store_pos = pos;
27169 if (is_store_insn (last_scheduled_insn, &str_mem2)
27170 && adjacent_mem_locations (str_mem, str_mem2))
27172 /* Found an adjacent store. Move it to the head of the
27173 ready list, and adjust its priority so that it is
27174 more likely to stay there */
27175 tmp = ready[pos];
27176 for (i=pos; i<*pn_ready-1; i++)
27177 ready[i] = ready[i + 1];
27178 ready[*pn_ready-1] = tmp;
27180 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27181 INSN_PRIORITY (tmp)++;
27183 first_store_pos = -1;
27185 break;
27188 pos--;
27191 if (first_store_pos >= 0)
27193 /* An adjacent store wasn't found, but a non-adjacent store was,
27194 so move the non-adjacent store to the front of the ready
27195 list, and adjust its priority so that it is more likely to
27196 stay there. */
27197 tmp = ready[first_store_pos];
27198 for (i=first_store_pos; i<*pn_ready-1; i++)
27199 ready[i] = ready[i + 1];
27200 ready[*pn_ready-1] = tmp;
27201 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27202 INSN_PRIORITY (tmp)++;
27205 else if (load_store_pendulum == 2)
27207 /* Two loads have been issued in this cycle. Increase the priority
27208 of the first store in the ready list to favor it for issuing in
27209 the next cycle. */
27210 pos = *pn_ready-1;
27212 while (pos >= 0)
27214 if (is_store_insn (ready[pos], &str_mem)
27215 && !sel_sched_p ()
27216 && INSN_PRIORITY_KNOWN (ready[pos]))
27218 INSN_PRIORITY (ready[pos])++;
27220 /* Adjust the pendulum to account for the fact that a store
27221 was found and increased in priority. This is to prevent
27222 increasing the priority of multiple stores. */
27223 load_store_pendulum++;
27225 break;
27227 pos--;
27232 return cached_can_issue_more;
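
/* Worked example (illustrative): suppose on Power6 the scheduler issues
   a store (pendulum 0 -> -1) and then another store (-1 -> -2).  At -1
   the code above scans the ready list for a second store, preferring
   one adjacent to the first so the pair can combine in the store
   queue; at -2 the first load on the ready list gets a priority bump,
   and the pendulum is pushed past -2 so the boost happens only once
   per swing.  */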
27235 /* Return whether the presence of INSN causes a dispatch group termination
27236 of group WHICH_GROUP.
27238 If WHICH_GROUP == current_group, this function will return true if INSN
27239 causes the termination of the current group (i.e., the dispatch group to
27240 which INSN belongs). This means that INSN will be the last insn in the
27241 group it belongs to.
27243 If WHICH_GROUP == previous_group, this function will return true if INSN
27244 causes the termination of the previous group (i.e., the dispatch group that
27245 precedes the group to which INSN belongs). This means that INSN will be
27246 the first insn in the group it belongs to. */
27248 static bool
27249 insn_terminates_group_p (rtx insn, enum group_termination which_group)
27251 bool first, last;
27253 if (! insn)
27254 return false;
27256 first = insn_must_be_first_in_group (insn);
27257 last = insn_must_be_last_in_group (insn);
27259 if (first && last)
27260 return true;
27262 if (which_group == current_group)
27263 return last;
27264 else if (which_group == previous_group)
27265 return first;
27267 return false;
27271 static bool
27272 insn_must_be_first_in_group (rtx insn)
27274 enum attr_type type;
27276 if (!insn
27277 || NOTE_P (insn)
27278 || DEBUG_INSN_P (insn)
27279 || GET_CODE (PATTERN (insn)) == USE
27280 || GET_CODE (PATTERN (insn)) == CLOBBER)
27281 return false;
27283 switch (rs6000_cpu)
27285 case PROCESSOR_POWER5:
27286 if (is_cracked_insn (insn))
27287 return true;
27288 case PROCESSOR_POWER4:
27289 if (is_microcoded_insn (insn))
27290 return true;
27292 if (!rs6000_sched_groups)
27293 return false;
27295 type = get_attr_type (insn);
27297 switch (type)
27299 case TYPE_MFCR:
27300 case TYPE_MFCRF:
27301 case TYPE_MTCR:
27302 case TYPE_DELAYED_CR:
27303 case TYPE_CR_LOGICAL:
27304 case TYPE_MTJMPR:
27305 case TYPE_MFJMPR:
27306 case TYPE_IDIV:
27307 case TYPE_LDIV:
27308 case TYPE_LOAD_L:
27309 case TYPE_STORE_C:
27310 case TYPE_ISYNC:
27311 case TYPE_SYNC:
27312 return true;
27313 default:
27314 break;
27316 break;
27317 case PROCESSOR_POWER6:
27318 type = get_attr_type (insn);
27320 switch (type)
27322 case TYPE_EXTS:
27323 case TYPE_CNTLZ:
27324 case TYPE_SHIFT:
27325 case TYPE_VAR_SHIFT_ROTATE:
27326 case TYPE_TRAP:
27327 case TYPE_MUL:
27328 case TYPE_IDIV:
27329 case TYPE_INSERT:
27330 case TYPE_DELAYED_COMPARE:
27331 case TYPE_FPCOMPARE:
27332 case TYPE_MFCR:
27333 case TYPE_MTCR:
27334 case TYPE_MFJMPR:
27335 case TYPE_MTJMPR:
27336 case TYPE_ISYNC:
27337 case TYPE_SYNC:
27338 case TYPE_LOAD_L:
27339 case TYPE_STORE_C:
27340 return true;
27341 case TYPE_LOAD:
27342 case TYPE_STORE:
27343 case TYPE_FPLOAD:
27344 case TYPE_FPSTORE:
27345 if (get_attr_update (insn) == UPDATE_YES)
27346 return true;
27347 else
27348 break;
27349 default:
27350 break;
27352 break;
27353 case PROCESSOR_POWER7:
27354 type = get_attr_type (insn);
27356 switch (type)
27358 case TYPE_CR_LOGICAL:
27359 case TYPE_MFCR:
27360 case TYPE_MFCRF:
27361 case TYPE_MTCR:
27362 case TYPE_IDIV:
27363 case TYPE_LDIV:
27364 case TYPE_COMPARE:
27365 case TYPE_DELAYED_COMPARE:
27366 case TYPE_VAR_DELAYED_COMPARE:
27367 case TYPE_ISYNC:
27368 case TYPE_LOAD_L:
27369 case TYPE_STORE_C:
27370 case TYPE_MFJMPR:
27371 case TYPE_MTJMPR:
27372 return true;
27373 case TYPE_MUL:
27374 if (get_attr_dot (insn) == DOT_YES)
27375 return true;
27376 else
27377 break;
27378 case TYPE_LOAD:
27379 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27380 || get_attr_update (insn) == UPDATE_YES)
27381 return true;
27382 else
27383 break;
27384 case TYPE_STORE:
27385 case TYPE_FPLOAD:
27386 case TYPE_FPSTORE:
27387 if (get_attr_update (insn) == UPDATE_YES)
27388 return true;
27389 else
27390 break;
27391 default:
27392 break;
27394 break;
27395 case PROCESSOR_POWER8:
27396 type = get_attr_type (insn);
27398 switch (type)
27400 case TYPE_CR_LOGICAL:
27401 case TYPE_DELAYED_CR:
27402 case TYPE_MFCR:
27403 case TYPE_MFCRF:
27404 case TYPE_MTCR:
27405 case TYPE_COMPARE:
27406 case TYPE_DELAYED_COMPARE:
27407 case TYPE_VAR_DELAYED_COMPARE:
27408 case TYPE_SYNC:
27409 case TYPE_ISYNC:
27410 case TYPE_LOAD_L:
27411 case TYPE_STORE_C:
27412 case TYPE_VECSTORE:
27413 case TYPE_MFJMPR:
27414 case TYPE_MTJMPR:
27415 return true;
27416 case TYPE_LOAD:
27417 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27418 || get_attr_update (insn) == UPDATE_YES)
27419 return true;
27420 else
27421 break;
27422 case TYPE_STORE:
27423 if (get_attr_update (insn) == UPDATE_YES
27424 && get_attr_indexed (insn) == INDEXED_YES)
27425 return true;
27426 else
27427 break;
27428 default:
27429 break;
27431 break;
27432 default:
27433 break;
27436 return false;
27439 static bool
27440 insn_must_be_last_in_group (rtx insn)
27442 enum attr_type type;
27444 if (!insn
27445 || NOTE_P (insn)
27446 || DEBUG_INSN_P (insn)
27447 || GET_CODE (PATTERN (insn)) == USE
27448 || GET_CODE (PATTERN (insn)) == CLOBBER)
27449 return false;
27451 switch (rs6000_cpu)
27452 case PROCESSOR_POWER4:
27453 case PROCESSOR_POWER5:
27454 if (is_microcoded_insn (insn))
27455 return true;
27457 if (is_branch_slot_insn (insn))
27458 return true;
27460 break;
27461 case PROCESSOR_POWER6:
27462 type = get_attr_type (insn);
27464 switch (type)
27466 case TYPE_EXTS:
27467 case TYPE_CNTLZ:
27468 case TYPE_SHIFT:
27469 case TYPE_VAR_SHIFT_ROTATE:
27470 case TYPE_TRAP:
27471 case TYPE_MUL:
27472 case TYPE_IDIV:
27473 case TYPE_DELAYED_COMPARE:
27474 case TYPE_FPCOMPARE:
27475 case TYPE_MFCR:
27476 case TYPE_MTCR:
27477 case TYPE_MFJMPR:
27478 case TYPE_MTJMPR:
27479 case TYPE_ISYNC:
27480 case TYPE_SYNC:
27481 case TYPE_LOAD_L:
27482 case TYPE_STORE_C:
27483 return true;
27484 default:
27485 break;
27487 break;
27488 case PROCESSOR_POWER7:
27489 type = get_attr_type (insn);
27491 switch (type)
27493 case TYPE_ISYNC:
27494 case TYPE_SYNC:
27495 case TYPE_LOAD_L:
27496 case TYPE_STORE_C:
27497 return true;
27498 case TYPE_LOAD:
27499 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27500 && get_attr_update (insn) == UPDATE_YES)
27501 return true;
27502 else
27503 break;
27504 case TYPE_STORE:
27505 if (get_attr_update (insn) == UPDATE_YES
27506 && get_attr_indexed (insn) == INDEXED_YES)
27507 return true;
27508 else
27509 break;
27510 default:
27511 break;
27513 break;
27514 case PROCESSOR_POWER8:
27515 type = get_attr_type (insn);
27517 switch (type)
27519 case TYPE_MFCR:
27520 case TYPE_MTCR:
27521 case TYPE_ISYNC:
27522 case TYPE_SYNC:
27523 case TYPE_LOAD_L:
27524 case TYPE_STORE_C:
27525 return true;
27526 case TYPE_LOAD:
27527 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27528 && get_attr_update (insn) == UPDATE_YES)
27529 return true;
27530 else
27531 break;
27532 case TYPE_STORE:
27533 if (get_attr_update (insn) == UPDATE_YES
27534 && get_attr_indexed (insn) == INDEXED_YES)
27535 return true;
27536 else
27537 break;
27538 default:
27539 break;
27541 break;
27542 default:
27543 break;
27546 return false;
27549 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
27550 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
27552 static bool
27553 is_costly_group (rtx *group_insns, rtx next_insn)
27555 int i;
27556 int issue_rate = rs6000_issue_rate ();
27558 for (i = 0; i < issue_rate; i++)
27560 sd_iterator_def sd_it;
27561 dep_t dep;
27562 rtx insn = group_insns[i];
27564 if (!insn)
27565 continue;
27567 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
27569 rtx next = DEP_CON (dep);
27571 if (next == next_insn
27572 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
27573 return true;
27577 return false;
27580 /* Utility of the function redefine_groups.
27581 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
27582 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
27583 to keep it "far" (in a separate group) from GROUP_INSNS, following
27584 one of the following schemes, depending on the value of the flag
27585 -minsert_sched_nops = X:
27586 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
27587 in order to force NEXT_INSN into a separate group.
27588 (2) X < sched_finish_regroup_exact: insert exactly X nops.
27589 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
27590 insertion (has a group just ended, how many vacant issue slots remain in the
27591 last group, and how many dispatch groups were encountered so far). */
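/* A worked example, with hypothetical insns and issue_rate == 4:
   suppose two insns are already in the group, so can_issue_more == 2,
   and NEXT_INSN (not a branch) has a costly dependence on one of them.
   Under -minsert-sched-nops=sched_finish_regroup_exact the code below
   emits can_issue_more - 1 == 1 ordinary nop, or one special
   group-ending nop on POWER6/7/8, so NEXT_INSN starts a fresh group.
   Under -minsert-sched-nops=2 it emits exactly two nops regardless of
   the remaining slots.  */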
27593 static int
27594 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
27595 rtx next_insn, bool *group_end, int can_issue_more,
27596 int *group_count)
27598 rtx nop;
27599 bool force;
27600 int issue_rate = rs6000_issue_rate ();
27601 bool end = *group_end;
27602 int i;
27604 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
27605 return can_issue_more;
27607 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
27608 return can_issue_more;
27610 force = is_costly_group (group_insns, next_insn);
27611 if (!force)
27612 return can_issue_more;
27614 if (sched_verbose > 6)
27615 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
27616 *group_count, can_issue_more);
27618 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
27620 if (*group_end)
27621 can_issue_more = 0;
27623 /* Since only a branch can be issued in the last issue_slot, it is
27624 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
27625 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
27626 in this case the last nop will start a new group and the branch
27627 will be forced to the new group. */
27628 if (can_issue_more && !is_branch_slot_insn (next_insn))
27629 can_issue_more--;
27631 /* Do we have a special group ending nop? */
27632 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
27633 || rs6000_cpu_attr == CPU_POWER8)
27635 nop = gen_group_ending_nop ();
27636 emit_insn_before (nop, next_insn);
27637 can_issue_more = 0;
27639 else
27640 while (can_issue_more > 0)
27642 nop = gen_nop ();
27643 emit_insn_before (nop, next_insn);
27644 can_issue_more--;
27647 *group_end = true;
27648 return 0;
27651 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
27653 int n_nops = rs6000_sched_insert_nops;
27655 /* Nops can't be issued from the branch slot, so the effective
27656 issue_rate for nops is 'issue_rate - 1'. */
27657 if (can_issue_more == 0)
27658 can_issue_more = issue_rate;
27659 can_issue_more--;
27660 if (can_issue_more == 0)
27662 can_issue_more = issue_rate - 1;
27663 (*group_count)++;
27664 end = true;
27665 for (i = 0; i < issue_rate; i++)
27667 group_insns[i] = 0;
27671 while (n_nops > 0)
27673 nop = gen_nop ();
27674 emit_insn_before (nop, next_insn);
27675 if (can_issue_more == issue_rate - 1) /* new group begins */
27676 end = false;
27677 can_issue_more--;
27678 if (can_issue_more == 0)
27680 can_issue_more = issue_rate - 1;
27681 (*group_count)++;
27682 end = true;
27683 for (i = 0; i < issue_rate; i++)
27685 group_insns[i] = 0;
27688 n_nops--;
27691 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
27692 can_issue_more++;
27694 /* Is next_insn going to start a new group? */
27695 *group_end
27696 = (end
27697 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27698 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27699 || (can_issue_more < issue_rate
27700 && insn_terminates_group_p (next_insn, previous_group)));
27701 if (*group_end && end)
27702 (*group_count)--;
27704 if (sched_verbose > 6)
27705 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
27706 *group_count, can_issue_more);
27707 return can_issue_more;
27710 return can_issue_more;
27713 /* This function tries to synch the dispatch groups that the compiler "sees"
27714 with the dispatch groups that the processor dispatcher is expected to
27715 form in practice. It tries to achieve this synchronization by forcing the
27716 estimated processor grouping on the compiler (as opposed to the function
27717 'pad_groups' which tries to force the scheduler's grouping on the processor).
27719 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
27720 examines the (estimated) dispatch groups that will be formed by the processor
27721 dispatcher. It marks these group boundaries to reflect the estimated
27722 processor grouping, overriding the grouping that the scheduler had marked.
27723 Depending on the value of the flag '-minsert-sched-nops' this function can
27724 force certain insns into separate groups or force a certain distance between
27725 them by inserting nops, for example, if there exists a "costly dependence"
27726 between the insns.
27728 The function estimates the group boundaries that the processor will form as
27729 follows: It keeps track of how many vacant issue slots are available after
27730 each insn. A subsequent insn will start a new group if one of the following
27731 4 cases applies:
27732 - no more vacant issue slots remain in the current dispatch group.
27733 - only the last issue slot, which is the branch slot, is vacant, but the next
27734 insn is not a branch.
27735 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
27736 which means that a cracked insn (which occupies two issue slots) can't be
27737 issued in this group.
27738 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
27739 start a new group. */
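/* A sketch of the boundary estimate, with hypothetical insns and
   issue_rate == 4: after { add, lwz, mulld } issue, one vacant slot
   remains -- the branch slot.  A branch joins this group, while any
   other insn starts a new one (second case above).  Likewise a
   cracked insn starts a new group once only two or fewer slots
   remain (third case), since it needs two non-branch slots.  */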
27741 static int
27742 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27744 rtx insn, next_insn;
27745 int issue_rate;
27746 int can_issue_more;
27747 int slot, i;
27748 bool group_end;
27749 int group_count = 0;
27750 rtx *group_insns;
27752 /* Initialize. */
27753 issue_rate = rs6000_issue_rate ();
27754 group_insns = XALLOCAVEC (rtx, issue_rate);
27755 for (i = 0; i < issue_rate; i++)
27757 group_insns[i] = 0;
27759 can_issue_more = issue_rate;
27760 slot = 0;
27761 insn = get_next_active_insn (prev_head_insn, tail);
27762 group_end = false;
27764 while (insn != NULL_RTX)
27766 slot = (issue_rate - can_issue_more);
27767 group_insns[slot] = insn;
27768 can_issue_more =
27769 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27770 if (insn_terminates_group_p (insn, current_group))
27771 can_issue_more = 0;
27773 next_insn = get_next_active_insn (insn, tail);
27774 if (next_insn == NULL_RTX)
27775 return group_count + 1;
27777 /* Is next_insn going to start a new group? */
27778 group_end
27779 = (can_issue_more == 0
27780 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27781 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27782 || (can_issue_more < issue_rate
27783 && insn_terminates_group_p (next_insn, previous_group)));
27785 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
27786 next_insn, &group_end, can_issue_more,
27787 &group_count);
27789 if (group_end)
27791 group_count++;
27792 can_issue_more = 0;
27793 for (i = 0; i < issue_rate; i++)
27795 group_insns[i] = 0;
27799 if (GET_MODE (next_insn) == TImode && can_issue_more)
27800 PUT_MODE (next_insn, VOIDmode);
27801 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
27802 PUT_MODE (next_insn, TImode);
27804 insn = next_insn;
27805 if (can_issue_more == 0)
27806 can_issue_more = issue_rate;
27807 } /* while */
27809 return group_count;
27812 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
27813 dispatch group boundaries that the scheduler had marked. Pad with nops
27814 any dispatch groups which have vacant issue slots, in order to force the
27815 scheduler's grouping on the processor dispatcher. The function
27816 returns the number of dispatch groups found. */
27818 static int
27819 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
27821 rtx insn, next_insn;
27822 rtx nop;
27823 int issue_rate;
27824 int can_issue_more;
27825 int group_end;
27826 int group_count = 0;
27828 /* Initialize issue_rate. */
27829 issue_rate = rs6000_issue_rate ();
27830 can_issue_more = issue_rate;
27832 insn = get_next_active_insn (prev_head_insn, tail);
27833 next_insn = get_next_active_insn (insn, tail);
27835 while (insn != NULL_RTX)
27837 can_issue_more =
27838 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
27840 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
27842 if (next_insn == NULL_RTX)
27843 break;
27845 if (group_end)
27847 /* If the scheduler had marked group termination at this location
27848 (between insn and next_insn), and neither insn nor next_insn will
27849 force group termination, pad the group with nops to force group
27850 termination. */
27851 if (can_issue_more
27852 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
27853 && !insn_terminates_group_p (insn, current_group)
27854 && !insn_terminates_group_p (next_insn, previous_group))
27856 if (!is_branch_slot_insn (next_insn))
27857 can_issue_more--;
27859 while (can_issue_more)
27861 nop = gen_nop ();
27862 emit_insn_before (nop, next_insn);
27863 can_issue_more--;
27867 can_issue_more = issue_rate;
27868 group_count++;
27871 insn = next_insn;
27872 next_insn = get_next_active_insn (insn, tail);
27875 return group_count;
27878 /* We're beginning a new block. Initialize data structures as necessary. */
27880 static void
27881 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
27882 int sched_verbose ATTRIBUTE_UNUSED,
27883 int max_ready ATTRIBUTE_UNUSED)
27885 last_scheduled_insn = NULL_RTX;
27886 load_store_pendulum = 0;
27889 /* The following function is called at the end of scheduling BB.
27890 After reload, it inserts nops to enforce insn group bundling. */
27892 static void
27893 rs6000_sched_finish (FILE *dump, int sched_verbose)
27895 int n_groups;
27897 if (sched_verbose)
27898 fprintf (dump, "=== Finishing schedule.\n");
27900 if (reload_completed && rs6000_sched_groups)
27902 /* Do not run the sched_finish hook when selective scheduling is enabled. */
27903 if (sel_sched_p ())
27904 return;
27906 if (rs6000_sched_insert_nops == sched_finish_none)
27907 return;
27909 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
27910 n_groups = pad_groups (dump, sched_verbose,
27911 current_sched_info->prev_head,
27912 current_sched_info->next_tail);
27913 else
27914 n_groups = redefine_groups (dump, sched_verbose,
27915 current_sched_info->prev_head,
27916 current_sched_info->next_tail);
27918 if (sched_verbose >= 6)
27920 fprintf (dump, "ngroups = %d\n", n_groups);
27921 print_rtl (dump, current_sched_info->prev_head);
27922 fprintf (dump, "Done finish_sched\n");
27927 struct _rs6000_sched_context
27929 short cached_can_issue_more;
27930 rtx last_scheduled_insn;
27931 int load_store_pendulum;
27934 typedef struct _rs6000_sched_context rs6000_sched_context_def;
27935 typedef rs6000_sched_context_def *rs6000_sched_context_t;
27937 /* Allocate storage for a new scheduling context. */
27938 static void *
27939 rs6000_alloc_sched_context (void)
27941 return xmalloc (sizeof (rs6000_sched_context_def));
27944 /* If CLEAN_P is true, initialize _SC with clean data;
27945 otherwise initialize it from the global context. */
27946 static void
27947 rs6000_init_sched_context (void *_sc, bool clean_p)
27949 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
27951 if (clean_p)
27953 sc->cached_can_issue_more = 0;
27954 sc->last_scheduled_insn = NULL_RTX;
27955 sc->load_store_pendulum = 0;
27957 else
27959 sc->cached_can_issue_more = cached_can_issue_more;
27960 sc->last_scheduled_insn = last_scheduled_insn;
27961 sc->load_store_pendulum = load_store_pendulum;
27965 /* Sets the global scheduling context to the one pointed to by _SC. */
27966 static void
27967 rs6000_set_sched_context (void *_sc)
27969 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
27971 gcc_assert (sc != NULL);
27973 cached_can_issue_more = sc->cached_can_issue_more;
27974 last_scheduled_insn = sc->last_scheduled_insn;
27975 load_store_pendulum = sc->load_store_pendulum;
27978 /* Free _SC. */
27979 static void
27980 rs6000_free_sched_context (void *_sc)
27982 gcc_assert (_sc != NULL);
27984 free (_sc);
27988 /* Length in units of the trampoline for entering a nested function. */
27990 int
27991 rs6000_trampoline_size (void)
27993 int ret = 0;
27995 switch (DEFAULT_ABI)
27997 default:
27998 gcc_unreachable ();
28000 case ABI_AIX:
28001 ret = (TARGET_32BIT) ? 12 : 24;
28002 break;
28004 case ABI_ELFv2:
28005 gcc_assert (!TARGET_32BIT);
28006 ret = 32;
28007 break;
28009 case ABI_DARWIN:
28010 case ABI_V4:
28011 ret = (TARGET_32BIT) ? 40 : 48;
28012 break;
28015 return ret;
28018 /* Emit RTL insns to initialize the variable parts of a trampoline.
28019 FNADDR is an RTX for the address of the function's pure code.
28020 CXT is an RTX for the static chain value for the function. */
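/* Layout sketch of the AIX trampoline built below, in regsize-byte
   words -- it is itself a function descriptor whose environment word
   carries the static chain:
     tramp+0:          code address (word 0 of FNADDR's descriptor)
     tramp+regsize:    TOC pointer  (word 1 of FNADDR's descriptor)
     tramp+2*regsize:  static chain (CXT)  */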
28022 static void
28023 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
28025 int regsize = (TARGET_32BIT) ? 4 : 8;
28026 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
28027 rtx ctx_reg = force_reg (Pmode, cxt);
28028 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
28030 switch (DEFAULT_ABI)
28032 default:
28033 gcc_unreachable ();
28035 /* Under AIX, just build the 3-word function descriptor. */
28036 case ABI_AIX:
28038 rtx fnmem, fn_reg, toc_reg;
28040 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28041 error ("You cannot take the address of a nested function if you use "
28042 "the -mno-pointers-to-nested-functions option.");
28044 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
28045 fn_reg = gen_reg_rtx (Pmode);
28046 toc_reg = gen_reg_rtx (Pmode);
28048 /* Macro to shorten the code expansions below. */
28049 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
28051 m_tramp = replace_equiv_address (m_tramp, addr);
28053 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
28054 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
28055 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
28056 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
28057 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
28059 # undef MEM_PLUS
28061 break;
28063 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
28064 case ABI_ELFv2:
28065 case ABI_DARWIN:
28066 case ABI_V4:
28067 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
28068 LCT_NORMAL, VOIDmode, 4,
28069 addr, Pmode,
28070 GEN_INT (rs6000_trampoline_size ()), SImode,
28071 fnaddr, Pmode,
28072 ctx_reg, Pmode);
28073 break;
28078 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
28079 identifier as an argument, so the front end shouldn't look it up. */
28081 static bool
28082 rs6000_attribute_takes_identifier_p (const_tree attr_id)
28084 return is_attribute_p ("altivec", attr_id);
28087 /* Handle the "altivec" attribute. The attribute may have
28088 arguments as follows:
28090 __attribute__((altivec(vector__)))
28091 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
28092 __attribute__((altivec(bool__))) (always followed by 'unsigned')
28094 and may appear more than once (e.g., 'vector bool char') in a
28095 given declaration. */
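/* For example, a declaration like "vector unsigned int v" reaches
   this handler as roughly __attribute__((altivec(vector__)))
   unsigned int, i.e. altivec_type 'v' with mode SImode and
   TYPE_UNSIGNED set, and is rewritten below to use
   unsigned_V4SI_type_node.  */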
28097 static tree
28098 rs6000_handle_altivec_attribute (tree *node,
28099 tree name ATTRIBUTE_UNUSED,
28100 tree args,
28101 int flags ATTRIBUTE_UNUSED,
28102 bool *no_add_attrs)
28104 tree type = *node, result = NULL_TREE;
28105 enum machine_mode mode;
28106 int unsigned_p;
28107 char altivec_type
28108 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
28109 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
28110 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
28111 : '?');
28113 while (POINTER_TYPE_P (type)
28114 || TREE_CODE (type) == FUNCTION_TYPE
28115 || TREE_CODE (type) == METHOD_TYPE
28116 || TREE_CODE (type) == ARRAY_TYPE)
28117 type = TREE_TYPE (type);
28119 mode = TYPE_MODE (type);
28121 /* Check for invalid AltiVec type qualifiers. */
28122 if (type == long_double_type_node)
28123 error ("use of %<long double%> in AltiVec types is invalid");
28124 else if (type == boolean_type_node)
28125 error ("use of boolean types in AltiVec types is invalid");
28126 else if (TREE_CODE (type) == COMPLEX_TYPE)
28127 error ("use of %<complex%> in AltiVec types is invalid");
28128 else if (DECIMAL_FLOAT_MODE_P (mode))
28129 error ("use of decimal floating point types in AltiVec types is invalid");
28130 else if (!TARGET_VSX)
28132 if (type == long_unsigned_type_node || type == long_integer_type_node)
28134 if (TARGET_64BIT)
28135 error ("use of %<long%> in AltiVec types is invalid for "
28136 "64-bit code without -mvsx");
28137 else if (rs6000_warn_altivec_long)
28138 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28139 "use %<int%>");
28141 else if (type == long_long_unsigned_type_node
28142 || type == long_long_integer_type_node)
28143 error ("use of %<long long%> in AltiVec types is invalid without "
28144 "-mvsx");
28145 else if (type == double_type_node)
28146 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28149 switch (altivec_type)
28151 case 'v':
28152 unsigned_p = TYPE_UNSIGNED (type);
28153 switch (mode)
28155 case TImode:
28156 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
28157 break;
28158 case DImode:
28159 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
28160 break;
28161 case SImode:
28162 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
28163 break;
28164 case HImode:
28165 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
28166 break;
28167 case QImode:
28168 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
28169 break;
28170 case SFmode: result = V4SF_type_node; break;
28171 case DFmode: result = V2DF_type_node; break;
28172 /* If the user says 'vector int bool', we may be handed the 'bool'
28173 attribute _before_ the 'vector' attribute, and so select the
28174 proper type in the 'b' case below. */
28175 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
28176 case V2DImode: case V2DFmode:
28177 result = type;
28178 default: break;
28180 break;
28181 case 'b':
28182 switch (mode)
28184 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28185 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28186 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28187 case QImode: case V16QImode: result = bool_V16QI_type_node;
28188 default: break;
28190 break;
28191 case 'p':
28192 switch (mode)
28194 case V8HImode: result = pixel_V8HI_type_node;
28195 default: break;
28197 default: break;
28200 /* Propagate qualifiers attached to the element type
28201 onto the vector type. */
28202 if (result && result != type && TYPE_QUALS (type))
28203 result = build_qualified_type (result, TYPE_QUALS (type));
28205 *no_add_attrs = true; /* No need to hang on to the attribute. */
28207 if (result)
28208 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28210 return NULL_TREE;
28213 /* AltiVec defines four built-in scalar types that serve as vector
28214 elements; we must teach the compiler how to mangle them. */
28216 static const char *
28217 rs6000_mangle_type (const_tree type)
28219 type = TYPE_MAIN_VARIANT (type);
28221 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28222 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28223 return NULL;
28225 if (type == bool_char_type_node) return "U6__boolc";
28226 if (type == bool_short_type_node) return "U6__bools";
28227 if (type == pixel_type_node) return "u7__pixel";
28228 if (type == bool_int_type_node) return "U6__booli";
28229 if (type == bool_long_type_node) return "U6__booll";
28231 /* Mangle IBM extended float long double as `g' (__float128) on
28232 powerpc*-linux where long-double-64 previously was the default. */
28233 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28234 && TARGET_ELF
28235 && TARGET_LONG_DOUBLE_128
28236 && !TARGET_IEEEQUAD)
28237 return "g";
28239 /* For all other types, use normal C++ mangling. */
28240 return NULL;
28243 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28244 struct attribute_spec.handler. */
28246 static tree
28247 rs6000_handle_longcall_attribute (tree *node, tree name,
28248 tree args ATTRIBUTE_UNUSED,
28249 int flags ATTRIBUTE_UNUSED,
28250 bool *no_add_attrs)
28252 if (TREE_CODE (*node) != FUNCTION_TYPE
28253 && TREE_CODE (*node) != FIELD_DECL
28254 && TREE_CODE (*node) != TYPE_DECL)
28256 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28257 name);
28258 *no_add_attrs = true;
28261 return NULL_TREE;
28264 /* Set longcall attributes on all functions declared when
28265 rs6000_default_long_calls is true. */
28266 static void
28267 rs6000_set_default_type_attributes (tree type)
28269 if (rs6000_default_long_calls
28270 && (TREE_CODE (type) == FUNCTION_TYPE
28271 || TREE_CODE (type) == METHOD_TYPE))
28272 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
28273 NULL_TREE,
28274 TYPE_ATTRIBUTES (type));
28276 #if TARGET_MACHO
28277 darwin_set_default_type_attributes (type);
28278 #endif
28281 /* Return a reference suitable for calling a function with the
28282 longcall attribute. */
28284 rtx
28285 rs6000_longcall_ref (rtx call_ref)
28287 const char *call_name;
28288 tree node;
28290 if (GET_CODE (call_ref) != SYMBOL_REF)
28291 return call_ref;
28293 /* System V adds '.' to the internal name, so skip them. */
28294 call_name = XSTR (call_ref, 0);
28295 if (*call_name == '.')
28297 while (*call_name == '.')
28298 call_name++;
28300 node = get_identifier (call_name);
28301 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
28304 return force_reg (Pmode, call_ref);
28307 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
28308 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
28309 #endif
28311 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
28312 struct attribute_spec.handler. */
28313 static tree
28314 rs6000_handle_struct_attribute (tree *node, tree name,
28315 tree args ATTRIBUTE_UNUSED,
28316 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
28318 tree *type = NULL;
28319 if (DECL_P (*node))
28321 if (TREE_CODE (*node) == TYPE_DECL)
28322 type = &TREE_TYPE (*node);
28324 else
28325 type = node;
28327 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
28328 || TREE_CODE (*type) == UNION_TYPE)))
28330 warning (OPT_Wattributes, "%qE attribute ignored", name);
28331 *no_add_attrs = true;
28334 else if ((is_attribute_p ("ms_struct", name)
28335 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
28336 || ((is_attribute_p ("gcc_struct", name)
28337 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
28339 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
28340 name);
28341 *no_add_attrs = true;
28344 return NULL_TREE;
28347 static bool
28348 rs6000_ms_bitfield_layout_p (const_tree record_type)
28350 return ((TARGET_USE_MS_BITFIELD_LAYOUT
28351 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
28352 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
28355 #ifdef USING_ELFOS_H
28357 /* A get_unnamed_section callback, used for switching to toc_section. */
28359 static void
28360 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28362 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28363 && TARGET_MINIMAL_TOC
28364 && !TARGET_RELOCATABLE)
28366 if (!toc_initialized)
28368 toc_initialized = 1;
28369 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28370 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
28371 fprintf (asm_out_file, "\t.tc ");
28372 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
28373 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28374 fprintf (asm_out_file, "\n");
28376 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28377 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28378 fprintf (asm_out_file, " = .+32768\n");
28380 else
28381 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28383 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28384 && !TARGET_RELOCATABLE)
28385 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28386 else
28388 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28389 if (!toc_initialized)
28391 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28392 fprintf (asm_out_file, " = .+32768\n");
28393 toc_initialized = 1;
28398 /* Implement TARGET_ASM_INIT_SECTIONS. */
28400 static void
28401 rs6000_elf_asm_init_sections (void)
28403 toc_section
28404 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
28406 sdata2_section
28407 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
28408 SDATA2_SECTION_ASM_OP);
28411 /* Implement TARGET_SELECT_RTX_SECTION. */
28413 static section *
28414 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
28415 unsigned HOST_WIDE_INT align)
28417 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28418 return toc_section;
28419 else
28420 return default_elf_select_rtx_section (mode, x, align);
28423 /* For a SYMBOL_REF, set generic flags and then perform some
28424 target-specific processing.
28426 When the AIX ABI is requested on a non-AIX system, replace the
28427 function name with the real name (with a leading .) rather than the
28428 function descriptor name. This saves a lot of overriding code to
28429 read the prefixes. */
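/* For example, when compiling for the AIX ABI on an ELF system, a
   function "foo" has its SYMBOL_REF renamed to ".foo" below -- the
   name of the code entry point rather than of the descriptor.  */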
28431 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
28432 static void
28433 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
28435 default_encode_section_info (decl, rtl, first);
28437 if (first
28438 && TREE_CODE (decl) == FUNCTION_DECL
28439 && !TARGET_AIX
28440 && DEFAULT_ABI == ABI_AIX)
28442 rtx sym_ref = XEXP (rtl, 0);
28443 size_t len = strlen (XSTR (sym_ref, 0));
28444 char *str = XALLOCAVEC (char, len + 2);
28445 str[0] = '.';
28446 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
28447 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
28451 static inline bool
28452 compare_section_name (const char *section, const char *templ)
28454 int len;
28456 len = strlen (templ);
28457 return (strncmp (section, templ, len) == 0
28458 && (section[len] == 0 || section[len] == '.'));
28461 bool
28462 rs6000_elf_in_small_data_p (const_tree decl)
28464 if (rs6000_sdata == SDATA_NONE)
28465 return false;
28467 /* We want to merge strings, so we never consider them small data. */
28468 if (TREE_CODE (decl) == STRING_CST)
28469 return false;
28471 /* Functions are never in the small data area. */
28472 if (TREE_CODE (decl) == FUNCTION_DECL)
28473 return false;
28475 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
28477 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
28478 if (compare_section_name (section, ".sdata")
28479 || compare_section_name (section, ".sdata2")
28480 || compare_section_name (section, ".gnu.linkonce.s")
28481 || compare_section_name (section, ".sbss")
28482 || compare_section_name (section, ".sbss2")
28483 || compare_section_name (section, ".gnu.linkonce.sb")
28484 || strcmp (section, ".PPC.EMB.sdata0") == 0
28485 || strcmp (section, ".PPC.EMB.sbss0") == 0)
28486 return true;
28488 else
28490 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
28492 if (size > 0
28493 && size <= g_switch_value
28494 /* If it's not public, and we're not going to reference it there,
28495 there's no need to put it in the small data section. */
28496 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
28497 return true;
28500 return false;
28503 #endif /* USING_ELFOS_H */
28505 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
28507 static bool
28508 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
28510 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
28513 /* Do not place thread-local symbols refs in the object blocks. */
28515 static bool
28516 rs6000_use_blocks_for_decl_p (const_tree decl)
28518 return !DECL_THREAD_LOCAL_P (decl);
28521 /* Return a REG that occurs in ADDR with coefficient 1.
28522 ADDR can be effectively incremented by incrementing REG.
28524 r0 is special and we must not select it as an address
28525 register by this routine since our caller will try to
28526 increment the returned register via an "la" instruction. */
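/* For example, given (plus (reg r9) (const_int 8)) this returns r9;
   given (plus (plus (reg r9) (reg r0)) (const_int 8)) it descends
   into the inner PLUS and still returns r9, never r0.  */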
28528 rtx
28529 find_addr_reg (rtx addr)
28531 while (GET_CODE (addr) == PLUS)
28533 if (GET_CODE (XEXP (addr, 0)) == REG
28534 && REGNO (XEXP (addr, 0)) != 0)
28535 addr = XEXP (addr, 0);
28536 else if (GET_CODE (XEXP (addr, 1)) == REG
28537 && REGNO (XEXP (addr, 1)) != 0)
28538 addr = XEXP (addr, 1);
28539 else if (CONSTANT_P (XEXP (addr, 0)))
28540 addr = XEXP (addr, 1);
28541 else if (CONSTANT_P (XEXP (addr, 1)))
28542 addr = XEXP (addr, 0);
28543 else
28544 gcc_unreachable ();
28546 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
28547 return addr;
28550 void
28551 rs6000_fatal_bad_address (rtx op)
28553 fatal_insn ("bad address", op);
28556 #if TARGET_MACHO
28558 typedef struct branch_island_d {
28559 tree function_name;
28560 tree label_name;
28561 int line_number;
28562 } branch_island;
28565 static vec<branch_island, va_gc> *branch_islands;
28567 /* Remember to generate a branch island for far calls to the given
28568 function. */
28570 static void
28571 add_compiler_branch_island (tree label_name, tree function_name,
28572 int line_number)
28574 branch_island bi = {function_name, label_name, line_number};
28575 vec_safe_push (branch_islands, bi);
28578 /* Generate far-jump branch islands for everything recorded in
28579 branch_islands. Invoked immediately after the last instruction of
28580 the epilogue has been emitted; the branch islands must be appended
28581 to, and contiguous with, the function body. Mach-O stubs are
28582 generated in machopic_output_stub(). */
28584 static void
28585 macho_branch_islands (void)
28587 char tmp_buf[512];
28589 while (!vec_safe_is_empty (branch_islands))
28591 branch_island *bi = &branch_islands->last ();
28592 const char *label = IDENTIFIER_POINTER (bi->label_name);
28593 const char *name = IDENTIFIER_POINTER (bi->function_name);
28594 char name_buf[512];
28595 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28596 if (name[0] == '*' || name[0] == '&')
28597 strcpy (name_buf, name+1);
28598 else
28600 name_buf[0] = '_';
28601 strcpy (name_buf+1, name);
28603 strcpy (tmp_buf, "\n");
28604 strcat (tmp_buf, label);
28605 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28606 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28607 dbxout_stabd (N_SLINE, bi->line_number);
28608 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28609 if (flag_pic)
28611 if (TARGET_LINK_STACK)
28613 char name[32];
28614 get_ppc476_thunk_name (name);
28615 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
28616 strcat (tmp_buf, name);
28617 strcat (tmp_buf, "\n");
28618 strcat (tmp_buf, label);
28619 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28621 else
28623 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
28624 strcat (tmp_buf, label);
28625 strcat (tmp_buf, "_pic\n");
28626 strcat (tmp_buf, label);
28627 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28630 strcat (tmp_buf, "\taddis r11,r11,ha16(");
28631 strcat (tmp_buf, name_buf);
28632 strcat (tmp_buf, " - ");
28633 strcat (tmp_buf, label);
28634 strcat (tmp_buf, "_pic)\n");
28636 strcat (tmp_buf, "\tmtlr r0\n");
28638 strcat (tmp_buf, "\taddi r12,r11,lo16(");
28639 strcat (tmp_buf, name_buf);
28640 strcat (tmp_buf, " - ");
28641 strcat (tmp_buf, label);
28642 strcat (tmp_buf, "_pic)\n");
28644 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
28646 else
28648 strcat (tmp_buf, ":\nlis r12,hi16(");
28649 strcat (tmp_buf, name_buf);
28650 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
28651 strcat (tmp_buf, name_buf);
28652 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
28654 output_asm_insn (tmp_buf, 0);
28655 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28656 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28657 dbxout_stabd (N_SLINE, bi->line_number);
28658 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28659 branch_islands->pop ();
28663 /* NO_PREVIOUS_DEF checks whether the function name is already in
28664 the branch island list. */
28666 static int
28667 no_previous_def (tree function_name)
28669 branch_island *bi;
28670 unsigned ix;
28672 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28673 if (function_name == bi->function_name)
28674 return 0;
28675 return 1;
28678 /* GET_PREV_LABEL gets the label name from the previous definition of
28679 the function. */
28681 static tree
28682 get_prev_label (tree function_name)
28684 branch_island *bi;
28685 unsigned ix;
28687 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28688 if (function_name == bi->function_name)
28689 return bi->label_name;
28690 return NULL_TREE;
28693 /* INSN is either a function call or a millicode call. It may have an
28694 unconditional jump in its delay slot.
28696 CALL_DEST is the routine we are calling. */
28698 char *
28699 output_call (rtx insn, rtx *operands, int dest_operand_number,
28700 int cookie_operand_number)
28702 static char buf[256];
28703 if (darwin_emit_branch_islands
28704 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
28705 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
28707 tree labelname;
28708 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
28710 if (no_previous_def (funname))
28712 rtx label_rtx = gen_label_rtx ();
28713 char *label_buf, temp_buf[256];
28714 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
28715 CODE_LABEL_NUMBER (label_rtx));
28716 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
28717 labelname = get_identifier (label_buf);
28718 add_compiler_branch_island (labelname, funname, insn_line (insn));
28720 else
28721 labelname = get_prev_label (funname);
28723 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
28724 instruction will reach 'foo', otherwise link as 'bl L42'".
28725 "L42" should be a 'branch island', that will do a far jump to
28726 'foo'. Branch islands are generated in
28727 macho_branch_islands(). */
28728 sprintf (buf, "jbsr %%z%d,%.246s",
28729 dest_operand_number, IDENTIFIER_POINTER (labelname));
28731 else
28732 sprintf (buf, "bl %%z%d", dest_operand_number);
28733 return buf;
28736 /* Generate PIC and indirect symbol stubs. */
28738 void
28739 machopic_output_stub (FILE *file, const char *symb, const char *stub)
28741 unsigned int length;
28742 char *symbol_name, *lazy_ptr_name;
28743 char *local_label_0;
28744 static int label = 0;
28746 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
28747 symb = (*targetm.strip_name_encoding) (symb);
28750 length = strlen (symb);
28751 symbol_name = XALLOCAVEC (char, length + 32);
28752 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
28754 lazy_ptr_name = XALLOCAVEC (char, length + 32);
28755 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
28757 if (flag_pic == 2)
28758 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
28759 else
28760 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
28762 if (flag_pic == 2)
28764 fprintf (file, "\t.align 5\n");
28766 fprintf (file, "%s:\n", stub);
28767 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28769 label++;
28770 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
28771 sprintf (local_label_0, "\"L%011d$spb\"", label);
28773 fprintf (file, "\tmflr r0\n");
28774 if (TARGET_LINK_STACK)
28776 char name[32];
28777 get_ppc476_thunk_name (name);
28778 fprintf (file, "\tbl %s\n", name);
28779 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28781 else
28783 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
28784 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
28786 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
28787 lazy_ptr_name, local_label_0);
28788 fprintf (file, "\tmtlr r0\n");
28789 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
28790 (TARGET_64BIT ? "ldu" : "lwzu"),
28791 lazy_ptr_name, local_label_0);
28792 fprintf (file, "\tmtctr r12\n");
28793 fprintf (file, "\tbctr\n");
28795 else
28797 fprintf (file, "\t.align 4\n");
28799 fprintf (file, "%s:\n", stub);
28800 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28802 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
28803 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
28804 (TARGET_64BIT ? "ldu" : "lwzu"),
28805 lazy_ptr_name);
28806 fprintf (file, "\tmtctr r12\n");
28807 fprintf (file, "\tbctr\n");
28810 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
28811 fprintf (file, "%s:\n", lazy_ptr_name);
28812 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
28813 fprintf (file, "%sdyld_stub_binding_helper\n",
28814 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
28817 /* Legitimize PIC addresses. If the address is already
28818 position-independent, we return ORIG. Newly generated
28819 position-independent addresses go into a reg. This is REG if
28820 nonzero, otherwise we allocate register(s) as necessary. */
28822 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
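/* SMALL_INT is true iff X fits a signed 16-bit immediate
   (-32768 .. 32767), in which case the offset below can be folded
   into the address with plus_constant instead of needing its own
   register.  */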
28824 rtx
28825 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
28826 rtx reg)
28828 rtx base, offset;
28830 if (reg == NULL && ! reload_in_progress && ! reload_completed)
28831 reg = gen_reg_rtx (Pmode);
28833 if (GET_CODE (orig) == CONST)
28835 rtx reg_temp;
28837 if (GET_CODE (XEXP (orig, 0)) == PLUS
28838 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
28839 return orig;
28841 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
28843 /* Use a different reg for the intermediate value, as
28844 it will be marked UNCHANGING. */
28845 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
28846 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
28847 Pmode, reg_temp);
28848 offset =
28849 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
28850 Pmode, reg);
28852 if (GET_CODE (offset) == CONST_INT)
28854 if (SMALL_INT (offset))
28855 return plus_constant (Pmode, base, INTVAL (offset));
28856 else if (! reload_in_progress && ! reload_completed)
28857 offset = force_reg (Pmode, offset);
28858 else
28860 rtx mem = force_const_mem (Pmode, orig);
28861 return machopic_legitimize_pic_address (mem, Pmode, reg);
28864 return gen_rtx_PLUS (Pmode, base, offset);
28867 /* Fall back on generic machopic code. */
28868 return machopic_legitimize_pic_address (orig, mode, reg);
28871 /* Output a .machine directive for the Darwin assembler, and call
28872 the generic start_file routine. */
28874 static void
28875 rs6000_darwin_file_start (void)
28877 static const struct
28879 const char *arg;
28880 const char *name;
28881 HOST_WIDE_INT if_set;
28882 } mapping[] = {
28883 { "ppc64", "ppc64", MASK_64BIT },
28884 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
28885 { "power4", "ppc970", 0 },
28886 { "G5", "ppc970", 0 },
28887 { "7450", "ppc7450", 0 },
28888 { "7400", "ppc7400", MASK_ALTIVEC },
28889 { "G4", "ppc7400", 0 },
28890 { "750", "ppc750", 0 },
28891 { "740", "ppc750", 0 },
28892 { "G3", "ppc750", 0 },
28893 { "604e", "ppc604e", 0 },
28894 { "604", "ppc604", 0 },
28895 { "603e", "ppc603", 0 },
28896 { "603", "ppc603", 0 },
28897 { "601", "ppc601", 0 },
28898 { NULL, "ppc", 0 } };
28899 const char *cpu_id = "";
28900 size_t i;
28902 rs6000_file_start ();
28903 darwin_file_start ();
28905 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
28907 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
28908 cpu_id = rs6000_default_cpu;
28910 if (global_options_set.x_rs6000_cpu_index)
28911 cpu_id = processor_target_table[rs6000_cpu_index].name;
28913 /* Look through the mapping array. Pick the first name that either
28914 matches the argument, has a bit set in IF_SET that is also set
28915 in the target flags, or has a NULL name. */
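/* For example, -mcpu=G4 makes the loop below settle on "ppc7400"
   (via the "G4" name match, or earlier via MASK_ALTIVEC on the
   "7400" entry); an unrecognized cpu_id with MASK_64BIT set in the
   ISA flags stops at "ppc64"; anything else falls through to the
   final "ppc" default.  */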
28917 i = 0;
28918 while (mapping[i].arg != NULL
28919 && strcmp (mapping[i].arg, cpu_id) != 0
28920 && (mapping[i].if_set & rs6000_isa_flags) == 0)
28921 i++;
28923 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
28926 #endif /* TARGET_MACHO */
28928 #if TARGET_ELF
28929 static int
28930 rs6000_elf_reloc_rw_mask (void)
28932 if (flag_pic)
28933 return 3;
28934 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28935 return 2;
28936 else
28937 return 0;
28940 /* Record an element in the table of global constructors. SYMBOL is
28941 a SYMBOL_REF of the function to be called; PRIORITY is a number
28942 between 0 and MAX_INIT_PRIORITY.
28944 This differs from default_named_section_asm_out_constructor in
28945 that we have special handling for -mrelocatable. */
28947 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
28948 static void
28949 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
28951 const char *section = ".ctors";
28952 char buf[16];
28954 if (priority != DEFAULT_INIT_PRIORITY)
28956 sprintf (buf, ".ctors.%.5u",
28957 /* Invert the numbering so the linker puts us in the proper
28958 order; constructors are run from right to left, and the
28959 linker sorts in increasing order. */
28960 MAX_INIT_PRIORITY - priority);
28961 section = buf;
28964 switch_to_section (get_section (section, SECTION_WRITE, NULL));
28965 assemble_align (POINTER_SIZE);
28967 if (TARGET_RELOCATABLE)
28969 fputs ("\t.long (", asm_out_file);
28970 output_addr_const (asm_out_file, symbol);
28971 fputs (")@fixup\n", asm_out_file);
28973 else
28974 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
28977 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
28978 static void
28979 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
28981 const char *section = ".dtors";
28982 char buf[16];
28984 if (priority != DEFAULT_INIT_PRIORITY)
28986 sprintf (buf, ".dtors.%.5u",
28987 /* Invert the numbering so the linker puts us in the proper
28988 order; constructors are run from right to left, and the
28989 linker sorts in increasing order. */
28990 MAX_INIT_PRIORITY - priority);
28991 section = buf;
28994 switch_to_section (get_section (section, SECTION_WRITE, NULL));
28995 assemble_align (POINTER_SIZE);
28997 if (TARGET_RELOCATABLE)
28999 fputs ("\t.long (", asm_out_file);
29000 output_addr_const (asm_out_file, symbol);
29001 fputs (")@fixup\n", asm_out_file);
29003 else
29004 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29007 void
29008 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
29010 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
29012 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
29013 ASM_OUTPUT_LABEL (file, name);
29014 fputs (DOUBLE_INT_ASM_OP, file);
29015 rs6000_output_function_entry (file, name);
29016 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
29017 if (DOT_SYMBOLS)
29019 fputs ("\t.size\t", file);
29020 assemble_name (file, name);
29021 fputs (",24\n\t.type\t.", file);
29022 assemble_name (file, name);
29023 fputs (",@function\n", file);
29024 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
29026 fputs ("\t.globl\t.", file);
29027 assemble_name (file, name);
29028 putc ('\n', file);
29031 else
29032 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29033 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29034 rs6000_output_function_entry (file, name);
29035 fputs (":\n", file);
29036 return;
29039 if (TARGET_RELOCATABLE
29040 && !TARGET_SECURE_PLT
29041 && (get_pool_size () != 0 || crtl->profile)
29042 && uses_TOC ())
29044 char buf[256];
29046 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
29048 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
29049 fprintf (file, "\t.long ");
29050 assemble_name (file, buf);
29051 putc ('-', file);
29052 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29053 assemble_name (file, buf);
29054 putc ('\n', file);
29057 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29058 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29060 if (DEFAULT_ABI == ABI_AIX)
29062 const char *desc_name, *orig_name;
29064 orig_name = (*targetm.strip_name_encoding) (name);
29065 desc_name = orig_name;
29066 while (*desc_name == '.')
29067 desc_name++;
29069 if (TREE_PUBLIC (decl))
29070 fprintf (file, "\t.globl %s\n", desc_name);
29072 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29073 fprintf (file, "%s:\n", desc_name);
29074 fprintf (file, "\t.long %s\n", orig_name);
29075 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
29076 fputs ("\t.long 0\n", file);
29077 fprintf (file, "\t.previous\n");
29079 ASM_OUTPUT_LABEL (file, name);
29082 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
29083 static void
29084 rs6000_elf_file_end (void)
29086 #ifdef HAVE_AS_GNU_ATTRIBUTE
29087 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
29089 if (rs6000_passes_float)
29090 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
29091 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
29092 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
29093 : 2));
29094 if (rs6000_passes_vector)
29095 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
29096 (TARGET_ALTIVEC_ABI ? 2
29097 : TARGET_SPE_ABI ? 3
29098 : 1));
29099 if (rs6000_returns_struct)
29100 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
29101 aix_struct_return ? 2 : 1);
29103 #endif
29104 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29105 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
29106 file_end_indicate_exec_stack ();
29107 #endif
29109 #endif
29111 #if TARGET_XCOFF
29112 static void
29113 rs6000_xcoff_asm_output_anchor (rtx symbol)
29115 char buffer[100];
29117 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
29118 SYMBOL_REF_BLOCK_OFFSET (symbol));
29119 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
29122 static void
29123 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
29125 fputs (GLOBAL_ASM_OP, stream);
29126 RS6000_OUTPUT_BASENAME (stream, name);
29127 putc ('\n', stream);
29130 /* A get_unnamed_section callback, used for read-only sections.
29131 DIRECTIVE points to the section string variable. */
29133 static void
29134 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
29136 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
29137 *(const char *const *) directive,
29138 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29141 /* Likewise for read-write sections. */
29143 static void
29144 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
29146 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
29147 *(const char *const *) directive,
29148 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29151 static void
29152 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
29154 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
29155 *(const char *const *) directive,
29156 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
29159 /* A get_unnamed_section callback, used for switching to toc_section. */
29161 static void
29162 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
29164 if (TARGET_MINIMAL_TOC)
29166 /* toc_section is always selected at least once from
29167 rs6000_xcoff_file_start, so this is guaranteed to be defined
29168 once and only once in each file. */
29169 if (!toc_initialized)
29171 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
29172 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
29173 toc_initialized = 1;
29175 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
29176 (TARGET_32BIT ? "" : ",3"));
29178 else
29179 fputs ("\t.toc\n", asm_out_file);
29182 /* Implement TARGET_ASM_INIT_SECTIONS. */
29184 static void
29185 rs6000_xcoff_asm_init_sections (void)
29187 read_only_data_section
29188 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29189 &xcoff_read_only_section_name);
29191 private_data_section
29192 = get_unnamed_section (SECTION_WRITE,
29193 rs6000_xcoff_output_readwrite_section_asm_op,
29194 &xcoff_private_data_section_name);
29196 tls_data_section
29197 = get_unnamed_section (SECTION_TLS,
29198 rs6000_xcoff_output_tls_section_asm_op,
29199 &xcoff_tls_data_section_name);
29201 tls_private_data_section
29202 = get_unnamed_section (SECTION_TLS,
29203 rs6000_xcoff_output_tls_section_asm_op,
29204 &xcoff_private_data_section_name);
29206 read_only_private_data_section
29207 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
29208 &xcoff_private_data_section_name);
29210 toc_section
29211 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
29213 readonly_data_section = read_only_data_section;
29214 exception_section = data_section;
29217 static int
29218 rs6000_xcoff_reloc_rw_mask (void)
29220 return 3;
29223 static void
29224 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
29225 tree decl ATTRIBUTE_UNUSED)
29227 int smclass;
29228 static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };
29230 if (flags & SECTION_CODE)
29231 smclass = 0;
29232 else if (flags & SECTION_TLS)
29233 smclass = 3;
29234 else if (flags & SECTION_WRITE)
29235 smclass = 2;
29236 else
29237 smclass = 1;
29239 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
29240 (flags & SECTION_CODE) ? "." : "",
29241 name, suffix[smclass], flags & SECTION_ENTSIZE);
29244 #define IN_NAMED_SECTION(DECL) \
29245 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29246 && DECL_SECTION_NAME (DECL) != NULL_TREE)
29248 static section *
29249 rs6000_xcoff_select_section (tree decl, int reloc,
29250 unsigned HOST_WIDE_INT align)
29252 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29253 a named section. */
29254 if (align > BIGGEST_ALIGNMENT)
29256 resolve_unique_section (decl, reloc, true);
29257 if (IN_NAMED_SECTION (decl))
29258 return get_named_section (decl, NULL, reloc);
29261 if (decl_readonly_section (decl, reloc))
29263 if (TREE_PUBLIC (decl))
29264 return read_only_data_section;
29265 else
29266 return read_only_private_data_section;
29268 else
29270 #if HAVE_AS_TLS
29271 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29273 if (TREE_PUBLIC (decl))
29274 return tls_data_section;
29275 else if (bss_initializer_p (decl))
29277 /* Convert to COMMON to emit in BSS. */
29278 DECL_COMMON (decl) = 1;
29279 return tls_comm_section;
29281 else
29282 return tls_private_data_section;
29284 else
29285 #endif
29286 if (TREE_PUBLIC (decl))
29287 return data_section;
29288 else
29289 return private_data_section;
29293 static void
29294 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
29296 const char *name;
29298 /* Use select_section for private data and uninitialized data with
29299 alignment <= BIGGEST_ALIGNMENT. */
29300 if (!TREE_PUBLIC (decl)
29301 || DECL_COMMON (decl)
29302 || (DECL_INITIAL (decl) == NULL_TREE
29303 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
29304 || DECL_INITIAL (decl) == error_mark_node
29305 || (flag_zero_initialized_in_bss
29306 && initializer_zerop (DECL_INITIAL (decl))))
29307 return;
29309 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
29310 name = (*targetm.strip_name_encoding) (name);
29311 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
29314 /* Select section for constant in constant pool.
29316 On RS/6000, all constants are in the private read-only data area.
29317 However, if this is being placed in the TOC it must be output as a
29318 toc entry. */
29320 static section *
29321 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
29322 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
29324 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
29325 return toc_section;
29326 else
29327 return read_only_private_data_section;
29330 /* Remove any trailing [DS] or the like from the symbol name. */
29332 static const char *
29333 rs6000_xcoff_strip_name_encoding (const char *name)
29335 size_t len;
29336 if (*name == '*')
29337 name++;
29338 len = strlen (name);
29339 if (name[len - 1] == ']')
29340 return ggc_alloc_string (name, len - 4);
29341 else
29342 return name;
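/* For example, "foo[DS]" becomes "foo" and "*bar[RW]" becomes "bar",
   while a name with no trailing bracket such as "baz" is returned
   unchanged.  Note that the code assumes any trailing mapping-class
   suffix is exactly four characters long, of the form "[XX]".  */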
29345 /* Section attributes. AIX is always PIC. */
29347 static unsigned int
29348 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
29350 unsigned int align;
29351 unsigned int flags = default_section_type_flags (decl, name, reloc);
29353 /* Align to at least UNIT size. */
29354 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
29355 align = MIN_UNITS_PER_WORD;
29356 else
29357 /* Increase alignment of large objects if not already stricter. */
29358 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
29359 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
29360 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
29362 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
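/* For example, a non-code decl with DECL_ALIGN of 128 bits yields
   align = 16 bytes, and exact_log2 (16) = 4 is folded into the
   SECTION_ENTSIZE bits, which rs6000_xcoff_asm_named_section above
   later prints as the .csect alignment operand.  */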
29365 /* Output at beginning of assembler file.
29367 Initialize the section names for the RS/6000 at this point.
29369 Specify filename, including full path, to assembler.
29371 We want to go into the TOC section so at least one .toc will be emitted.
29372 Also, in order to output proper .bs/.es pairs, we need at least one static
29373 [RW] section emitted.
29375 Finally, declare mcount when profiling to make the assembler happy. */
29377 static void
29378 rs6000_xcoff_file_start (void)
29380 rs6000_gen_section_name (&xcoff_bss_section_name,
29381 main_input_filename, ".bss_");
29382 rs6000_gen_section_name (&xcoff_private_data_section_name,
29383 main_input_filename, ".rw_");
29384 rs6000_gen_section_name (&xcoff_read_only_section_name,
29385 main_input_filename, ".ro_");
29386 rs6000_gen_section_name (&xcoff_tls_data_section_name,
29387 main_input_filename, ".tls_");
29388 rs6000_gen_section_name (&xcoff_tbss_section_name,
29389 main_input_filename, ".tbss_[UL]");
29391 fputs ("\t.file\t", asm_out_file);
29392 output_quoted_string (asm_out_file, main_input_filename);
29393 fputc ('\n', asm_out_file);
29394 if (write_symbols != NO_DEBUG)
29395 switch_to_section (private_data_section);
29396 switch_to_section (text_section);
29397 if (profile_flag)
29398 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
29399 rs6000_file_start ();
29402 /* Output at end of assembler file.
29403 On the RS/6000, referencing data should automatically pull in text. */
29405 static void
29406 rs6000_xcoff_file_end (void)
29408 switch_to_section (text_section);
29409 fputs ("_section_.text:\n", asm_out_file);
29410 switch_to_section (data_section);
29411 fputs (TARGET_32BIT
29412 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
29413 asm_out_file);
29416 #ifdef HAVE_AS_TLS
29417 static void
29418 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
29420 rtx symbol;
29421 int flags;
29423 default_encode_section_info (decl, rtl, first);
29425 /* Careful not to prod global register variables. */
29426 if (!MEM_P (rtl))
29427 return;
29428 symbol = XEXP (rtl, 0);
29429 if (GET_CODE (symbol) != SYMBOL_REF)
29430 return;
29432 flags = SYMBOL_REF_FLAGS (symbol);
29434 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
29435 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
29437 SYMBOL_REF_FLAGS (symbol) = flags;
29439 #endif /* HAVE_AS_TLS */
29440 #endif /* TARGET_XCOFF */
29442 /* Compute a (partial) cost for rtx X. Return true if the complete
29443 cost has been computed, and false if subexpressions should be
29444 scanned. In either case, *TOTAL contains the cost result. */
29446 static bool
29447 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
29448 int *total, bool speed)
29450 enum machine_mode mode = GET_MODE (x);
29452 switch (code)
29454 /* On the RS/6000, if it is valid in the insn, it is free. */
29455 case CONST_INT:
29456 if (((outer_code == SET
29457 || outer_code == PLUS
29458 || outer_code == MINUS)
29459 && (satisfies_constraint_I (x)
29460 || satisfies_constraint_L (x)))
29461 || (outer_code == AND
29462 && (satisfies_constraint_K (x)
29463 || (mode == SImode
29464 ? satisfies_constraint_L (x)
29465 : satisfies_constraint_J (x))
29466 || mask_operand (x, mode)
29467 || (mode == DImode
29468 && mask64_operand (x, DImode))))
29469 || ((outer_code == IOR || outer_code == XOR)
29470 && (satisfies_constraint_K (x)
29471 || (mode == SImode
29472 ? satisfies_constraint_L (x)
29473 : satisfies_constraint_J (x))))
29474 || outer_code == ASHIFT
29475 || outer_code == ASHIFTRT
29476 || outer_code == LSHIFTRT
29477 || outer_code == ROTATE
29478 || outer_code == ROTATERT
29479 || outer_code == ZERO_EXTRACT
29480 || (outer_code == MULT
29481 && satisfies_constraint_I (x))
29482 || ((outer_code == DIV || outer_code == UDIV
29483 || outer_code == MOD || outer_code == UMOD)
29484 && exact_log2 (INTVAL (x)) >= 0)
29485 || (outer_code == COMPARE
29486 && (satisfies_constraint_I (x)
29487 || satisfies_constraint_K (x)))
29488 || ((outer_code == EQ || outer_code == NE)
29489 && (satisfies_constraint_I (x)
29490 || satisfies_constraint_K (x)
29491 || (mode == SImode
29492 ? satisfies_constraint_L (x)
29493 : satisfies_constraint_J (x))))
29494 || (outer_code == GTU
29495 && satisfies_constraint_I (x))
29496 || (outer_code == LTU
29497 && satisfies_constraint_P (x)))
29499 *total = 0;
29500 return true;
29502 else if ((outer_code == PLUS
29503 && reg_or_add_cint_operand (x, VOIDmode))
29504 || (outer_code == MINUS
29505 && reg_or_sub_cint_operand (x, VOIDmode))
29506 || ((outer_code == SET
29507 || outer_code == IOR
29508 || outer_code == XOR)
29509 && (INTVAL (x)
29510 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
29512 *total = COSTS_N_INSNS (1);
29513 return true;
29515 /* FALLTHRU */
29517 case CONST_DOUBLE:
29518 case CONST_WIDE_INT:
29519 case CONST:
29520 case HIGH:
29521 case SYMBOL_REF:
29522 case MEM:
29523 /* When optimizing for size, MEM should be slightly more expensive
29524 than generating the address, e.g., (plus (reg) (const)).
29525 L1 cache latency is about two instructions. */
29526 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
29527 return true;
29529 case LABEL_REF:
29530 *total = 0;
29531 return true;
29533 case PLUS:
29534 case MINUS:
29535 if (FLOAT_MODE_P (mode))
29536 *total = rs6000_cost->fp;
29537 else
29538 *total = COSTS_N_INSNS (1);
29539 return false;
29541 case MULT:
29542 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29543 && satisfies_constraint_I (XEXP (x, 1)))
29545 if (INTVAL (XEXP (x, 1)) >= -256
29546 && INTVAL (XEXP (x, 1)) <= 255)
29547 *total = rs6000_cost->mulsi_const9;
29548 else
29549 *total = rs6000_cost->mulsi_const;
29551 else if (mode == SFmode)
29552 *total = rs6000_cost->fp;
29553 else if (FLOAT_MODE_P (mode))
29554 *total = rs6000_cost->dmul;
29555 else if (mode == DImode)
29556 *total = rs6000_cost->muldi;
29557 else
29558 *total = rs6000_cost->mulsi;
29559 return false;
29561 case FMA:
29562 if (mode == SFmode)
29563 *total = rs6000_cost->fp;
29564 else
29565 *total = rs6000_cost->dmul;
29566 break;
29568 case DIV:
29569 case MOD:
29570 if (FLOAT_MODE_P (mode))
29572 *total = mode == DFmode ? rs6000_cost->ddiv
29573 : rs6000_cost->sdiv;
29574 return false;
29576 /* FALLTHRU */
29578 case UDIV:
29579 case UMOD:
29580 if (GET_CODE (XEXP (x, 1)) == CONST_INT
29581 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
29583 if (code == DIV || code == MOD)
29584 /* Shift, addze */
29585 *total = COSTS_N_INSNS (2);
29586 else
29587 /* Shift */
29588 *total = COSTS_N_INSNS (1);
29590 else
29592 if (GET_MODE (XEXP (x, 1)) == DImode)
29593 *total = rs6000_cost->divdi;
29594 else
29595 *total = rs6000_cost->divsi;
29597 /* Add in shift and subtract for MOD. */
29598 if (code == MOD || code == UMOD)
29599 *total += COSTS_N_INSNS (2);
29600 return false;
29602 case CTZ:
29603 case FFS:
29604 *total = COSTS_N_INSNS (4);
29605 return false;
29607 case POPCOUNT:
29608 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
29609 return false;
29611 case PARITY:
29612 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
29613 return false;
29615 case NOT:
29616 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
29618 *total = 0;
29619 return false;
29621 /* FALLTHRU */
29623 case AND:
29624 case CLZ:
29625 case IOR:
29626 case XOR:
29627 case ZERO_EXTRACT:
29628 *total = COSTS_N_INSNS (1);
29629 return false;
29631 case ASHIFT:
29632 case ASHIFTRT:
29633 case LSHIFTRT:
29634 case ROTATE:
29635 case ROTATERT:
29636 /* Handle mul_highpart. */
29637 if (outer_code == TRUNCATE
29638 && GET_CODE (XEXP (x, 0)) == MULT)
29640 if (mode == DImode)
29641 *total = rs6000_cost->muldi;
29642 else
29643 *total = rs6000_cost->mulsi;
29644 return true;
29646 else if (outer_code == AND)
29647 *total = 0;
29648 else
29649 *total = COSTS_N_INSNS (1);
29650 return false;
29652 case SIGN_EXTEND:
29653 case ZERO_EXTEND:
29654 if (GET_CODE (XEXP (x, 0)) == MEM)
29655 *total = 0;
29656 else
29657 *total = COSTS_N_INSNS (1);
29658 return false;
29660 case COMPARE:
29661 case NEG:
29662 case ABS:
29663 if (!FLOAT_MODE_P (mode))
29665 *total = COSTS_N_INSNS (1);
29666 return false;
29668 /* FALLTHRU */
29670 case FLOAT:
29671 case UNSIGNED_FLOAT:
29672 case FIX:
29673 case UNSIGNED_FIX:
29674 case FLOAT_TRUNCATE:
29675 *total = rs6000_cost->fp;
29676 return false;
29678 case FLOAT_EXTEND:
29679 if (mode == DFmode)
29680 *total = 0;
29681 else
29682 *total = rs6000_cost->fp;
29683 return false;
29685 case UNSPEC:
29686 switch (XINT (x, 1))
29688 case UNSPEC_FRSP:
29689 *total = rs6000_cost->fp;
29690 return true;
29692 default:
29693 break;
29695 break;
29697 case CALL:
29698 case IF_THEN_ELSE:
29699 if (!speed)
29701 *total = COSTS_N_INSNS (1);
29702 return true;
29704 else if (FLOAT_MODE_P (mode)
29705 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
29707 *total = rs6000_cost->fp;
29708 return false;
29710 break;
29712 case EQ:
29713 case GTU:
29714 case LTU:
29715 /* Carry bit requires mode == Pmode.
29716 NEG or PLUS already counted so only add one. */
29717 if (mode == Pmode
29718 && (outer_code == NEG || outer_code == PLUS))
29720 *total = COSTS_N_INSNS (1);
29721 return true;
29723 if (outer_code == SET)
29725 if (XEXP (x, 1) == const0_rtx)
29727 if (TARGET_ISEL && !TARGET_MFCRF)
29728 *total = COSTS_N_INSNS (8);
29729 else
29730 *total = COSTS_N_INSNS (2);
29731 return true;
29733 else if (mode == Pmode)
29735 *total = COSTS_N_INSNS (3);
29736 return false;
29739 /* FALLTHRU */
29741 case GT:
29742 case LT:
29743 case UNORDERED:
29744 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
29746 if (TARGET_ISEL && !TARGET_MFCRF)
29747 *total = COSTS_N_INSNS (8);
29748 else
29749 *total = COSTS_N_INSNS (2);
29750 return true;
29752 /* CC COMPARE. */
29753 if (outer_code == COMPARE)
29755 *total = 0;
29756 return true;
29758 break;
29760 default:
29761 break;
29764 return false;
29767 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
29769 static bool
29770 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
29771 bool speed)
29773 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
29775 fprintf (stderr,
29776 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
29777 "opno = %d, total = %d, speed = %s, x:\n",
29778 ret ? "complete" : "scan inner",
29779 GET_RTX_NAME (code),
29780 GET_RTX_NAME (outer_code),
29781 opno,
29782 *total,
29783 speed ? "true" : "false");
29785 debug_rtx (x);
29787 return ret;
29790 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
29792 static int
29793 rs6000_debug_address_cost (rtx x, enum machine_mode mode,
29794 addr_space_t as, bool speed)
29796 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
29798 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
29799 ret, speed ? "true" : "false");
29800 debug_rtx (x);
29802 return ret;
29806 /* A C expression returning the cost of moving data from a register of class
29807 CLASS1 to one of CLASS2. */
29809 static int
29810 rs6000_register_move_cost (enum machine_mode mode,
29811 reg_class_t from, reg_class_t to)
29813 int ret;
29815 if (TARGET_DEBUG_COST)
29816 dbg_cost_ctrl++;
29818 /* Moves from/to GENERAL_REGS. */
29819 if (reg_classes_intersect_p (to, GENERAL_REGS)
29820 || reg_classes_intersect_p (from, GENERAL_REGS))
29822 reg_class_t rclass = from;
29824 if (! reg_classes_intersect_p (to, GENERAL_REGS))
29825 rclass = to;
29827 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
29828 ret = (rs6000_memory_move_cost (mode, rclass, false)
29829 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
29831 /* It's more expensive to move CR_REGS than CR0_REGS because of the
29832 shift. */
29833 else if (rclass == CR_REGS)
29834 ret = 4;
29836 /* For those processors that have slow LR/CTR moves, make them more
29837 expensive than memory in order to bias spills to memory. */
29838 else if ((rs6000_cpu == PROCESSOR_POWER6
29839 || rs6000_cpu == PROCESSOR_POWER7
29840 || rs6000_cpu == PROCESSOR_POWER8)
29841 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
29842 ret = 6 * hard_regno_nregs[0][mode];
29844 else
29845 /* A move will cost one instruction per GPR moved. */
29846 ret = 2 * hard_regno_nregs[0][mode];
29849 /* If we have VSX, we can easily move between FPR or Altivec registers. */
29850 else if (VECTOR_MEM_VSX_P (mode)
29851 && reg_classes_intersect_p (to, VSX_REGS)
29852 && reg_classes_intersect_p (from, VSX_REGS))
29853 ret = 2 * hard_regno_nregs[32][mode];
29855 /* Moving between two similar registers is just one instruction. */
29856 else if (reg_classes_intersect_p (to, from))
29857 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
29859 /* Everything else has to go through GENERAL_REGS. */
29860 else
29861 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
29862 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
29864 if (TARGET_DEBUG_COST)
29866 if (dbg_cost_ctrl == 1)
29867 fprintf (stderr,
29868 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
29869 ret, GET_MODE_NAME (mode), reg_class_names[from],
29870 reg_class_names[to]);
29871 dbg_cost_ctrl--;
29874 return ret;
29877 /* A C expression returning the cost of moving data of MODE from a register to
29878 or from memory. */
29880 static int
29881 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
29882 bool in ATTRIBUTE_UNUSED)
29884 int ret;
29886 if (TARGET_DEBUG_COST)
29887 dbg_cost_ctrl++;
29889 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
29890 ret = 4 * hard_regno_nregs[0][mode];
29891 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
29892 || reg_classes_intersect_p (rclass, VSX_REGS)))
29893 ret = 4 * hard_regno_nregs[32][mode];
29894 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
29895 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
29896 else
29897 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
29899 if (TARGET_DEBUG_COST)
29901 if (dbg_cost_ctrl == 1)
29902 fprintf (stderr,
29903 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
29904 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
29905 dbg_cost_ctrl--;
29908 return ret;
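/* For example, moving a DImode value between a GPR and memory costs
   4 * hard_regno_nregs[0][DImode]: 4 on a 64-bit target, where DImode
   occupies one GPR, and 8 on a 32-bit target, where it needs two.  */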
29911 /* Returns a code for a target-specific builtin that implements
29912 reciprocal of the function, or NULL_TREE if not available. */
29914 static tree
29915 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
29916 bool sqrt ATTRIBUTE_UNUSED)
29918 if (optimize_insn_for_size_p ())
29919 return NULL_TREE;
29921 if (md_fn)
29922 switch (fn)
29924 case VSX_BUILTIN_XVSQRTDP:
29925 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
29926 return NULL_TREE;
29928 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
29930 case VSX_BUILTIN_XVSQRTSP:
29931 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
29932 return NULL_TREE;
29934 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
29936 default:
29937 return NULL_TREE;
29940 else
29941 switch (fn)
29943 case BUILT_IN_SQRT:
29944 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
29945 return NULL_TREE;
29947 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
29949 case BUILT_IN_SQRTF:
29950 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
29951 return NULL_TREE;
29953 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
29955 default:
29956 return NULL_TREE;
29960 /* Load up a constant. If the mode is a vector mode, splat the value across
29961 all of the vector elements. */
29963 static rtx
29964 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
29966 rtx reg;
29968 if (mode == SFmode || mode == DFmode)
29970 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
29971 reg = force_reg (mode, d);
29973 else if (mode == V4SFmode)
29975 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
29976 rtvec v = gen_rtvec (4, d, d, d, d);
29977 reg = gen_reg_rtx (mode);
29978 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29980 else if (mode == V2DFmode)
29982 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
29983 rtvec v = gen_rtvec (2, d, d);
29984 reg = gen_reg_rtx (mode);
29985 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
29987 else
29988 gcc_unreachable ();
29990 return reg;
29993 /* Generate an FMA instruction. */
29995 static void
29996 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
29998 enum machine_mode mode = GET_MODE (target);
29999 rtx dst;
30001 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30002 gcc_assert (dst != NULL);
30004 if (dst != target)
30005 emit_move_insn (target, dst);
30008 /* Generate a FMSUB instruction: dst = fma(m1, m2, -a). */
30010 static void
30011 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
30013 enum machine_mode mode = GET_MODE (target);
30014 rtx dst;
30016 /* Altivec does not support fms directly;
30017 generate in terms of fma in that case. */
30018 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
30019 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
30020 else
30022 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
30023 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
30025 gcc_assert (dst != NULL);
30027 if (dst != target)
30028 emit_move_insn (target, dst);
30031 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
30033 static void
30034 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
30036 enum machine_mode mode = GET_MODE (dst);
30037 rtx r;
30039 /* This is a tad more complicated, since the fnma_optab is for
30040 a different expression: fma(-m1, m2, a), which is the same
30041 thing except in the case of signed zeros.
30043 Fortunately we know that if FMA is supported, FNMSUB is
30044 also supported in the ISA. Just expand it directly. */
30046 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
30048 r = gen_rtx_NEG (mode, a);
30049 r = gen_rtx_FMA (mode, m1, m2, r);
30050 r = gen_rtx_NEG (mode, r);
30051 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
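/* Concretely: when m1 * m2 == a exactly, fma (-m1, m2, a) rounds to
   +0.0 under round-to-nearest, whereas -fma (m1, m2, -a) yields
   -(+0.0) = -0.0, so the two forms differ only in the sign of an
   exact zero result.  */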
30054 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
30055 add a reg_note saying that this was a division. Support both scalar and
30056 vector divide. Assumes no trapping math and finite arguments. */
30058 void
30059 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
30061 enum machine_mode mode = GET_MODE (dst);
30062 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
30063 int i;
30065 /* Low precision estimates guarantee 5 bits of accuracy. High
30066 precision estimates guarantee 14 bits of accuracy. SFmode
30067 requires 23 bits of accuracy. DFmode requires 52 bits of
30068 accuracy. Each pass at least doubles the accuracy, leading
30069 to the following. */
30070 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30071 if (mode == DFmode || mode == V2DFmode)
30072 passes++;
30074 enum insn_code code = optab_handler (smul_optab, mode);
30075 insn_gen_fn gen_mul = GEN_FCN (code);
30077 gcc_assert (code != CODE_FOR_nothing);
30079 one = rs6000_load_constant_and_splat (mode, dconst1);
30081 /* x0 = 1./d estimate */
30082 x0 = gen_reg_rtx (mode);
30083 emit_insn (gen_rtx_SET (VOIDmode, x0,
30084 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
30085 UNSPEC_FRES)));
30087 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
30088 if (passes > 1) {
30090 /* e0 = 1. - d * x0 */
30091 e0 = gen_reg_rtx (mode);
30092 rs6000_emit_nmsub (e0, d, x0, one);
30094 /* x1 = x0 + e0 * x0 */
30095 x1 = gen_reg_rtx (mode);
30096 rs6000_emit_madd (x1, e0, x0, x0);
30098 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
30099 ++i, xprev = xnext, eprev = enext) {
30101 /* enext = eprev * eprev */
30102 enext = gen_reg_rtx (mode);
30103 emit_insn (gen_mul (enext, eprev, eprev));
30105 /* xnext = xprev + enext * xprev */
30106 xnext = gen_reg_rtx (mode);
30107 rs6000_emit_madd (xnext, enext, xprev, xprev);
30110 } else
30111 xprev = x0;
30113 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
30115 /* u = n * xprev */
30116 u = gen_reg_rtx (mode);
30117 emit_insn (gen_mul (u, n, xprev));
30119 /* v = n - (d * u) */
30120 v = gen_reg_rtx (mode);
30121 rs6000_emit_nmsub (v, d, u, n);
30123 /* dst = (v * xprev) + u */
30124 rs6000_emit_madd (dst, v, xprev, u);
30126 if (note_p)
30127 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
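/* An illustrative sketch (not compiled) of the sequence above for the
   scalar single-precision case with two passes; `fres_estimate' is a
   hypothetical stand-in for the UNSPEC_FRES hardware estimate.  */
#if 0
static float
swdiv_sketch (float n, float d)
{
  float x = fres_estimate (d);	/* x0 ~= 1/d, about 5 bits.  */
  float e = 1.0f - d * x;	/* e0 = 1 - d*x0 (rs6000_emit_nmsub).  */
  x = x + e * x;		/* x1 = x0 + e0*x0 (rs6000_emit_madd).  */
  /* The last pass folds in the numerator: n*x1*(2 - d*x1).  */
  float u = n * x;		/* u = n * xprev  */
  float v = n - d * u;		/* v = n - d*u (rs6000_emit_nmsub)  */
  return u + v * x;		/* dst = u + v*xprev (rs6000_emit_madd)  */
}
#endif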
30130 /* Newton-Raphson approximation of single/double-precision floating point
30131 rsqrt. Assumes no trapping math and finite arguments. */
30133 void
30134 rs6000_emit_swrsqrt (rtx dst, rtx src)
30136 enum machine_mode mode = GET_MODE (src);
30137 rtx x0 = gen_reg_rtx (mode);
30138 rtx y = gen_reg_rtx (mode);
30140 /* Low precision estimates guarantee 5 bits of accuracy. High
30141 precision estimates guarantee 14 bits of accuracy. SFmode
30142 requires 23 bits of accuracy. DFmode requires 52 bits of
30143 accuracy. Each pass at least doubles the accuracy, leading
30144 to the following. */
30145 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
30146 if (mode == DFmode || mode == V2DFmode)
30147 passes++;
30149 REAL_VALUE_TYPE dconst3_2;
30150 int i;
30151 rtx halfthree;
30152 enum insn_code code = optab_handler (smul_optab, mode);
30153 insn_gen_fn gen_mul = GEN_FCN (code);
30155 gcc_assert (code != CODE_FOR_nothing);
30157 /* Load up the constant 1.5 either as a scalar, or as a vector. */
30158 real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
30159 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
30161 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
30163 /* x0 = rsqrt estimate */
30164 emit_insn (gen_rtx_SET (VOIDmode, x0,
30165 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
30166 UNSPEC_RSQRT)));
30168 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
30169 rs6000_emit_msub (y, src, halfthree, src);
30171 for (i = 0; i < passes; i++)
30173 rtx x1 = gen_reg_rtx (mode);
30174 rtx u = gen_reg_rtx (mode);
30175 rtx v = gen_reg_rtx (mode);
30177 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
30178 emit_insn (gen_mul (u, x0, x0));
30179 rs6000_emit_nmsub (v, y, u, halfthree);
30180 emit_insn (gen_mul (x1, x0, v));
30181 x0 = x1;
30184 emit_move_insn (dst, x0);
30185 return;
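/* An illustrative sketch (not compiled) of one pass of the loop above
   for scalar floats; `frsqrte_estimate' is a hypothetical stand-in
   for the UNSPEC_RSQRT hardware estimate.  */
#if 0
static float
swrsqrt_sketch (float src)
{
  float halfthree = 1.5f;
  float x = frsqrte_estimate (src);	/* x0 ~= 1/sqrt(src).  */
  float y = halfthree * src - src;	/* y = 0.5 * src.  */
  /* x1 = x0 * (1.5 - y * (x0 * x0)), the Newton-Raphson step.  */
  float u = x * x;
  float v = halfthree - y * u;
  return x * v;
}
#endif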
30188 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
30189 (Power7) targets. DST is the target, and SRC is the argument operand. */
30191 void
30192 rs6000_emit_popcount (rtx dst, rtx src)
30194 enum machine_mode mode = GET_MODE (dst);
30195 rtx tmp1, tmp2;
30197 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
30198 if (TARGET_POPCNTD)
30200 if (mode == SImode)
30201 emit_insn (gen_popcntdsi2 (dst, src));
30202 else
30203 emit_insn (gen_popcntddi2 (dst, src));
30204 return;
30207 tmp1 = gen_reg_rtx (mode);
30209 if (mode == SImode)
30211 emit_insn (gen_popcntbsi2 (tmp1, src));
30212 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
30213 NULL_RTX, 0);
30214 tmp2 = force_reg (SImode, tmp2);
30215 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
30217 else
30219 emit_insn (gen_popcntbdi2 (tmp1, src));
30220 tmp2 = expand_mult (DImode, tmp1,
30221 GEN_INT ((HOST_WIDE_INT)
30222 0x01010101 << 32 | 0x01010101),
30223 NULL_RTX, 0);
30224 tmp2 = force_reg (DImode, tmp2);
30225 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
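/* An illustrative sketch (not compiled) of the SImode multiply trick
   above in plain C; the loop simulates what popcntb computes.  */
#if 0
static unsigned int
popcount32_sketch (unsigned int x)
{
  unsigned int b = 0;
  int i;
  /* b holds one popcount per byte, each at most 8.  */
  for (i = 0; i < 4; i++)
    b |= (unsigned int) __builtin_popcount ((x >> (8 * i)) & 0xff)
	 << (8 * i);
  /* Multiplying by 0x01010101 sums all four byte counts into the
     most significant byte (no carries, since the total is <= 32);
     the shift by 24 then extracts that sum.  */
  return (b * 0x01010101) >> 24;
}
#endif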
30230 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
30231 target, and SRC is the argument operand. */
30233 void
30234 rs6000_emit_parity (rtx dst, rtx src)
30236 enum machine_mode mode = GET_MODE (dst);
30237 rtx tmp;
30239 tmp = gen_reg_rtx (mode);
30241 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
30242 if (TARGET_CMPB)
30244 if (mode == SImode)
30246 emit_insn (gen_popcntbsi2 (tmp, src));
30247 emit_insn (gen_paritysi2_cmpb (dst, tmp));
30249 else
30251 emit_insn (gen_popcntbdi2 (tmp, src));
30252 emit_insn (gen_paritydi2_cmpb (dst, tmp));
30254 return;
30257 if (mode == SImode)
30259 /* Is mult+shift >= shift+xor+shift+xor? */
30260 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
30262 rtx tmp1, tmp2, tmp3, tmp4;
30264 tmp1 = gen_reg_rtx (SImode);
30265 emit_insn (gen_popcntbsi2 (tmp1, src));
30267 tmp2 = gen_reg_rtx (SImode);
30268 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
30269 tmp3 = gen_reg_rtx (SImode);
30270 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
30272 tmp4 = gen_reg_rtx (SImode);
30273 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
30274 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
30276 else
30277 rs6000_emit_popcount (tmp, src);
30278 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
30280 else
30282 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
30283 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
30285 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
30287 tmp1 = gen_reg_rtx (DImode);
30288 emit_insn (gen_popcntbdi2 (tmp1, src));
30290 tmp2 = gen_reg_rtx (DImode);
30291 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
30292 tmp3 = gen_reg_rtx (DImode);
30293 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
30295 tmp4 = gen_reg_rtx (DImode);
30296 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
30297 tmp5 = gen_reg_rtx (DImode);
30298 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
30300 tmp6 = gen_reg_rtx (DImode);
30301 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
30302 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
30304 else
30305 rs6000_emit_popcount (tmp, src);
30306 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
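/* An illustrative sketch (not compiled) of the SImode shift+xor
   fallback above; XOR-folding the popcntb byte counts preserves the
   overall parity in bit 0, which the final AND extracts.  */
#if 0
static unsigned int
parity32_sketch (unsigned int x)
{
  unsigned int t = 0;
  int i;
  /* Per-byte popcounts, as popcntb would produce them.  */
  for (i = 0; i < 4; i++)
    t |= (unsigned int) __builtin_popcount ((x >> (8 * i)) & 0xff)
	 << (8 * i);
  t ^= t >> 16;
  t ^= t >> 8;
  return t & 1;
}
#endif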
30310 /* Expand an Altivec constant permutation for little endian mode.
30311 There are two issues: First, the two input operands must be
30312 swapped so that together they form a double-wide array in LE
30313 order. Second, the vperm instruction has surprising behavior
30314 in LE mode: it interprets the elements of the source vectors
30315 in BE mode ("left to right") and interprets the elements of
30316 the destination vector in LE mode ("right to left"). To
30317 correct for this, we must subtract each element of the permute
30318 control vector from 31.
30320 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
30321 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
30322 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
30323 serve as the permute control vector. Then, in BE mode,
30325 vperm 9,10,11,12
30327 places the desired result in vr9. However, in LE mode the
30328 vector contents will be
30330 vr10 = 00000003 00000002 00000001 00000000
30331 vr11 = 00000007 00000006 00000005 00000004
30333 The result of the vperm using the same permute control vector is
30335 vr9 = 05000000 07000000 01000000 03000000
30337 That is, the leftmost 4 bytes of vr10 are interpreted as the
30338 source for the rightmost 4 bytes of vr9, and so on.
30340 If we change the permute control vector to
30342 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
30344 and issue
30346 vperm 9,11,10,12
30348 we get the desired
30350 vr9 = 00000006 00000004 00000002 00000000. */
30352 void
30353 altivec_expand_vec_perm_const_le (rtx operands[4])
30355 unsigned int i;
30356 rtx perm[16];
30357 rtx constv, unspec;
30358 rtx target = operands[0];
30359 rtx op0 = operands[1];
30360 rtx op1 = operands[2];
30361 rtx sel = operands[3];
30363 /* Unpack and adjust the constant selector. */
30364 for (i = 0; i < 16; ++i)
30366 rtx e = XVECEXP (sel, 0, i);
30367 unsigned int elt = 31 - (INTVAL (e) & 31);
30368 perm[i] = GEN_INT (elt);
30371 /* Expand to a permute, swapping the inputs and using the
30372 adjusted selector. */
30373 if (!REG_P (op0))
30374 op0 = force_reg (V16QImode, op0);
30375 if (!REG_P (op1))
30376 op1 = force_reg (V16QImode, op1);
30378 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
30379 constv = force_reg (V16QImode, constv);
30380 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
30381 UNSPEC_VPERM);
30382 if (!REG_P (target))
30384 rtx tmp = gen_reg_rtx (V16QImode);
30385 emit_move_insn (tmp, unspec);
30386 unspec = tmp;
30389 emit_move_insn (target, unspec);
30392 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
30393 permute control vector. But here it's not a constant, so we must
30394 generate a vector NAND or NOR to do the adjustment. */
30396 void
30397 altivec_expand_vec_perm_le (rtx operands[4])
30399 rtx notx, iorx, unspec;
30400 rtx target = operands[0];
30401 rtx op0 = operands[1];
30402 rtx op1 = operands[2];
30403 rtx sel = operands[3];
30404 rtx tmp = target;
30405 rtx norreg = gen_reg_rtx (V16QImode);
30406 enum machine_mode mode = GET_MODE (target);
30408 /* Get everything in regs so the pattern matches. */
30409 if (!REG_P (op0))
30410 op0 = force_reg (mode, op0);
30411 if (!REG_P (op1))
30412 op1 = force_reg (mode, op1);
30413 if (!REG_P (sel))
30414 sel = force_reg (V16QImode, sel);
30415 if (!REG_P (target))
30416 tmp = gen_reg_rtx (mode);
30418 /* Invert the selector with a VNAND if available, else a VNOR.
30419 The VNAND is preferred for future fusion opportunities. */
30420 notx = gen_rtx_NOT (V16QImode, sel);
30421 iorx = (TARGET_P8_VECTOR
30422 ? gen_rtx_IOR (V16QImode, notx, notx)
30423 : gen_rtx_AND (V16QImode, notx, notx));
30424 emit_insn (gen_rtx_SET (VOIDmode, norreg, iorx));
30426 /* Permute with operands reversed and adjusted selector. */
30427 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
30428 UNSPEC_VPERM);
30430 /* Copy into target, possibly by way of a register. */
30431 if (!REG_P (target))
30433 emit_move_insn (tmp, unspec);
30434 unspec = tmp;
30437 emit_move_insn (target, unspec);
30440 /* Expand an Altivec constant permutation. Return true if we match
30441 an efficient implementation; false to fall back to VPERM. */
30443 bool
30444 altivec_expand_vec_perm_const (rtx operands[4])
30446 struct altivec_perm_insn {
30447 HOST_WIDE_INT mask;
30448 enum insn_code impl;
30449 unsigned char perm[16];
30451 static const struct altivec_perm_insn patterns[] = {
30452 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
30453 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
30454 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
30455 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
30456 { OPTION_MASK_ALTIVEC,
30457 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
30458 : CODE_FOR_altivec_vmrglb_direct),
30459 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
30460 { OPTION_MASK_ALTIVEC,
30461 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
30462 : CODE_FOR_altivec_vmrglh_direct),
30463 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
30464 { OPTION_MASK_ALTIVEC,
30465 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
30466 : CODE_FOR_altivec_vmrglw_direct),
30467 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
30468 { OPTION_MASK_ALTIVEC,
30469 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
30470 : CODE_FOR_altivec_vmrghb_direct),
30471 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
30472 { OPTION_MASK_ALTIVEC,
30473 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
30474 : CODE_FOR_altivec_vmrghh_direct),
30475 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
30476 { OPTION_MASK_ALTIVEC,
30477 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
30478 : CODE_FOR_altivec_vmrghw_direct),
30479 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
30480 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
30481 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
30482 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
30483 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
30486 unsigned int i, j, elt, which;
30487 unsigned char perm[16];
30488 rtx target, op0, op1, sel, x;
30489 bool one_vec;
30491 target = operands[0];
30492 op0 = operands[1];
30493 op1 = operands[2];
30494 sel = operands[3];
30496 /* Unpack the constant selector. */
30497 for (i = which = 0; i < 16; ++i)
30499 rtx e = XVECEXP (sel, 0, i);
30500 elt = INTVAL (e) & 31;
30501 which |= (elt < 16 ? 1 : 2);
30502 perm[i] = elt;
30505 /* Simplify the constant selector based on operands. */
30506 switch (which)
30508 default:
30509 gcc_unreachable ();
30511 case 3:
30512 one_vec = false;
30513 if (!rtx_equal_p (op0, op1))
30514 break;
30515 /* FALLTHRU */
30517 case 2:
30518 for (i = 0; i < 16; ++i)
30519 perm[i] &= 15;
30520 op0 = op1;
30521 one_vec = true;
30522 break;
30524 case 1:
30525 op1 = op0;
30526 one_vec = true;
30527 break;
30530 /* Look for splat patterns. */
30531 if (one_vec)
30533 elt = perm[0];
30535 for (i = 0; i < 16; ++i)
30536 if (perm[i] != elt)
30537 break;
30538 if (i == 16)
30540 if (!BYTES_BIG_ENDIAN)
30541 elt = 15 - elt;
30542 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
30543 return true;
30546 if (elt % 2 == 0)
30548 for (i = 0; i < 16; i += 2)
30549 if (perm[i] != elt || perm[i + 1] != elt + 1)
30550 break;
30551 if (i == 16)
30553 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
30554 x = gen_reg_rtx (V8HImode);
30555 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
30556 GEN_INT (field)));
30557 emit_move_insn (target, gen_lowpart (V16QImode, x));
30558 return true;
30562 if (elt % 4 == 0)
30564 for (i = 0; i < 16; i += 4)
30565 if (perm[i] != elt
30566 || perm[i + 1] != elt + 1
30567 || perm[i + 2] != elt + 2
30568 || perm[i + 3] != elt + 3)
30569 break;
30570 if (i == 16)
30572 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
30573 x = gen_reg_rtx (V4SImode);
30574 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
30575 GEN_INT (field)));
30576 emit_move_insn (target, gen_lowpart (V16QImode, x));
30577 return true;
30582 /* Look for merge and pack patterns. */
30583 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
30585 bool swapped;
30587 if ((patterns[j].mask & rs6000_isa_flags) == 0)
30588 continue;
30590 elt = patterns[j].perm[0];
30591 if (perm[0] == elt)
30592 swapped = false;
30593 else if (perm[0] == elt + 16)
30594 swapped = true;
30595 else
30596 continue;
30597 for (i = 1; i < 16; ++i)
30599 elt = patterns[j].perm[i];
30600 if (swapped)
30601 elt = (elt >= 16 ? elt - 16 : elt + 16);
30602 else if (one_vec && elt >= 16)
30603 elt -= 16;
30604 if (perm[i] != elt)
30605 break;
30607 if (i == 16)
30609 enum insn_code icode = patterns[j].impl;
30610 enum machine_mode omode = insn_data[icode].operand[0].mode;
30611 enum machine_mode imode = insn_data[icode].operand[1].mode;
30613 /* For little-endian, don't use vpkuwum and vpkuhum if the
30614 underlying vector type is not V4SI and V8HI, respectively.
30615 For example, using vpkuwum with a V8HI picks up the even
30616 halfwords (BE numbering) when the even halfwords (LE
30617 numbering) are what we need. */
30618 if (!BYTES_BIG_ENDIAN
30619 && icode == CODE_FOR_altivec_vpkuwum_direct
30620 && ((GET_CODE (op0) == REG
30621 && GET_MODE (op0) != V4SImode)
30622 || (GET_CODE (op0) == SUBREG
30623 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
30624 continue;
30625 if (!BYTES_BIG_ENDIAN
30626 && icode == CODE_FOR_altivec_vpkuhum_direct
30627 && ((GET_CODE (op0) == REG
30628 && GET_MODE (op0) != V8HImode)
30629 || (GET_CODE (op0) == SUBREG
30630 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
30631 continue;
30633 /* For little-endian, the two input operands must be swapped
30634 (or swapped back) to ensure proper right-to-left numbering
30635 from 0 to 2N-1. */
30636 if (swapped ^ !BYTES_BIG_ENDIAN)
30637 x = op0, op0 = op1, op1 = x;
30638 if (imode != V16QImode)
30640 op0 = gen_lowpart (imode, op0);
30641 op1 = gen_lowpart (imode, op1);
30643 if (omode == V16QImode)
30644 x = target;
30645 else
30646 x = gen_reg_rtx (omode);
30647 emit_insn (GEN_FCN (icode) (x, op0, op1));
30648 if (omode != V16QImode)
30649 emit_move_insn (target, gen_lowpart (V16QImode, x));
30650 return true;
30654 if (!BYTES_BIG_ENDIAN)
30656 altivec_expand_vec_perm_const_le (operands);
30657 return true;
30660 return false;
30663 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
30664 Return true if we match an efficient implementation. */
30666 static bool
30667 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
30668 unsigned char perm0, unsigned char perm1)
30670 rtx x;
30672 /* If both selectors come from the same operand, fold to single op. */
30673 if ((perm0 & 2) == (perm1 & 2))
30675 if (perm0 & 2)
30676 op0 = op1;
30677 else
30678 op1 = op0;
30680 /* If both operands are equal, fold to simpler permutation. */
30681 if (rtx_equal_p (op0, op1))
30683 perm0 = perm0 & 1;
30684 perm1 = (perm1 & 1) + 2;
30686 /* If the first selector comes from the second operand, swap. */
30687 else if (perm0 & 2)
30689 if (perm1 & 2)
30690 return false;
30691 perm0 -= 2;
30692 perm1 += 2;
30693 x = op0, op0 = op1, op1 = x;
30695 /* If the second selector does not come from the second operand, fail. */
30696 else if ((perm1 & 2) == 0)
30697 return false;
30699 /* Success! */
30700 if (target != NULL)
30702 enum machine_mode vmode, dmode;
30703 rtvec v;
30705 vmode = GET_MODE (target);
30706 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
30707 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
30708 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
30709 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
30710 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
30711 emit_insn (gen_rtx_SET (VOIDmode, target, x));
30713 return true;
30716 bool
30717 rs6000_expand_vec_perm_const (rtx operands[4])
30719 rtx target, op0, op1, sel;
30720 unsigned char perm0, perm1;
30722 target = operands[0];
30723 op0 = operands[1];
30724 op1 = operands[2];
30725 sel = operands[3];
30727 /* Unpack the constant selector. */
30728 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
30729 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
30731 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
30734 /* Test whether a constant permutation is supported. */
30736 static bool
30737 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
30738 const unsigned char *sel)
30740 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
30741 if (TARGET_ALTIVEC)
30742 return true;
30744 /* Check for ps_merge* or evmerge* insns. */
30745 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
30746 || (TARGET_SPE && vmode == V2SImode))
30748 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
30749 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
30750 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
30753 return false;
30756 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
30758 static void
30759 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
30760 enum machine_mode vmode, unsigned nelt, rtx perm[])
30762 enum machine_mode imode;
30763 rtx x;
30765 imode = vmode;
30766 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
30768 imode = GET_MODE_INNER (vmode);
30769 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
30770 imode = mode_for_vector (imode, nelt);
30773 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
30774 x = expand_vec_perm (vmode, op0, op1, x, target);
30775 if (x != target)
30776 emit_move_insn (target, x);
30779 /* Expand an extract even operation. */
30781 void
30782 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
30784 enum machine_mode vmode = GET_MODE (target);
30785 unsigned i, nelt = GET_MODE_NUNITS (vmode);
30786 rtx perm[16];
30788 for (i = 0; i < nelt; i++)
30789 perm[i] = GEN_INT (i * 2);
30791 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
30794 /* Expand a vector interleave operation. */
30796 void
30797 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
30799 enum machine_mode vmode = GET_MODE (target);
30800 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
30801 rtx perm[16];
30803 high = (highp ? 0 : nelt / 2);
30804 for (i = 0; i < nelt / 2; i++)
30806 perm[i * 2] = GEN_INT (i + high);
30807 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
30810 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
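/* For example, with V4SI operands op0 = {a0,a1,a2,a3} and
   op1 = {b0,b1,b2,b3} (selector numbering as above):
   rs6000_expand_extract_even builds the selector {0,2,4,6} and
   produces {a0,a2,b0,b2}, while rs6000_expand_interleave with HIGHP
   builds {0,4,1,5} and produces {a0,b0,a1,b1}.  */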
30813 /* Return an RTX representing where to find the function value of a
30814 function returning MODE. */
30815 static rtx
30816 rs6000_complex_function_value (enum machine_mode mode)
30818 unsigned int regno;
30819 rtx r1, r2;
30820 enum machine_mode inner = GET_MODE_INNER (mode);
30821 unsigned int inner_bytes = GET_MODE_SIZE (inner);
30823 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30824 regno = FP_ARG_RETURN;
30825 else
30827 regno = GP_ARG_RETURN;
30829 /* 32-bit is OK since it'll go in r3/r4. */
30830 if (TARGET_32BIT && inner_bytes >= 4)
30831 return gen_rtx_REG (mode, regno);
30834 if (inner_bytes >= 8)
30835 return gen_rtx_REG (mode, regno);
30837 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
30838 const0_rtx);
30839 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
30840 GEN_INT (inner_bytes));
30841 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
30844 /* Target hook for TARGET_FUNCTION_VALUE.
30846 On the SPE, both FPs and vectors are returned in r3.
30848 On RS/6000 an integer value is in r3 and a floating-point value is in
30849 fp1, unless -msoft-float. */
30851 static rtx
30852 rs6000_function_value (const_tree valtype,
30853 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
30854 bool outgoing ATTRIBUTE_UNUSED)
30856 enum machine_mode mode;
30857 unsigned int regno;
30858 enum machine_mode elt_mode;
30859 int n_elts;
30861 /* Special handling for structs in darwin64. */
30862 if (TARGET_MACHO
30863 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
30865 CUMULATIVE_ARGS valcum;
30866 rtx valret;
30868 valcum.words = 0;
30869 valcum.fregno = FP_ARG_MIN_REG;
30870 valcum.vregno = ALTIVEC_ARG_MIN_REG;
30871 /* Do a trial code generation as if this were going to be passed as
30872 an argument; if any part goes in memory, we return NULL. */
30873 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
30874 if (valret)
30875 return valret;
30876 /* Otherwise fall through to standard ABI rules. */
30879 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
30880 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
30881 &elt_mode, &n_elts))
30883 int first_reg, n_regs, i;
30884 rtx par;
30886 if (SCALAR_FLOAT_MODE_P (elt_mode))
30888 /* _Decimal128 must use even/odd register pairs. */
30889 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30890 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
30892 else
30894 first_reg = ALTIVEC_ARG_RETURN;
30895 n_regs = 1;
30898 par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
30899 for (i = 0; i < n_elts; i++)
30901 rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
30902 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
30903 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
30906 return par;
30909 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
30911 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
30912 return gen_rtx_PARALLEL (DImode,
30913 gen_rtvec (2,
30914 gen_rtx_EXPR_LIST (VOIDmode,
30915 gen_rtx_REG (SImode, GP_ARG_RETURN),
30916 const0_rtx),
30917 gen_rtx_EXPR_LIST (VOIDmode,
30918 gen_rtx_REG (SImode,
30919 GP_ARG_RETURN + 1),
30920 GEN_INT (4))));
30922 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
30924 return gen_rtx_PARALLEL (DCmode,
30925 gen_rtvec (4,
30926 gen_rtx_EXPR_LIST (VOIDmode,
30927 gen_rtx_REG (SImode, GP_ARG_RETURN),
30928 const0_rtx),
30929 gen_rtx_EXPR_LIST (VOIDmode,
30930 gen_rtx_REG (SImode,
30931 GP_ARG_RETURN + 1),
30932 GEN_INT (4)),
30933 gen_rtx_EXPR_LIST (VOIDmode,
30934 gen_rtx_REG (SImode,
30935 GP_ARG_RETURN + 2),
30936 GEN_INT (8)),
30937 gen_rtx_EXPR_LIST (VOIDmode,
30938 gen_rtx_REG (SImode,
30939 GP_ARG_RETURN + 3),
30940 GEN_INT (12))));
30943 mode = TYPE_MODE (valtype);
30944 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
30945 || POINTER_TYPE_P (valtype))
30946 mode = TARGET_32BIT ? SImode : DImode;
30948 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30949 /* _Decimal128 must use an even/odd register pair. */
30950 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30951 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
30952 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
30953 regno = FP_ARG_RETURN;
30954 else if (TREE_CODE (valtype) == COMPLEX_TYPE
30955 && targetm.calls.split_complex_arg)
30956 return rs6000_complex_function_value (mode);
30957 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
30958 return register is used in both cases, and we won't see V2DImode/V2DFmode
30959 for pure altivec, combine the two cases. */
30960 else if (TREE_CODE (valtype) == VECTOR_TYPE
30961 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
30962 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
30963 regno = ALTIVEC_ARG_RETURN;
30964 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
30965 && (mode == DFmode || mode == DCmode
30966 || mode == TFmode || mode == TCmode))
30967 return spe_build_register_parallel (mode, GP_ARG_RETURN);
30968 else
30969 regno = GP_ARG_RETURN;
30971 return gen_rtx_REG (mode, regno);
30974 /* Define how to find the value returned by a library function
30975 assuming the value has mode MODE. */
30976 rtx
30977 rs6000_libcall_value (enum machine_mode mode)
30979 unsigned int regno;
30981 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
30983 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
30984 return gen_rtx_PARALLEL (DImode,
30985 gen_rtvec (2,
30986 gen_rtx_EXPR_LIST (VOIDmode,
30987 gen_rtx_REG (SImode, GP_ARG_RETURN),
30988 const0_rtx),
30989 gen_rtx_EXPR_LIST (VOIDmode,
30990 gen_rtx_REG (SImode,
30991 GP_ARG_RETURN + 1),
30992 GEN_INT (4))));
30995 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
30996 /* _Decimal128 must use an even/odd register pair. */
30997 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
30998 else if (SCALAR_FLOAT_MODE_P (mode)
30999 && TARGET_HARD_FLOAT && TARGET_FPRS
31000 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
31001 regno = FP_ARG_RETURN;
31002 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31003 return register is used in both cases, and we won't see V2DImode/V2DFmode
31004 for pure altivec, combine the two cases. */
31005 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
31006 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
31007 regno = ALTIVEC_ARG_RETURN;
31008 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
31009 return rs6000_complex_function_value (mode);
31010 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31011 && (mode == DFmode || mode == DCmode
31012 || mode == TFmode || mode == TCmode))
31013 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31014 else
31015 regno = GP_ARG_RETURN;
31017 return gen_rtx_REG (mode, regno);
31021 /* Return true if we use LRA instead of reload pass. */
31022 static bool
31023 rs6000_lra_p (void)
31025 return rs6000_lra_flag;
31028 /* Given FROM and TO register numbers, say whether this elimination is allowed.
31029 Frame pointer elimination is automatically handled.
31031 For the RS/6000, if frame pointer elimination is being done, we would like
31032 to convert ap into fp, not sp.
31034 We need r30 if -mminimal-toc was specified, and there are constant pool
31035 references. */
31037 static bool
31038 rs6000_can_eliminate (const int from, const int to)
31040 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
31041 ? ! frame_pointer_needed
31042 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
31043 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
31044 : true);
31047 /* Define the offset between two registers, FROM to be eliminated and its
31048 replacement TO, at the start of a routine. */
31049 HOST_WIDE_INT
31050 rs6000_initial_elimination_offset (int from, int to)
31052 rs6000_stack_t *info = rs6000_stack_info ();
31053 HOST_WIDE_INT offset;
31055 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31056 offset = info->push_p ? 0 : -info->total_size;
31057 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31059 offset = info->push_p ? 0 : -info->total_size;
31060 if (FRAME_GROWS_DOWNWARD)
31061 offset += info->fixed_size + info->vars_size + info->parm_size;
31063 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31064 offset = FRAME_GROWS_DOWNWARD
31065 ? info->fixed_size + info->vars_size + info->parm_size
31066 : 0;
31067 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31068 offset = info->total_size;
31069 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31070 offset = info->push_p ? info->total_size : 0;
31071 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
31072 offset = 0;
31073 else
31074 gcc_unreachable ();
31076 return offset;
31079 static rtx
31080 rs6000_dwarf_register_span (rtx reg)
31082 rtx parts[8];
31083 int i, words;
31084 unsigned regno = REGNO (reg);
31085 enum machine_mode mode = GET_MODE (reg);
31087 if (TARGET_SPE
31088 && regno < 32
31089 && (SPE_VECTOR_MODE (GET_MODE (reg))
31090 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
31091 && mode != SFmode && mode != SDmode && mode != SCmode)))
31092 ;
31093 else
31094 return NULL_RTX;
31096 regno = REGNO (reg);
31098 /* The duality of the SPE register size wreaks all kinds of havoc.
31099 This is a way of distinguishing r0 in 32-bits from r0 in
31100 64-bits. */
31101 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
31102 gcc_assert (words <= 4);
31103 for (i = 0; i < words; i++, regno++)
31105 if (BYTES_BIG_ENDIAN)
31107 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
31108 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
31110 else
31112 parts[2 * i] = gen_rtx_REG (SImode, regno);
31113 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
31117 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
31120 /* Fill in sizes for SPE register high parts in table used by unwinder. */
31122 static void
31123 rs6000_init_dwarf_reg_sizes_extra (tree address)
31125 if (TARGET_SPE)
31127 int i;
31128 enum machine_mode mode = TYPE_MODE (char_type_node);
31129 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31130 rtx mem = gen_rtx_MEM (BLKmode, addr);
31131 rtx value = gen_int_mode (4, mode);
31133 for (i = 1201; i < 1232; i++)
31135 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
31136 HOST_WIDE_INT offset
31137 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
31139 emit_move_insn (adjust_address (mem, mode, offset), value);
31143 if (TARGET_MACHO && ! TARGET_ALTIVEC)
31145 int i;
31146 enum machine_mode mode = TYPE_MODE (char_type_node);
31147 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31148 rtx mem = gen_rtx_MEM (BLKmode, addr);
31149 rtx value = gen_int_mode (16, mode);
31151 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
31152 The unwinder still needs to know the size of Altivec registers. */
31154 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
31156 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
31157 HOST_WIDE_INT offset
31158 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
31160 emit_move_insn (adjust_address (mem, mode, offset), value);
31165 /* Map internal gcc register numbers to DWARF2 register numbers. */
31167 unsigned int
31168 rs6000_dbx_register_number (unsigned int regno)
31170 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
31171 return regno;
31172 if (regno == LR_REGNO)
31173 return 108;
31174 if (regno == CTR_REGNO)
31175 return 109;
31176 if (CR_REGNO_P (regno))
31177 return regno - CR0_REGNO + 86;
31178 if (regno == CA_REGNO)
31179 return 101; /* XER */
31180 if (ALTIVEC_REGNO_P (regno))
31181 return regno - FIRST_ALTIVEC_REGNO + 1124;
31182 if (regno == VRSAVE_REGNO)
31183 return 356;
31184 if (regno == VSCR_REGNO)
31185 return 67;
31186 if (regno == SPE_ACC_REGNO)
31187 return 99;
31188 if (regno == SPEFSCR_REGNO)
31189 return 612;
31190 /* SPE high reg number. We get these values of regno from
31191 rs6000_dwarf_register_span. */
31192 gcc_assert (regno >= 1200 && regno < 1232);
31193 return regno;
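/* For example, under DWARF2 debugging LR maps to 108, CTR to 109,
   CR2 to 88 (86 + 2), the first AltiVec register to 1124, and any
   GPR or FPR (regno <= 63) keeps its gcc number unchanged.  */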
31196 /* target hook eh_return_filter_mode */
31197 static enum machine_mode
31198 rs6000_eh_return_filter_mode (void)
31200 return TARGET_32BIT ? SImode : word_mode;
31203 /* Target hook for scalar_mode_supported_p. */
31204 static bool
31205 rs6000_scalar_mode_supported_p (enum machine_mode mode)
31207 if (DECIMAL_FLOAT_MODE_P (mode))
31208 return default_decimal_float_supported_p ();
31209 else
31210 return default_scalar_mode_supported_p (mode);
31213 /* Target hook for vector_mode_supported_p. */
31214 static bool
31215 rs6000_vector_mode_supported_p (enum machine_mode mode)
31218 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
31219 return true;
31221 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
31222 return true;
31224 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
31225 return true;
31227 else
31228 return false;
31231 /* Target hook for invalid_arg_for_unprototyped_fn. */
31232 static const char *
31233 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
31235 return (!rs6000_darwin64_abi
31236 && typelist == 0
31237 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
31238 && (funcdecl == NULL_TREE
31239 || (TREE_CODE (funcdecl) == FUNCTION_DECL
31240 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
31241 ? N_("AltiVec argument passed to unprototyped function")
31242 : NULL;
31245 /* For TARGET_SECURE_PLT 32-bit PIC code we can avoid the PIC register
31246 setup by calling the hidden function __stack_chk_fail_local instead
31247 of calling __stack_chk_fail directly. Otherwise it is better to call
31248 __stack_chk_fail directly. */
31250 static tree ATTRIBUTE_UNUSED
31251 rs6000_stack_protect_fail (void)
31253 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
31254 ? default_hidden_stack_protect_fail ()
31255 : default_external_stack_protect_fail ();
31258 void
31259 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
31260 int num_operands ATTRIBUTE_UNUSED)
31262 if (rs6000_warn_cell_microcode)
31264 const char *temp;
31265 int insn_code_number = recog_memoized (insn);
31266 location_t location = INSN_LOCATION (insn);
31268 /* Punt on insns we cannot recognize. */
31269 if (insn_code_number < 0)
31270 return;
31272 temp = get_insn_template (insn_code_number, insn);
31274 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
31275 warning_at (location, OPT_mwarn_cell_microcode,
31276 "emitting microcode insn %s\t[%s] #%d",
31277 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31278 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
31279 warning_at (location, OPT_mwarn_cell_microcode,
31280 "emitting conditional microcode insn %s\t[%s] #%d",
31281 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31285 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
31287 #if TARGET_ELF
31288 static unsigned HOST_WIDE_INT
31289 rs6000_asan_shadow_offset (void)
31291 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
31293 #endif
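/* A worked example, assuming ASan's usual shadow mapping of
   shadow = (addr >> 3) + offset: on a 64-bit target the offset above is
   1 << 41 == 0x20000000000, so address 0x1000 has its shadow byte at
   (0x1000 >> 3) + 0x20000000000 == 0x20000000200.  */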
31295 /* Mask options that we want to support inside of attribute((target)) and
31296 #pragma GCC target operations. Note, we do not include things like
31297 64/32-bit, endianness, hard/soft floating point, etc. that would have
31298 different calling sequences. */
31300 struct rs6000_opt_mask {
31301 const char *name; /* option name */
31302 HOST_WIDE_INT mask; /* mask to set */
31303 bool invert; /* invert sense of mask */
31304 bool valid_target; /* option is a target option */
31307 static struct rs6000_opt_mask const rs6000_opt_masks[] =
31309 { "altivec", OPTION_MASK_ALTIVEC, false, true },
31310 { "cmpb", OPTION_MASK_CMPB, false, true },
31311 { "crypto", OPTION_MASK_CRYPTO, false, true },
31312 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
31313 { "dlmzb", OPTION_MASK_DLMZB, false, true },
31314 { "fprnd", OPTION_MASK_FPRND, false, true },
31315 { "hard-dfp", OPTION_MASK_DFP, false, true },
31316 { "htm", OPTION_MASK_HTM, false, true },
31317 { "isel", OPTION_MASK_ISEL, false, true },
31318 { "mfcrf", OPTION_MASK_MFCRF, false, true },
31319 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
31320 { "mulhw", OPTION_MASK_MULHW, false, true },
31321 { "multiple", OPTION_MASK_MULTIPLE, false, true },
31322 { "popcntb", OPTION_MASK_POPCNTB, false, true },
31323 { "popcntd", OPTION_MASK_POPCNTD, false, true },
31324 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
31325 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
31326 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
31327 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
31328 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
31329 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
31330 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
31331 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
31332 { "string", OPTION_MASK_STRING, false, true },
31333 { "update", OPTION_MASK_NO_UPDATE, true , true },
31334 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
31335 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
31336 { "vsx", OPTION_MASK_VSX, false, true },
31337 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
31338 #ifdef OPTION_MASK_64BIT
31339 #if TARGET_AIX_OS
31340 { "aix64", OPTION_MASK_64BIT, false, false },
31341 { "aix32", OPTION_MASK_64BIT, true, false },
31342 #else
31343 { "64", OPTION_MASK_64BIT, false, false },
31344 { "32", OPTION_MASK_64BIT, true, false },
31345 #endif
31346 #endif
31347 #ifdef OPTION_MASK_EABI
31348 { "eabi", OPTION_MASK_EABI, false, false },
31349 #endif
31350 #ifdef OPTION_MASK_LITTLE_ENDIAN
31351 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
31352 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
31353 #endif
31354 #ifdef OPTION_MASK_RELOCATABLE
31355 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
31356 #endif
31357 #ifdef OPTION_MASK_STRICT_ALIGN
31358 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
31359 #endif
31360 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
31361 { "string", OPTION_MASK_STRING, false, false },
31364 /* Builtin mask mapping for printing the flags. */
31365 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
31367 { "altivec", RS6000_BTM_ALTIVEC, false, false },
31368 { "vsx", RS6000_BTM_VSX, false, false },
31369 { "spe", RS6000_BTM_SPE, false, false },
31370 { "paired", RS6000_BTM_PAIRED, false, false },
31371 { "fre", RS6000_BTM_FRE, false, false },
31372 { "fres", RS6000_BTM_FRES, false, false },
31373 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
31374 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
31375 { "popcntd", RS6000_BTM_POPCNTD, false, false },
31376 { "cell", RS6000_BTM_CELL, false, false },
31377 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
31378 { "crypto", RS6000_BTM_CRYPTO, false, false },
31379 { "htm", RS6000_BTM_HTM, false, false },
31380 { "hard-dfp", RS6000_BTM_DFP, false, false },
31381 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
31384 /* Option variables that we want to support inside attribute((target)) and
31385 #pragma GCC target operations. */
31387 struct rs6000_opt_var {
31388 const char *name; /* option name */
31389 size_t global_offset; /* offset of the option in global_options. */
31390 size_t target_offset; /* offset of the option in target options. */
31393 static struct rs6000_opt_var const rs6000_opt_vars[] =
31395 { "friz",
31396 offsetof (struct gcc_options, x_TARGET_FRIZ),
31397 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
31398 { "avoid-indexed-addresses",
31399 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
31400 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
31401 { "paired",
31402 offsetof (struct gcc_options, x_rs6000_paired_float),
31403 offsetof (struct cl_target_option, x_rs6000_paired_float), },
31404 { "longcall",
31405 offsetof (struct gcc_options, x_rs6000_default_long_calls),
31406 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
31409 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
31410 parsing. Return true if there were no errors. */
31412 static bool
31413 rs6000_inner_target_options (tree args, bool attr_p)
31415 bool ret = true;
31417 if (args == NULL_TREE)
31420 else if (TREE_CODE (args) == STRING_CST)
31422 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31423 char *q;
31425 while ((q = strtok (p, ",")) != NULL)
31427 bool error_p = false;
31428 bool not_valid_p = false;
31429 const char *cpu_opt = NULL;
31431 p = NULL;
31432 if (strncmp (q, "cpu=", 4) == 0)
31434 int cpu_index = rs6000_cpu_name_lookup (q+4);
31435 if (cpu_index >= 0)
31436 rs6000_cpu_index = cpu_index;
31437 else
31439 error_p = true;
31440 cpu_opt = q+4;
31443 else if (strncmp (q, "tune=", 5) == 0)
31445 int tune_index = rs6000_cpu_name_lookup (q+5);
31446 if (tune_index >= 0)
31447 rs6000_tune_index = tune_index;
31448 else
31450 error_p = true;
31451 cpu_opt = q+5;
31454 else
31456 size_t i;
31457 bool invert = false;
31458 char *r = q;
31460 error_p = true;
31461 if (strncmp (r, "no-", 3) == 0)
31463 invert = true;
31464 r += 3;
31467 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31468 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31470 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31472 if (!rs6000_opt_masks[i].valid_target)
31473 not_valid_p = true;
31474 else
31476 error_p = false;
31477 rs6000_isa_flags_explicit |= mask;
31479 /* VSX needs altivec, so -mvsx automagically sets
31480 altivec. */
31481 if (mask == OPTION_MASK_VSX && !invert)
31482 mask |= OPTION_MASK_ALTIVEC;
31484 if (rs6000_opt_masks[i].invert)
31485 invert = !invert;
31487 if (invert)
31488 rs6000_isa_flags &= ~mask;
31489 else
31490 rs6000_isa_flags |= mask;
31492 break;
31495 if (error_p && !not_valid_p)
31497 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31498 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31500 size_t j = rs6000_opt_vars[i].global_offset;
31501 *((int *) ((char *)&global_options + j)) = !invert;
31502 error_p = false;
31503 break;
31508 if (error_p)
31510 const char *eprefix, *esuffix;
31512 ret = false;
31513 if (attr_p)
31515 eprefix = "__attribute__((__target__(";
31516 esuffix = ")))";
31518 else
31520 eprefix = "#pragma GCC target ";
31521 esuffix = "";
31524 if (cpu_opt)
31525 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
31526 q, esuffix);
31527 else if (not_valid_p)
31528 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
31529 else
31530 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
31535 else if (TREE_CODE (args) == TREE_LIST)
31539 tree value = TREE_VALUE (args);
31540 if (value)
31542 bool ret2 = rs6000_inner_target_options (value, attr_p);
31543 if (!ret2)
31544 ret = false;
31546 args = TREE_CHAIN (args);
31548 while (args != NULL_TREE);
31551 else
31552 gcc_unreachable ();
31554 return ret;
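/* For illustration, the strings parsed here are the same ones a user
   writes in the attribute or pragma, e.g. (a sketch):

     __attribute__((__target__("cpu=power8,no-vsx")))
     int foo (void);

   "cpu=" and "tune=" select a processor, a "no-" prefix inverts an
   option mask, and names such as "friz" come from rs6000_opt_vars.  */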
31557 /* Print out the target options as a list for -mdebug=target. */
31559 static void
31560 rs6000_debug_target_options (tree args, const char *prefix)
31562 if (args == NULL_TREE)
31563 fprintf (stderr, "%s<NULL>", prefix);
31565 else if (TREE_CODE (args) == STRING_CST)
31567 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31568 char *q;
31570 while ((q = strtok (p, ",")) != NULL)
31572 p = NULL;
31573 fprintf (stderr, "%s\"%s\"", prefix, q);
31574 prefix = ", ";
31578 else if (TREE_CODE (args) == TREE_LIST)
31582 tree value = TREE_VALUE (args);
31583 if (value)
31585 rs6000_debug_target_options (value, prefix);
31586 prefix = ", ";
31588 args = TREE_CHAIN (args);
31590 while (args != NULL_TREE);
31593 else
31594 gcc_unreachable ();
31596 return;
31600 /* Hook to validate attribute((target("..."))). */
31602 static bool
31603 rs6000_valid_attribute_p (tree fndecl,
31604 tree ARG_UNUSED (name),
31605 tree args,
31606 int flags)
31608 struct cl_target_option cur_target;
31609 bool ret;
31610 tree old_optimize = build_optimization_node (&global_options);
31611 tree new_target, new_optimize;
31612 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31614 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
31616 if (TARGET_DEBUG_TARGET)
31618 tree tname = DECL_NAME (fndecl);
31619 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
31620 if (tname)
31621 fprintf (stderr, "function: %.*s\n",
31622 (int) IDENTIFIER_LENGTH (tname),
31623 IDENTIFIER_POINTER (tname));
31624 else
31625 fprintf (stderr, "function: unknown\n");
31627 fprintf (stderr, "args:");
31628 rs6000_debug_target_options (args, " ");
31629 fprintf (stderr, "\n");
31631 if (flags)
31632 fprintf (stderr, "flags: 0x%x\n", flags);
31634 fprintf (stderr, "--------------------\n");
31637 old_optimize = build_optimization_node (&global_options);
31638 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
31640 /* If the function changed the optimization levels as well as setting target
31641 options, start with the optimizations specified. */
31642 if (func_optimize && func_optimize != old_optimize)
31643 cl_optimization_restore (&global_options,
31644 TREE_OPTIMIZATION (func_optimize));
31646 /* The target attributes may also change some optimization flags, so update
31647 the optimization options if necessary. */
31648 cl_target_option_save (&cur_target, &global_options);
31649 rs6000_cpu_index = rs6000_tune_index = -1;
31650 ret = rs6000_inner_target_options (args, true);
31652 /* Set up any additional state. */
31653 if (ret)
31655 ret = rs6000_option_override_internal (false);
31656 new_target = build_target_option_node (&global_options);
31658 else
31659 new_target = NULL;
31661 new_optimize = build_optimization_node (&global_options);
31663 if (!new_target)
31664 ret = false;
31666 else if (fndecl)
31668 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
31670 if (old_optimize != new_optimize)
31671 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
31674 cl_target_option_restore (&global_options, &cur_target);
31676 if (old_optimize != new_optimize)
31677 cl_optimization_restore (&global_options,
31678 TREE_OPTIMIZATION (old_optimize));
31680 return ret;
31684 /* Hook to validate the current #pragma GCC target and set the state, and
31685 update the macros based on what was changed. If ARGS is NULL, then
31686 POP_TARGET is used to reset the options. */
31688 bool
31689 rs6000_pragma_target_parse (tree args, tree pop_target)
31691 tree prev_tree = build_target_option_node (&global_options);
31692 tree cur_tree;
31693 struct cl_target_option *prev_opt, *cur_opt;
31694 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
31695 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
31697 if (TARGET_DEBUG_TARGET)
31699 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
31700 fprintf (stderr, "args:");
31701 rs6000_debug_target_options (args, " ");
31702 fprintf (stderr, "\n");
31704 if (pop_target)
31706 fprintf (stderr, "pop_target:\n");
31707 debug_tree (pop_target);
31709 else
31710 fprintf (stderr, "pop_target: <NULL>\n");
31712 fprintf (stderr, "--------------------\n");
31715 if (! args)
31717 cur_tree = ((pop_target)
31718 ? pop_target
31719 : target_option_default_node);
31720 cl_target_option_restore (&global_options,
31721 TREE_TARGET_OPTION (cur_tree));
31723 else
31725 rs6000_cpu_index = rs6000_tune_index = -1;
31726 if (!rs6000_inner_target_options (args, false)
31727 || !rs6000_option_override_internal (false)
31728 || (cur_tree = build_target_option_node (&global_options))
31729 == NULL_TREE)
31731 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
31732 fprintf (stderr, "invalid pragma\n");
31734 return false;
31738 target_option_current_node = cur_tree;
31740 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31741 change the macros that are defined. */
31742 if (rs6000_target_modify_macros_ptr)
31744 prev_opt = TREE_TARGET_OPTION (prev_tree);
31745 prev_bumask = prev_opt->x_rs6000_builtin_mask;
31746 prev_flags = prev_opt->x_rs6000_isa_flags;
31748 cur_opt = TREE_TARGET_OPTION (cur_tree);
31749 cur_flags = cur_opt->x_rs6000_isa_flags;
31750 cur_bumask = cur_opt->x_rs6000_builtin_mask;
31752 diff_bumask = (prev_bumask ^ cur_bumask);
31753 diff_flags = (prev_flags ^ cur_flags);
31755 if ((diff_flags != 0) || (diff_bumask != 0))
31757 /* Delete old macros. */
31758 rs6000_target_modify_macros_ptr (false,
31759 prev_flags & diff_flags,
31760 prev_bumask & diff_bumask);
31762 /* Define new macros. */
31763 rs6000_target_modify_macros_ptr (true,
31764 cur_flags & diff_flags,
31765 cur_bumask & diff_bumask);
31769 return true;
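/* For illustration, a typical use from C code (a sketch):

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     __vector int vadd (__vector int a, __vector int b) { return a + b; }
     #pragma GCC pop_options

   The pop restores the saved options via POP_TARGET, taking the
   ARGS == NULL path above.  */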
31773 /* Remember the last target of rs6000_set_current_function. */
31774 static GTY(()) tree rs6000_previous_fndecl;
31776 /* Establish appropriate back-end context for processing the function
31777 FNDECL. The argument might be NULL to indicate processing at top
31778 level, outside of any function scope. */
31779 static void
31780 rs6000_set_current_function (tree fndecl)
31782 tree old_tree = (rs6000_previous_fndecl
31783 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
31784 : NULL_TREE);
31786 tree new_tree = (fndecl
31787 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
31788 : NULL_TREE);
31790 if (TARGET_DEBUG_TARGET)
31792 bool print_final = false;
31793 fprintf (stderr, "\n==================== rs6000_set_current_function");
31795 if (fndecl)
31796 fprintf (stderr, ", fndecl %s (%p)",
31797 (DECL_NAME (fndecl)
31798 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
31799 : "<unknown>"), (void *)fndecl);
31801 if (rs6000_previous_fndecl)
31802 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
31804 fprintf (stderr, "\n");
31805 if (new_tree)
31807 fprintf (stderr, "\nnew fndecl target specific options:\n");
31808 debug_tree (new_tree);
31809 print_final = true;
31812 if (old_tree)
31814 fprintf (stderr, "\nold fndecl target specific options:\n");
31815 debug_tree (old_tree);
31816 print_final = true;
31819 if (print_final)
31820 fprintf (stderr, "--------------------\n");
31823 /* Only change the context if the function changes. This hook is called
31824 several times in the course of compiling a function, and we don't want to
31825 slow things down too much or call target_reinit when it isn't safe. */
31826 if (fndecl && fndecl != rs6000_previous_fndecl)
31828 rs6000_previous_fndecl = fndecl;
31829 if (old_tree == new_tree)
31832 else if (new_tree)
31834 cl_target_option_restore (&global_options,
31835 TREE_TARGET_OPTION (new_tree));
31836 if (TREE_TARGET_GLOBALS (new_tree))
31837 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31838 else
31839 TREE_TARGET_GLOBALS (new_tree)
31840 = save_target_globals_default_opts ();
31843 else if (old_tree)
31845 new_tree = target_option_current_node;
31846 cl_target_option_restore (&global_options,
31847 TREE_TARGET_OPTION (new_tree));
31848 if (TREE_TARGET_GLOBALS (new_tree))
31849 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
31850 else if (new_tree == target_option_default_node)
31851 restore_target_globals (&default_target_globals);
31852 else
31853 TREE_TARGET_GLOBALS (new_tree)
31854 = save_target_globals_default_opts ();
31860 /* Save the current options */
31862 static void
31863 rs6000_function_specific_save (struct cl_target_option *ptr,
31864 struct gcc_options *opts)
31866 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
31867 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
31870 /* Restore the current options */
31872 static void
31873 rs6000_function_specific_restore (struct gcc_options *opts,
31874 struct cl_target_option *ptr)
31877 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
31878 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
31879 (void) rs6000_option_override_internal (false);
31882 /* Print the current options */
31884 static void
31885 rs6000_function_specific_print (FILE *file, int indent,
31886 struct cl_target_option *ptr)
31888 rs6000_print_isa_options (file, indent, "Isa options set",
31889 ptr->x_rs6000_isa_flags);
31891 rs6000_print_isa_options (file, indent, "Isa options explicit",
31892 ptr->x_rs6000_isa_flags_explicit);
31895 /* Helper function to print the current isa or misc options on a line. */
31897 static void
31898 rs6000_print_options_internal (FILE *file,
31899 int indent,
31900 const char *string,
31901 HOST_WIDE_INT flags,
31902 const char *prefix,
31903 const struct rs6000_opt_mask *opts,
31904 size_t num_elements)
31906 size_t i;
31907 size_t start_column = 0;
31908 size_t cur_column;
31909 size_t max_column = 76;
31910 const char *comma = "";
31912 if (indent)
31913 start_column += fprintf (file, "%*s", indent, "");
31915 if (!flags)
31917 fprintf (file, DEBUG_FMT_S, string, "<none>");
31918 return;
31921 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
31923 /* Print the various mask options. */
31924 cur_column = start_column;
31925 for (i = 0; i < num_elements; i++)
31927 if ((flags & opts[i].mask) != 0)
31929 const char *no_str = opts[i].invert ? "no-" : "";
31930 size_t len = (strlen (comma)
31931 + strlen (prefix)
31932 + strlen (no_str)
31933 + strlen (opts[i].name));
31935 cur_column += len;
31936 if (cur_column > max_column)
31938 fprintf (file, ", \\\n%*s", (int)start_column, "");
31939 cur_column = start_column + len;
31940 comma = "";
31943 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
31944 opts[i].name);
31945 flags &= ~ opts[i].mask;
31946 comma = ", ";
31950 fputs ("\n", file);
31953 /* Helper function to print the current isa options on a line. */
31955 static void
31956 rs6000_print_isa_options (FILE *file, int indent, const char *string,
31957 HOST_WIDE_INT flags)
31959 rs6000_print_options_internal (file, indent, string, flags, "-m",
31960 &rs6000_opt_masks[0],
31961 ARRAY_SIZE (rs6000_opt_masks));
31964 static void
31965 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
31966 HOST_WIDE_INT flags)
31968 rs6000_print_options_internal (file, indent, string, flags, "",
31969 &rs6000_builtin_mask_names[0],
31970 ARRAY_SIZE (rs6000_builtin_mask_names));
31974 /* Hook to determine if one function can safely inline another. */
31976 static bool
31977 rs6000_can_inline_p (tree caller, tree callee)
31979 bool ret = false;
31980 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
31981 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
31983 /* If callee has no option attributes, then it is ok to inline. */
31984 if (!callee_tree)
31985 ret = true;
31987 /* If caller has no option attributes, but callee does then it is not ok to
31988 inline. */
31989 else if (!caller_tree)
31990 ret = false;
31992 else
31994 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
31995 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
31997 /* Callee's options should be a subset of the caller's, i.e. a vsx function
31998 can inline an altivec function but a non-vsx function can't inline a
31999 vsx function. */
32000 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32001 == callee_opts->x_rs6000_isa_flags)
32002 ret = true;
32005 if (TARGET_DEBUG_TARGET)
32006 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32007 (DECL_NAME (caller)
32008 ? IDENTIFIER_POINTER (DECL_NAME (caller))
32009 : "<unknown>"),
32010 (DECL_NAME (callee)
32011 ? IDENTIFIER_POINTER (DECL_NAME (callee))
32012 : "<unknown>"),
32013 (ret ? "can" : "cannot"));
32015 return ret;
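/* For illustration (a sketch): under the subset test above, a caller
   declared __attribute__((target("vsx"))) may inline a callee declared
   __attribute__((target("altivec"))), because the callee's ISA flags
   are contained in the caller's (-mvsx implies -maltivec); the reverse
   direction is rejected.  */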
32018 /* Allocate a stack temp and fixup the address so it meets the particular
32019 memory requirements (either offsettable or REG+REG addressing). */
32022 rs6000_allocate_stack_temp (enum machine_mode mode,
32023 bool offsettable_p,
32024 bool reg_reg_p)
32026 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32027 rtx addr = XEXP (stack, 0);
32028 int strict_p = (reload_in_progress || reload_completed);
32030 if (!legitimate_indirect_address_p (addr, strict_p))
32032 if (offsettable_p
32033 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32034 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32036 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32037 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32040 return stack;
32043 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
32044 to such a form to deal with memory reference instructions like STFIWX that
32045 only take reg+reg addressing. */
32048 rs6000_address_for_fpconvert (rtx x)
32050 int strict_p = (reload_in_progress || reload_completed);
32051 rtx addr;
32053 gcc_assert (MEM_P (x));
32054 addr = XEXP (x, 0);
32055 if (! legitimate_indirect_address_p (addr, strict_p)
32056 && ! legitimate_indexed_address_p (addr, strict_p))
32058 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32060 rtx reg = XEXP (addr, 0);
32061 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32062 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32063 gcc_assert (REG_P (reg));
32064 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32065 addr = reg;
32067 else if (GET_CODE (addr) == PRE_MODIFY)
32069 rtx reg = XEXP (addr, 0);
32070 rtx expr = XEXP (addr, 1);
32071 gcc_assert (REG_P (reg));
32072 gcc_assert (GET_CODE (expr) == PLUS);
32073 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32074 addr = reg;
32077 x = replace_equiv_address (x, copy_addr_to_reg (addr));
32080 return x;
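/* For illustration (a sketch): a PRE_INC address such as
   (pre_inc (reg r9)) is rewritten above by first adjusting the base
   register by the access size and then using a plain register address,
   which an instruction like stfiwx can accept.  */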
32083 /* Given a memory reference, if it is not in the form for altivec memory
32084 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
32085 convert to the altivec format. */
32088 rs6000_address_for_altivec (rtx x)
32090 gcc_assert (MEM_P (x));
32091 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
32093 rtx addr = XEXP (x, 0);
32094 int strict_p = (reload_in_progress || reload_completed);
32096 if (!legitimate_indexed_address_p (addr, strict_p)
32097 && !legitimate_indirect_address_p (addr, strict_p))
32098 addr = copy_to_mode_reg (Pmode, addr);
32100 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
32101 x = change_address (x, GET_MODE (x), addr);
32104 return x;
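/* For illustration: given an address in a register REG, the code above
   rewrites it as (and REG -16), matching the way the AltiVec lvx/stvx
   instructions ignore the low four bits of the address.  */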
32107 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32109 On the RS/6000, all integer constants are acceptable, most won't be valid
32110 for particular insns, though. Only easy FP constants are acceptable. */
32112 static bool
32113 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
32115 if (TARGET_ELF && rs6000_tls_referenced_p (x))
32116 return false;
32118 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
32119 || GET_MODE (x) == VOIDmode
32120 || (TARGET_POWERPC64 && mode == DImode)
32121 || easy_fp_constant (x, mode)
32122 || easy_vector_constant (x, mode));
32127 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32129 void
32130 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32132 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32133 rtx toc_load = NULL_RTX;
32134 rtx toc_restore = NULL_RTX;
32135 rtx func_addr;
32136 rtx abi_reg = NULL_RTX;
32137 rtx call[4];
32138 int n_call;
32139 rtx insn;
32141 /* Handle longcall attributes. */
32142 if (INTVAL (cookie) & CALL_LONG)
32143 func_desc = rs6000_longcall_ref (func_desc);
32145 /* Handle indirect calls. */
32146 if (GET_CODE (func_desc) != SYMBOL_REF
32147 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
32149 /* Save the TOC into its reserved slot before the call,
32150 and prepare to restore it after the call. */
32151 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32152 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32153 rtx stack_toc_mem = gen_frame_mem (Pmode,
32154 gen_rtx_PLUS (Pmode, stack_ptr,
32155 stack_toc_offset));
32156 toc_restore = gen_rtx_SET (VOIDmode, toc_reg, stack_toc_mem);
32158 /* Can we optimize saving the TOC in the prologue or
32159 do we need to do it at every call? */
32160 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32161 cfun->machine->save_toc_in_prologue = true;
32162 else
32164 MEM_VOLATILE_P (stack_toc_mem) = 1;
32165 emit_move_insn (stack_toc_mem, toc_reg);
32168 if (DEFAULT_ABI == ABI_ELFv2)
32170 /* A function pointer in the ELFv2 ABI is just a plain address, but
32171 the ABI requires it to be loaded into r12 before the call. */
32172 func_addr = gen_rtx_REG (Pmode, 12);
32173 emit_move_insn (func_addr, func_desc);
32174 abi_reg = func_addr;
32176 else
32178 /* A function pointer under AIX is a pointer to a data area whose
32179 first word contains the actual address of the function, whose
32180 second word contains a pointer to its TOC, and whose third word
32181 contains a value to place in the static chain register (r11).
32182 Note that if we load the static chain, our "trampoline" need
32183 not have any executable code. */
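/* If the descriptor were described as a C type, it would look roughly
   like this sketch (the real layout is fixed by the ABI, not by any
   declaration in this file):

     struct aix_func_desc {
       void *code;          // word 0: address of the function's code
       void *toc;           // word 1: TOC pointer for the callee
       void *static_chain;  // word 2: value for r11, if used
     };  */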
32185 /* Load up address of the actual function. */
32186 func_desc = force_reg (Pmode, func_desc);
32187 func_addr = gen_reg_rtx (Pmode);
32188 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
32190 /* Prepare to load the TOC of the called function. Note that the
32191 TOC load must happen immediately before the actual call so
32192 that unwinding the TOC registers works correctly. See the
32193 comment in frob_update_context. */
32194 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32195 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32196 gen_rtx_PLUS (Pmode, func_desc,
32197 func_toc_offset));
32198 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32200 /* If we have a static chain, load it up. */
32201 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32203 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32204 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32205 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32206 gen_rtx_PLUS (Pmode, func_desc,
32207 func_sc_offset));
32208 emit_move_insn (sc_reg, func_sc_mem);
32209 abi_reg = sc_reg;
32213 else
32215 /* Direct calls use the TOC: for local calls, the callee will
32216 assume the TOC register is set; for non-local calls, the
32217 PLT stub needs the TOC register. */
32218 abi_reg = toc_reg;
32219 func_addr = func_desc;
32222 /* Create the call. */
32223 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
32224 if (value != NULL_RTX)
32225 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32226 n_call = 1;
32228 if (toc_load)
32229 call[n_call++] = toc_load;
32230 if (toc_restore)
32231 call[n_call++] = toc_restore;
32233 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
32235 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32236 insn = emit_call_insn (insn);
32238 /* Mention all registers defined by the ABI to hold information
32239 as uses in CALL_INSN_FUNCTION_USAGE. */
32240 if (abi_reg)
32241 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
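/* The emitted call therefore has the rough RTL shape below (a sketch,
   for an indirect call returning a value under the AIX ABI):

     (parallel [(set (reg 3) (call (mem:SI (reg func_addr)) flag))
                (use (mem func_toc))           ;; TOC load for callee
                (set (reg 2) (mem stack_toc))  ;; TOC restore afterwards
                (clobber (reg lr))])  */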
32244 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32246 void
32247 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32249 rtx call[2];
32250 rtx insn;
32252 gcc_assert (INTVAL (cookie) == 0);
32254 /* Create the call. */
32255 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
32256 if (value != NULL_RTX)
32257 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32259 call[1] = simple_return_rtx;
32261 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32262 insn = emit_call_insn (insn);
32264 /* Note use of the TOC register. */
32265 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
32266 /* We need to also mark a use of the link register since the function we
32267 sibling-call to will use it to return to our caller. */
32268 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
32271 /* Return whether we need to always update the saved TOC pointer when we update
32272 the stack pointer. */
32274 static bool
32275 rs6000_save_toc_in_prologue_p (void)
32277 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
32280 #ifdef HAVE_GAS_HIDDEN
32281 # define USE_HIDDEN_LINKONCE 1
32282 #else
32283 # define USE_HIDDEN_LINKONCE 0
32284 #endif
32286 /* Fills in the label name that should be used for a 476 link stack thunk. */
32288 void
32289 get_ppc476_thunk_name (char name[32])
32291 gcc_assert (TARGET_LINK_STACK);
32293 if (USE_HIDDEN_LINKONCE)
32294 sprintf (name, "__ppc476.get_thunk");
32295 else
32296 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32299 /* This function emits the simple thunk routine that is used to preserve
32300 the link stack on the 476 cpu. */
32302 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32303 static void
32304 rs6000_code_end (void)
32306 char name[32];
32307 tree decl;
32309 if (!TARGET_LINK_STACK)
32310 return;
32312 get_ppc476_thunk_name (name);
32314 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32315 build_function_type_list (void_type_node, NULL_TREE));
32316 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32317 NULL_TREE, void_type_node);
32318 TREE_PUBLIC (decl) = 1;
32319 TREE_STATIC (decl) = 1;
32321 #if RS6000_WEAK
32322 if (USE_HIDDEN_LINKONCE)
32324 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
32325 targetm.asm_out.unique_section (decl, 0);
32326 switch_to_section (get_named_section (decl, NULL, 0));
32327 DECL_WEAK (decl) = 1;
32328 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32329 targetm.asm_out.globalize_label (asm_out_file, name);
32330 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32331 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32333 else
32334 #endif
32336 switch_to_section (text_section);
32337 ASM_OUTPUT_LABEL (asm_out_file, name);
32340 DECL_INITIAL (decl) = make_node (BLOCK);
32341 current_function_decl = decl;
32342 init_function_start (decl);
32343 first_function_block_is_cold = false;
32344 /* Make sure unwind info is emitted for the thunk if needed. */
32345 final_start_function (emit_barrier (), asm_out_file, 1);
32347 fputs ("\tblr\n", asm_out_file);
32349 final_end_function ();
32350 init_insn_lengths ();
32351 free_after_compilation (cfun);
32352 set_cfun (NULL);
32353 current_function_decl = NULL;
32356 /* Add r30 to hard reg set if the prologue sets it up and it is not
32357 pic_offset_table_rtx. */
32359 static void
32360 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32362 if (!TARGET_SINGLE_PIC_BASE
32363 && TARGET_TOC
32364 && TARGET_MINIMAL_TOC
32365 && get_pool_size () != 0)
32366 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32370 /* Helper function for rs6000_split_logical to emit a logical instruction after
32371 splitting the operation into single GPR registers.
32373 DEST is the destination register.
32374 OP1 and OP2 are the input source registers.
32375 CODE is the base operation (AND, IOR, XOR, NOT).
32376 MODE is the machine mode.
32377 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32378 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32379 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32380 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32381 formation of the AND instructions. */
32383 static void
32384 rs6000_split_logical_inner (rtx dest,
32385 rtx op1,
32386 rtx op2,
32387 enum rtx_code code,
32388 enum machine_mode mode,
32389 bool complement_final_p,
32390 bool complement_op1_p,
32391 bool complement_op2_p,
32392 rtx clobber_reg)
32394 rtx bool_rtx;
32395 rtx set_rtx;
32397 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32398 if (op2 && GET_CODE (op2) == CONST_INT
32399 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32400 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32402 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32403 HOST_WIDE_INT value = INTVAL (op2) & mask;
32405 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32406 if (code == AND)
32408 if (value == 0)
32410 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
32411 return;
32414 else if (value == mask)
32416 if (!rtx_equal_p (dest, op1))
32417 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32418 return;
32422 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32423 into separate ORI/ORIS or XORI/XORIS instructions. */
32424 else if (code == IOR || code == XOR)
32426 if (value == 0)
32428 if (!rtx_equal_p (dest, op1))
32429 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32430 return;
32435 if (complement_op1_p)
32436 op1 = gen_rtx_NOT (mode, op1);
32438 if (complement_op2_p)
32439 op2 = gen_rtx_NOT (mode, op2);
32441 bool_rtx = ((code == NOT)
32442 ? gen_rtx_NOT (mode, op1)
32443 : gen_rtx_fmt_ee (code, mode, op1, op2));
32445 if (complement_final_p)
32446 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32448 set_rtx = gen_rtx_SET (VOIDmode, dest, bool_rtx);
32450 /* Is this AND with an explicit clobber? */
32451 if (clobber_reg)
32453 rtx clobber = gen_rtx_CLOBBER (VOIDmode, clobber_reg);
32454 set_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set_rtx, clobber));
32457 emit_insn (set_rtx);
32458 return;
32461 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32462 operations are split immediately during RTL generation to allow for more
32463 optimizations of the AND/IOR/XOR.
32465 OPERANDS is an array containing the destination and two input operands.
32466 CODE is the base operation (AND, IOR, XOR, NOT).
32467 MODE is the machine mode.
32468 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32469 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32470 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32471 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32472 formation of the AND instructions. */
32474 static void
32475 rs6000_split_logical_di (rtx operands[3],
32476 enum rtx_code code,
32477 bool complement_final_p,
32478 bool complement_op1_p,
32479 bool complement_op2_p,
32480 rtx clobber_reg)
32482 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32483 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32484 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32485 enum hi_lo { hi = 0, lo = 1 };
32486 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32487 size_t i;
32489 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32490 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32491 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32492 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32494 if (code == NOT)
32495 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32496 else
32498 if (GET_CODE (operands[2]) != CONST_INT)
32500 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32501 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32503 else
32505 HOST_WIDE_INT value = INTVAL (operands[2]);
32506 HOST_WIDE_INT value_hi_lo[2];
32508 gcc_assert (!complement_final_p);
32509 gcc_assert (!complement_op1_p);
32510 gcc_assert (!complement_op2_p);
32512 value_hi_lo[hi] = value >> 32;
32513 value_hi_lo[lo] = value & lower_32bits;
32515 for (i = 0; i < 2; i++)
32517 HOST_WIDE_INT sub_value = value_hi_lo[i];
32519 if (sub_value & sign_bit)
32520 sub_value |= upper_32bits;
32522 op2_hi_lo[i] = GEN_INT (sub_value);
32524 /* If this is an AND instruction, check to see if we need to load
32525 the value in a register. */
32526 if (code == AND && sub_value != -1 && sub_value != 0
32527 && !and_operand (op2_hi_lo[i], SImode))
32528 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32533 for (i = 0; i < 2; i++)
32535 /* Split large IOR/XOR operations. */
32536 if ((code == IOR || code == XOR)
32537 && GET_CODE (op2_hi_lo[i]) == CONST_INT
32538 && !complement_final_p
32539 && !complement_op1_p
32540 && !complement_op2_p
32541 && clobber_reg == NULL_RTX
32542 && !logical_const_operand (op2_hi_lo[i], SImode))
32544 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32545 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32546 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32547 rtx tmp = gen_reg_rtx (SImode);
32549 /* Make sure the constant is sign extended. */
32550 if ((hi_16bits & sign_bit) != 0)
32551 hi_16bits |= upper_32bits;
32553 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32554 code, SImode, false, false, false,
32555 NULL_RTX);
32557 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32558 code, SImode, false, false, false,
32559 NULL_RTX);
32561 else
32562 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32563 code, SImode, complement_final_p,
32564 complement_op1_p, complement_op2_p,
32565 clobber_reg);
32568 return;
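/* For illustration (a sketch): on a 32-bit target, a DImode XOR with
   the constant 0x12345678 splits so that the high word is an XOR with
   0 (a simple move), while the low word's constant has both halves
   nonzero, does not fit a single xori/xoris, and so becomes two insns:

     xoris tmp,src,0x1234    # high 16 bits of the low word
     xori  dst,tmp,0x5678    # low 16 bits of the low word  */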
32571 /* Split the insns that make up boolean operations operating on multiple GPR
32572 registers. The boolean MD patterns ensure that the inputs either are
32573 exactly the same as the output registers, or there is no overlap.
32575 OPERANDS is an array containing the destination and two input operands.
32576 CODE is the base operation (AND, IOR, XOR, NOT).
32577 MODE is the machine mode.
32578 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32579 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32580 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
32581 CLOBBER_REG is either NULL or a scratch register of type CC to allow
32582 formation of the AND instructions. */
32584 void
32585 rs6000_split_logical (rtx operands[3],
32586 enum rtx_code code,
32587 bool complement_final_p,
32588 bool complement_op1_p,
32589 bool complement_op2_p,
32590 rtx clobber_reg)
32592 enum machine_mode mode = GET_MODE (operands[0]);
32593 enum machine_mode sub_mode;
32594 rtx op0, op1, op2;
32595 int sub_size, regno0, regno1, nregs, i;
32597 /* If this is DImode, use the specialized version that can run before
32598 register allocation. */
32599 if (mode == DImode && !TARGET_POWERPC64)
32601 rs6000_split_logical_di (operands, code, complement_final_p,
32602 complement_op1_p, complement_op2_p,
32603 clobber_reg);
32604 return;
32607 op0 = operands[0];
32608 op1 = operands[1];
32609 op2 = (code == NOT) ? NULL_RTX : operands[2];
32610 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
32611 sub_size = GET_MODE_SIZE (sub_mode);
32612 regno0 = REGNO (op0);
32613 regno1 = REGNO (op1);
32615 gcc_assert (reload_completed);
32616 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32617 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
32619 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
32620 gcc_assert (nregs > 1);
32622 if (op2 && REG_P (op2))
32623 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
32625 for (i = 0; i < nregs; i++)
32627 int offset = i * sub_size;
32628 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
32629 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
32630 rtx sub_op2 = ((code == NOT)
32631 ? NULL_RTX
32632 : simplify_subreg (sub_mode, op2, mode, offset));
32634 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
32635 complement_final_p, complement_op1_p,
32636 complement_op2_p, clobber_reg);
32639 return;
32643 /* Return true if the peephole2 can combine a load involving a combination of
32644 an addis instruction and a load with an offset that can be fused together on
32645 a power8.
32647 The operands are:
32648 operands[0] register set with addis
32649 operands[1] value set via addis
32650 operands[2] target register being loaded
32651 operands[3] D-form memory reference using operands[0].
32653 In addition, we are passed a boolean that is true if this is a peephole2,
32654 and we can use it to see if the addis_reg is dead after the insn and can be
32655 replaced by the target register. */
32657 bool
32658 fusion_gpr_load_p (rtx *operands, bool peep2_p)
32660 rtx addis_reg = operands[0];
32661 rtx addis_value = operands[1];
32662 rtx target = operands[2];
32663 rtx mem = operands[3];
32664 rtx addr;
32665 rtx base_reg;
32667 /* Validate arguments. */
32668 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
32669 return false;
32671 if (!base_reg_operand (target, GET_MODE (target)))
32672 return false;
32674 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
32675 return false;
32677 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
32678 return false;
32680 /* Allow sign/zero extension. */
32681 if (GET_CODE (mem) == ZERO_EXTEND
32682 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
32683 mem = XEXP (mem, 0);
32685 if (!MEM_P (mem))
32686 return false;
32688 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
32689 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
32690 return false;
32692 /* Validate that the register used to load the high value is either the
32693 register being loaded, or we can safely replace its use in a peephole2.
32695 If this is a peephole2, we assume that there are 2 instructions in the
32696 peephole (addis and load), so we want to check if the target register was
32697 not used in the memory address and the register to hold the addis result
32698 is dead after the peephole. */
32699 if (REGNO (addis_reg) != REGNO (target))
32701 if (!peep2_p)
32702 return false;
32704 if (reg_mentioned_p (target, mem))
32705 return false;
32707 if (!peep2_reg_dead_p (2, addis_reg))
32708 return false;
32710 /* If the target register being loaded is the stack pointer, we must
32711 avoid loading any other value into it, even temporarily. */
32712 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
32713 return false;
32716 base_reg = XEXP (addr, 0);
32717 return REGNO (addis_reg) == REGNO (base_reg);
32720 /* During the peephole2 pass, adjust and expand the insns for a load fusion
32721 sequence. We adjust the addis register to use the target register. If the
32722 load sign extends, we adjust the code to do a zero extending load followed by an
32723 explicit sign extension, since the fusion only covers zero extending
32724 loads.
32726 The operands are:
32727 operands[0] register set with addis (to be replaced with target)
32728 operands[1] value set via addis
32729 operands[2] target register being loaded
32730 operands[3] D-form memory reference using operands[0]. */
32732 void
32733 expand_fusion_gpr_load (rtx *operands)
32735 rtx addis_value = operands[1];
32736 rtx target = operands[2];
32737 rtx orig_mem = operands[3];
32738 rtx new_addr, new_mem, orig_addr, offset;
32739 enum rtx_code plus_or_lo_sum;
32740 enum machine_mode target_mode = GET_MODE (target);
32741 enum machine_mode extend_mode = target_mode;
32742 enum machine_mode ptr_mode = Pmode;
32743 enum rtx_code extend = UNKNOWN;
32744 rtx addis_reg = ((ptr_mode == target_mode)
32745 ? target
32746 : simplify_subreg (ptr_mode, target, target_mode, 0));
32748 if (GET_CODE (orig_mem) == ZERO_EXTEND
32749 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
32751 extend = GET_CODE (orig_mem);
32752 orig_mem = XEXP (orig_mem, 0);
32753 target_mode = GET_MODE (orig_mem);
32756 gcc_assert (MEM_P (orig_mem));
32758 orig_addr = XEXP (orig_mem, 0);
32759 plus_or_lo_sum = GET_CODE (orig_addr);
32760 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
32762 offset = XEXP (orig_addr, 1);
32763 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
32764 new_mem = change_address (orig_mem, target_mode, new_addr);
32766 if (extend != UNKNOWN)
32767 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
32769 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
32770 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
32772 if (extend == SIGN_EXTEND)
32774 int sub_off = ((BYTES_BIG_ENDIAN)
32775 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
32776 : 0);
32777 rtx sign_reg
32778 = simplify_subreg (target_mode, target, extend_mode, sub_off);
32780 emit_insn (gen_rtx_SET (VOIDmode, target,
32781 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
32784 return;
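/* For illustration (a sketch of the peephole2 rewrite): the pair

     addis r9,r2,sym@toc@ha
     lwz   r10,sym@toc@l(r9)

   becomes, once r9 is dead and r10 is not used in the address,

     addis r10,r2,sym@toc@ha
     lwz   r10,sym@toc@l(r10)

   so that both insns write the same register and can fuse on power8.  */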
32787 /* Return a string to fuse an addis instruction with a gpr load to the same
32788 register that the addis instruction set up. The code is complicated,
32789 so we call output_asm_insn directly, and just return "".
32791 The operands are:
32792 operands[0] register set with addis (must be same reg as target).
32793 operands[1] value set via addis
32794 operands[2] target register being loaded
32795 operands[3] D-form memory reference using operands[0]. */
32797 const char *
32798 emit_fusion_gpr_load (rtx *operands)
32800 rtx addis_reg = operands[0];
32801 rtx addis_value = operands[1];
32802 rtx target = operands[2];
32803 rtx mem = operands[3];
32804 rtx fuse_ops[10];
32805 rtx addr;
32806 rtx load_offset;
32807 const char *addis_str = NULL;
32808 const char *load_str = NULL;
32809 const char *extend_insn = NULL;
32810 const char *mode_name = NULL;
32811 char insn_template[80];
32812 enum machine_mode mode;
32813 const char *comment_str = ASM_COMMENT_START;
32814 bool sign_p = false;
32816 gcc_assert (REG_P (addis_reg) && REG_P (target));
32817 gcc_assert (REGNO (addis_reg) == REGNO (target));
32819 if (*comment_str == ' ')
32820 comment_str++;
32822 /* Allow sign/zero extension. */
32823 if (GET_CODE (mem) == ZERO_EXTEND)
32824 mem = XEXP (mem, 0);
32826 else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
32828 sign_p = true;
32829 mem = XEXP (mem, 0);
32832 gcc_assert (MEM_P (mem));
32833 addr = XEXP (mem, 0);
32834 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
32835 gcc_unreachable ();
32837 load_offset = XEXP (addr, 1);
32839 /* Now emit the load instruction to the same register. */
32840 mode = GET_MODE (mem);
32841 switch (mode)
32843 case QImode:
32844 mode_name = "char";
32845 load_str = "lbz";
32846 extend_insn = "extsb %0,%0";
32847 break;
32849 case HImode:
32850 mode_name = "short";
32851 load_str = "lhz";
32852 extend_insn = "extsh %0,%0";
32853 break;
32855 case SImode:
32856 mode_name = "int";
32857 load_str = "lwz";
32858 extend_insn = "extsw %0,%0";
32859 break;
32861 case DImode:
32862 if (TARGET_POWERPC64)
32864 mode_name = "long";
32865 load_str = "ld";
32867 else
32868 gcc_unreachable ();
32869 break;
32871 default:
32872 gcc_unreachable ();
32875 /* Emit the addis instruction. */
32876 fuse_ops[0] = target;
32877 if (satisfies_constraint_L (addis_value))
32879 fuse_ops[1] = addis_value;
32880 addis_str = "lis %0,%v1";
32883 else if (GET_CODE (addis_value) == PLUS)
32885 rtx op0 = XEXP (addis_value, 0);
32886 rtx op1 = XEXP (addis_value, 1);
32888 if (REG_P (op0) && CONST_INT_P (op1)
32889 && satisfies_constraint_L (op1))
32891 fuse_ops[1] = op0;
32892 fuse_ops[2] = op1;
32893 addis_str = "addis %0,%1,%v2";
32897 else if (GET_CODE (addis_value) == HIGH)
32899 rtx value = XEXP (addis_value, 0);
32900 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
32902 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
32903 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
32904 if (TARGET_ELF)
32905 addis_str = "addis %0,%2,%1@toc@ha";
32907 else if (TARGET_XCOFF)
32908 addis_str = "addis %0,%1@u(%2)";
32910 else
32911 gcc_unreachable ();
32914 else if (GET_CODE (value) == PLUS)
32916 rtx op0 = XEXP (value, 0);
32917 rtx op1 = XEXP (value, 1);
32919 if (GET_CODE (op0) == UNSPEC
32920 && XINT (op0, 1) == UNSPEC_TOCREL
32921 && CONST_INT_P (op1))
32923 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
32924 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
32925 fuse_ops[3] = op1;
32926 if (TARGET_ELF)
32927 addis_str = "addis %0,%2,%1+%3@toc@ha";
32929 else if (TARGET_XCOFF)
32930 addis_str = "addis %0,%1+%3@u(%2)";
32932 else
32933 gcc_unreachable ();
32937 else if (satisfies_constraint_L (value))
32939 fuse_ops[1] = value;
32940 addis_str = "lis %0,%v1";
32943 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
32945 fuse_ops[1] = value;
32946 addis_str = "lis %0,%1@ha";
32950 if (!addis_str)
32951 fatal_insn ("Could not generate addis value for fusion", addis_value);
32953 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
32954 comment_str, mode_name);
32955 output_asm_insn (insn_template, fuse_ops);
32957 /* Emit the D-form load instruction. */
32958 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
32960 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
32961 fuse_ops[1] = load_offset;
32962 output_asm_insn (insn_template, fuse_ops);
32965 else if (GET_CODE (load_offset) == UNSPEC
32966 && XINT (load_offset, 1) == UNSPEC_TOCREL)
32968 if (TARGET_ELF)
32969 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
32971 else if (TARGET_XCOFF)
32972 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
32974 else
32975 gcc_unreachable ();
32977 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
32978 output_asm_insn (insn_template, fuse_ops);
32981 else if (GET_CODE (load_offset) == PLUS
32982 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
32983 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
32984 && CONST_INT_P (XEXP (load_offset, 1)))
32986 rtx tocrel_unspec = XEXP (load_offset, 0);
32987 if (TARGET_ELF)
32988 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
32990 else if (TARGET_XCOFF)
32991 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
32993 else
32994 gcc_unreachable ();
32996 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
32997 fuse_ops[2] = XEXP (load_offset, 1);
32998 output_asm_insn (insn_template, fuse_ops);
33001 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
33003 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33005 fuse_ops[1] = load_offset;
33006 output_asm_insn (insn_template, fuse_ops);
33009 else
33010 fatal_insn ("Unable to generate load offset for fusion", load_offset);
33012 /* Handle sign extension. The peephole2 pass generates this as a separate
33013 insn, but we handle it just in case it got reattached. */
33014 if (sign_p)
33016 gcc_assert (extend_insn != NULL);
33017 output_asm_insn (extend_insn, fuse_ops);
33020 return "";
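/* For illustration, for a TOC-relative SImode load the routine above
   emits something like the following (a sketch; the comment character
   depends on ASM_COMMENT_START for the target):

     addis 10,2,sym@toc@ha		# gpr load fusion, type int
     lwz 10,sym@toc@l(10)  */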
33024 struct gcc_target targetm = TARGET_INITIALIZER;
33026 #include "gt-rs6000.h"