/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-attr.h"
44 #include "basic-block.h"
45 #include "diagnostic-core.h"
51 #include "target-def.h"
52 #include "common/common-target.h"
53 #include "langhooks.h"
56 #include "sched-int.h"
58 #include "tree-flow.h"
61 #include "tm-constrs.h"
64 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
67 #include "gstab.h" /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
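
/* Sketch of how the fields above relate (hedged; the exact computation lives
   in rs6000_stack_info later in this file): the *_save_offset fields are
   byte offsets from the stack pointer value on function entry, the *_size
   fields are byte counts, and total_size is the full, rounded frame
   allocation.  For instance, the GP save area holds the registers from
   first_gp_reg_save through r31, so

     gp_size == (32 - first_gp_reg_save) * reg_size.  */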
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
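
/* This structure hangs off of cfun, so a typical access elsewhere in this
   file looks like (sketch only):

     if (cfun->machine->save_toc_in_prologue)
       ... emit the TOC save in the prologue ...

   rs6000_init_machine_status (declared below) allocates one per function.  */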
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
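
/* Example of how these bits compose (sketch only): a caller that wants to
   know whether the double-precision divide estimate was requested tests the
   bit directly against the -mrecip control word used later in this file:

     if (rs6000_recip_control & RECIP_DF_DIV)
       ... generate an fre-based divide sequence ...  */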
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
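
/* Illustrative sketch (an assumption about how the option-override code
   consumes this table): the argument of -mrecip= is matched against each
   entry's string and the corresponding mask is OR'ed into the reciprocal
   control word, roughly:

     for (i = 0; i < ARRAY_SIZE (recip_options); i++)
       if (! strcmp (arg, recip_options[i].string))
	 mask |= recip_options[i].mask;  */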
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);
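
/* For example (sketch only), a caller can pick an insn generator at run time
   and emit through the shared type:

     gen_2arg_fn_t fn = TARGET_POWERPC64 ? gen_adddi3 : gen_addsi3;
     emit_insn (fn (dest, op0, op1));  */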
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
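
/* Note on the tables below: costs are expressed with COSTS_N_INSNS, which
   scales an instruction count into GCC's internal cost units, with
   COSTS_N_INSNS (1) being the cost of a single add (the Cell table below
   says as much).  So, e.g., COSTS_N_INSNS (19) for divsi on RIOS1 models an
   SImode divide as costing about nineteen adds.  */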
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction costs on RIOS1 processors.  */
struct processor_costs rios1_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (19),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on RIOS2 processors.  */
struct processor_costs rios2_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (13),   /* divsi */
  COSTS_N_INSNS (13),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  256,			/* cache line size */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
};

/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};
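
/* Note on the Cell entries above: expressions such as COSTS_N_INSNS (9/2)+2
   use C integer division, so 9/2 evaluates to 4 and the whole expression is
   COSTS_N_INSNS (4) plus two extra cost units; with COSTS_N_INSNS scaling
   four units per instruction, that is exactly the cost of 4.5 adds.  */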
/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
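
/* Sketch of the X-macro expansion above: each RS6000_BUILTIN_<kind> entry in
   rs6000-builtin.def, e.g. a hypothetical

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       MASK_ALTIVEC, RS6000_BTC_CONST, CODE_FOR_addv16qi3)

   expands to the initializer { NAME, ICODE, MASK, ATTR }, so including the
   .def file between the #define and #undef blocks builds the entire
   rs6000_builtin_info[] table at compile time.  */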
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;			/* builtin type to use.  */
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
     "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
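
/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) yields 0x80000000
   (the bit for %v0), and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) yields
   0x00000001 (the bit for %v31), matching the VRSAVE layout described
   above.  */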
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit machines.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
  POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
  POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
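
/* Example: rs6000_cpu_name_lookup ("power7") returns the index of the
   "power7" row in processor_target_table (usable for both -mcpu= and -mtune=
   handling), while an unrecognized string such as "not-a-cpu" returns -1.  */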
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
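
/* Worked example of the rounding division above: for V4SImode (16 bytes) in
   an AltiVec register, reg_size is UNITS_PER_ALTIVEC_WORD (16), so
   (16 + 16 - 1) / 16 == 1 register; for DFmode (8 bytes) in a 32-bit GPR,
   reg_size is UNITS_PER_WORD (4), so (8 + 4 - 1) / 4 == 2 registers.  */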
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
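
/* Example of the rules above: on a 32-bit target DImode needs two GPRs, so
   rs6000_hard_regno_mode_ok (31, DImode) is 0 (last_regno would run past
   r31), while starting at r30 is fine; CCmode is accepted only in CR0..CR7,
   and the CA register holds only BImode.  */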
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
#define DEBUG_FMT_D "%-32s= %d\n"
#define DEBUG_FMT_X "%-32s= 0x%x\n"
#define DEBUG_FMT_S "%-32s= %s\n"

/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  char costly_num[20];
  char nop_num[20];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;

  /* Map enum rs6000_vector to string.  */
  static const char *rs6000_debug_vector_unit[] = {
    "none",
    "altivec",
    "vsx",
    "paired",
    "spe",
    "other"
  };

  fprintf (stderr, "Register information: (last virtual reg = %d)\n",
	   LAST_VIRTUAL_REGISTER);
  rs6000_debug_reg_print (0, 31, "gr");
  rs6000_debug_reg_print (32, 63, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "ws reg_class = %s\n\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
      {
	nl = "\n";
	fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
		 GET_MODE_NAME (m),
		 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
		 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
      }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  {
	    fprintf (stderr,
		     "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		     GET_MODE_NAME (m),
		     (RS6000_RECIP_AUTO_RE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		     (RS6000_RECIP_AUTO_RSQRTE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
	  }

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "cpu",
	     processor_target_table[rs6000_cpu_index].name);

  if (rs6000_tune_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "tune",
	     processor_target_table[rs6000_tune_index].name);

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    default:
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }
= "unknown"; break;
1942 fprintf (stderr
, DEBUG_FMT_S
, "traceback", trace_str
);
1944 switch (rs6000_current_cmodel
)
1946 case CMODEL_SMALL
: cmodel_str
= "small"; break;
1947 case CMODEL_MEDIUM
: cmodel_str
= "medium"; break;
1948 case CMODEL_LARGE
: cmodel_str
= "large"; break;
1949 default: cmodel_str
= "unknown"; break;
1952 fprintf (stderr
, DEBUG_FMT_S
, "cmodel", cmodel_str
);
1954 switch (rs6000_current_abi
)
1956 case ABI_NONE
: abi_str
= "none"; break;
1957 case ABI_AIX
: abi_str
= "aix"; break;
1958 case ABI_V4
: abi_str
= "V4"; break;
1959 case ABI_DARWIN
: abi_str
= "darwin"; break;
1960 default: abi_str
= "unknown"; break;
1963 fprintf (stderr
, DEBUG_FMT_S
, "abi", abi_str
);
1965 if (rs6000_altivec_abi
)
1966 fprintf (stderr
, DEBUG_FMT_S
, "altivec_abi", "true");
1969 fprintf (stderr
, DEBUG_FMT_S
, "spe_abi", "true");
1971 if (rs6000_darwin64_abi
)
1972 fprintf (stderr
, DEBUG_FMT_S
, "darwin64_abi", "true");
1974 if (rs6000_float_gprs
)
1975 fprintf (stderr
, DEBUG_FMT_S
, "float_gprs", "true");
1977 fprintf (stderr
, DEBUG_FMT_S
, "always_hint", tf
[!!rs6000_always_hint
]);
1978 fprintf (stderr
, DEBUG_FMT_S
, "align_branch",
1979 tf
[!!rs6000_align_branch_targets
]);
1980 fprintf (stderr
, DEBUG_FMT_D
, "tls_size", rs6000_tls_size
);
1981 fprintf (stderr
, DEBUG_FMT_D
, "long_double_size",
1982 rs6000_long_double_type_size
);
1983 fprintf (stderr
, DEBUG_FMT_D
, "sched_restricted_insns_priority",
1984 (int)rs6000_sched_restricted_insns_priority
);
1985 fprintf (stderr
, DEBUG_FMT_D
, "Number of standard builtins",
1987 fprintf (stderr
, DEBUG_FMT_D
, "Number of rs6000 builtins",
1988 (int)RS6000_BUILTIN_COUNT
);
1989 fprintf (stderr
, DEBUG_FMT_X
, "Builtin mask", rs6000_builtin_mask
);
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  int r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[MQ_REGNO] = MQ_REGS;
  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = CA_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
  /* Precalculate vector information, this must be set up before the
     rs6000_hard_regno_nregs_internal below.  */
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
      rs6000_vector_reload[m][0] = CODE_FOR_nothing;
      rs6000_vector_reload[m][1] = CODE_FOR_nothing;
    }

  for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
    rs6000_constraints[c] = NO_REGS;

  /* The VSX hardware allows native alignment for vectors;
     TARGET_VSX_ALIGN_128 controls whether the compiler believes it can use
     native alignment or must still use 128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }
  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
     Altivec doesn't have 64-bit support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode] = VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
	= (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }

  /* TODO add SPE and paired floating point vector support.  */

  /* Register class constraints for the constraints that depend on compile
     switches.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;

  if (TARGET_VSX)
    {
      /* At present, we just use VSX_REGS, but we have different constraints
	 based on the use, in case we want to fine tune the default register
	 class used.  wa = any VSX register, wf = register class to use for
	 V4SF, wd = register class to use for V2DF, and ws = register class to
	 use for DF scalars.  */
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
						  ? VSX_REGS
						  : FLOAT_REGS);
    }

  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
  /* Set up the reload helper functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
	  rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
	  rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_di_store;
	  rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_di_load;
	  rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_di_store;
	  rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_di_load;
	  rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_di_store;
	  rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_di_load;
	  rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_di_store;
	  rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_di_load;
	  rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_di_store;
	  rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_di_load;
	  if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
	    {
	      rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
	      rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
	    }
	}
      else
	{
	  rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
	  rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
	  rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_si_store;
	  rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_si_load;
	  rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_si_store;
	  rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_si_load;
	  rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_si_store;
	  rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_si_load;
	  rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_si_store;
	  rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_si_load;
	  rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_si_store;
	  rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_si_load;
	  if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
	    {
	      rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
	      rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
	    }
	}
    }
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	rs6000_class_max_nregs[m][c]
	  = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
    }

  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;

  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
	warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
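
/* Editorial sketch (not part of GCC): the tables built above trade a
   per-query predicate for an O(1) array lookup, paying the full cost once
   at startup.  The shape of that precomputation, reduced to a
   hypothetical two-level table:  */
#if 0
#define N_MODES 16
#define N_REGS  64

static unsigned char mode_ok_p[N_MODES][N_REGS];

static void
init_tables (int (*ok_fn) (int regno, int mode))
{
  int r, m;
  for (r = 0; r < N_REGS; ++r)
    for (m = 0; m < N_MODES; ++m)
      mode_ok_p[m][r] = ok_fn (r, m) != 0;	/* precompute every pair */
}
#endif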
#if TARGET_MACHO
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */

static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
      darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }
  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    {
      target_flags |= MASK_ALTIVEC;
    }
}
#endif

/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */

unsigned
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC  : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	  : 0)
	  | ((TARGET_SPE)		    ? RS6000_BTM_SPE	  : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	  : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	  : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	  : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE  : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD  : 0)
	  | ((TARGET_POWERPC)		    ? RS6000_BTM_POWERPC  : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL     : 0));
}
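
/* Editorial sketch (not part of GCC): callers test this mask with plain
   bitwise AND, so each RS6000_BTM_* flag must be a distinct power of two.
   A hypothetical illustration of the idiom:  */
#if 0
#define BTM_ALTIVEC (1 << 0)
#define BTM_VSX     (1 << 1)

static int
builtin_enabled_p (unsigned mask, unsigned required)
{
  return (mask & required) == required;	/* all required ISA bits present */
}
/* e.g. builtin_enabled_p (BTM_ALTIVEC | BTM_VSX, BTM_VSX) => 1 */
#endif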
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */

static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  int set_masks;
  int cpu_index;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else
    {
      const char *default_cpu =
	(implicit_cpu ? implicit_cpu
	 : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = implicit_cpu != 0;
    }

  gcc_assert (cpu_index >= 0);

  target_flags &= ~set_masks;
  target_flags |= (processor_target_table[cpu_index].target_enable
		   & set_masks);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;

  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
	(rs6000_cpu == PROCESSOR_PPC8540 ? 1
	 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
	 : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
	  || (main_target_opt->x_rs6000_spe != rs6000_spe)
	  || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }

  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
				  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  target_flags &= ~MASK_MULTIPLE;
	  if ((target_flags_explicit & MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  target_flags &= ~MASK_STRING;
	  if ((target_flags_explicit & MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
	  || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (target_flags_explicit & MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    target_flags &= ~ MASK_VSX;
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      /* The hardware will allow VSX and little endian, but until we make sure
	 things like vector select, etc. work don't allow VSX on little endian
	 systems at this point.  */
      else if (!BYTES_BIG_ENDIAN)
	msg = N_("-mvsx used with little endian code");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
	{
	  if (target_flags_explicit & MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  target_flags &= ~ MASK_VSX;
	  target_flags_explicit |= MASK_VSX;
	}
    }

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_VSX)
    target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_POPCNTD)
    target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_DFP)
    target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_CMPB)
    target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_FPRND)
    target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
  else if (TARGET_POPCNTB)
    target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
  else if (TARGET_ALTIVEC)
    target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);

  if (global_init_p)
    {
      /* If the appropriate debug option is enabled, replace the target hooks
	 with debug versions that call the real version and then print
	 debugging information.  */
      if (TARGET_DEBUG_COST)
	{
	  targetm.rtx_costs = rs6000_debug_rtx_costs;
	  targetm.address_cost = rs6000_debug_address_cost;
	  targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
	}

      if (TARGET_DEBUG_ADDR)
	{
	  targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
	  targetm.legitimize_address = rs6000_debug_legitimize_address;
	  rs6000_secondary_reload_class_ptr
	    = rs6000_debug_secondary_reload_class;
	  rs6000_secondary_memory_needed_ptr
	    = rs6000_debug_secondary_memory_needed;
	  rs6000_cannot_change_mode_class_ptr
	    = rs6000_debug_cannot_change_mode_class;
	  rs6000_preferred_reload_class_ptr
	    = rs6000_debug_preferred_reload_class;
	  rs6000_legitimize_reload_address_ptr
	    = rs6000_debug_legitimize_reload_address;
	  rs6000_mode_dependent_address_ptr
	    = rs6000_debug_mode_dependent_address;
	}

      if (rs6000_veclibabi_name)
	{
	  if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	    rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
	  else
	    {
	      error ("unknown vectorization library ABI type (%s) for "
		     "-mveclibabi= switch", rs6000_veclibabi_name);
	      ret = false;
	    }
	}
    }
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
	{
	  if (main_target_opt != NULL &&
	      !main_target_opt->x_rs6000_altivec_abi)
	    error ("target attribute or pragma changes AltiVec ABI");
	  else
	    rs6000_altivec_abi = 1;
	}

      /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden.  */
      if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
	TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
    }

  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:

      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;

      target_flags &= ~MASK_STRING;

      break;

    default:

      if (have_cpu && !(target_flags_explicit & MASK_ISEL))
	target_flags &= ~MASK_ISEL;

      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }

  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_POWER7
			&& rs6000_cpu != PROCESSOR_PPCA2
			&& rs6000_cpu != PROCESSOR_CELL
			&& rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5
			 || rs6000_cpu == PROCESSOR_POWER7);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6
				 || rs6000_cpu == PROCESSOR_POWER7
				 || rs6000_cpu == PROCESSOR_PPCE500MC
				 || rs6000_cpu == PROCESSOR_PPCE500MC64
				 || rs6000_cpu == PROCESSOR_PPCE5500
				 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);

  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	 aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
	  || rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    {
	      can_override_loop_align = 1;
	      align_loops = 16;
	    }
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI != ABI_AIX)
    targetm.calls.split_complex_arg = NULL;

  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RIOS1:
	rs6000_cost = &rios1_cost;
	break;

      case PROCESSOR_RIOS2:
	rs6000_cost = &rios2_cost;
	break;

      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;

      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;

      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;

      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;

      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;

      default:
	gcc_unreachable ();
      }

  if (global_init_p)
    {
      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			     rs6000_cost->simultaneous_prefetches,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			     rs6000_cost->cache_line_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);

      /* If using typedef char *va_list, signal that
	 __builtin_va_start (&ap, 0) can be optimized to
	 ap = __builtin_next_arg (0).  */
      if (DEFAULT_ABI != ABI_V4)
	targetm.expand_builtin_va_start = NULL;
    }

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);

  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for -mrecip=%s", q);
		  invert = false;
		  mask = 0;
		  ret = false;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }

  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
	     (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
	     (rs6000_builtin_mask & RS6000_BTM_VSX)     ? ", vsx"     : "",
	     (rs6000_builtin_mask & RS6000_BTM_PAIRED)  ? ", paired"  : "",
	     (rs6000_builtin_mask & RS6000_BTM_SPE)     ? ", spe"     : "");

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function-specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node ();

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
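
/* Editorial sketch (not part of GCC): a recurring idiom in the function
   above is "target_flags |= (MASKS & ~target_flags_explicit)", which
   turns on implied option bits while leaving alone any bit the user set
   or cleared explicitly on the command line.  Reduced to a hypothetical
   helper:  */
#if 0
static unsigned
apply_implied_masks (unsigned flags, unsigned implied, unsigned explicit_bits)
{
  return flags | (implied & ~explicit_bits);	/* user choices win */
}
/* apply_implied_masks (0, 0x6, 0x2) => 0x4: bit 0x2 was explicit, skip it.  */
#endif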
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return
     the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
	  || rs6000_cpu == PROCESSOR_POWER5
	  || rs6000_cpu == PROCESSOR_POWER6
	  || rs6000_cpu == PROCESSOR_POWER7))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
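
/* Editorial note: rs6000_loop_align returns a log2 alignment, so the
   maximum number of padding bytes ever inserted before the loop is
   (1 << log) - 1; e.g. a 32-byte-aligned loop (log = 5) can be preceded
   by at most 31 bytes of padding.  */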
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N iterations.  This routine does not determine how many
   iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
	return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
	return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
	return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
      case scalar_stmt:
      case scalar_load:
      case scalar_store:
      case vector_stmt:
      case vector_load:
      case vector_store:
      case vec_to_scalar:
      case scalar_to_vec:
      case cond_branch_not_taken:
	return 1;

      case vec_perm:
	if (TARGET_VSX)
	  return 3;
	else
	  return 1;

      case vec_promote_demote:
	if (TARGET_VSX)
	  return 4;
	else
	  return 1;

      case cond_branch_taken:
	return 3;

      case unaligned_load:
	if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	  {
	    elements = TYPE_VECTOR_SUBPARTS (vectype);
	    if (elements == 2)
	      /* Double word aligned.  */
	      return 2;

	    if (elements == 4)
	      {
		switch (misalign)
		  {
		    case 8:
		      /* Double word aligned.  */
		      return 2;

		    case -1:
		      /* Unknown misalignment.  */
		    case 4:
		    case 12:
		      /* Word aligned.  */
		      return 22;

		    default:
		      gcc_unreachable ();
		  }
	      }
	  }

	if (TARGET_ALTIVEC)
	  /* Misaligned loads are not supported.  */
	  gcc_unreachable ();

	return 2;

      case unaligned_store:
	if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	  {
	    elements = TYPE_VECTOR_SUBPARTS (vectype);
	    if (elements == 2)
	      /* Double word aligned.  */
	      return 2;

	    if (elements == 4)
	      {
		switch (misalign)
		  {
		    case 8:
		      /* Double word aligned.  */
		      return 2;

		    case -1:
		      /* Unknown misalignment.  */
		    case 4:
		    case 12:
		      /* Word aligned.  */
		      return 23;

		    default:
		      gcc_unreachable ();
		  }
	      }
	  }

	if (TARGET_ALTIVEC)
	  /* Misaligned stores are not supported.  */
	  gcc_unreachable ();

	return 2;

      case vec_construct:
	elements = TYPE_VECTOR_SUBPARTS (vectype);
	elem_type = TREE_TYPE (vectype);
	/* 32-bit vectors loaded into registers are stored as double
	   precision, so we need n/2 converts in addition to the usual
	   n/2 merges to construct a vector of short floats from them.  */
	if (SCALAR_FLOAT_TYPE_P (elem_type)
	    && TYPE_PRECISION (elem_type) == 32)
	  return elements + 1;
	else
	  return elements / 2 + 1;

      default:
	gcc_unreachable ();
    }
}
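
/* Editorial note: these are relative unit costs, not cycles.  For
   example, with an unknown misalignment on a four-element vector load
   the function returns 22, so the vectorizer will only choose a
   misaligned-load strategy when it replaces roughly that many unit-cost
   scalar statements.  */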
/* Implement targetm.vectorize.preferred_simd_mode.  */

static enum machine_mode
rs6000_preferred_simd_mode (enum machine_mode mode)
{
  if (TARGET_VSX)
    switch (mode)
      {
      case DFmode:
	return V2DFmode;
      default:;
      }
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case SFmode:
	return V4SFmode;
      case DImode:
	return V2DImode;
      case SImode:
	return V4SImode;
      case HImode:
	return V8HImode;
      case QImode:
	return V16QImode;
      default:;
      }
  if (TARGET_SPE)
    switch (mode)
      {
      case SFmode:
	return V4SFmode;
      case SImode:
	return V2SImode;
      default:;
      }
  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;
  return word_mode;
}

/* Implement targetm.vectorize.init_cost.  */

static void *
rs6000_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *cost = XNEWVEC (unsigned, 3);
  cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
  return cost;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		      struct _stmt_vec_info *stmt_info, int misalign,
		      enum vect_cost_model_location where)
{
  unsigned *cost = (unsigned *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
							 misalign);
      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost[where] += retval;
    }

  return retval;
}

/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
		    unsigned *body_cost, unsigned *epilogue_cost)
{
  unsigned *cost = (unsigned *) data;
  *prologue_cost = cost[vect_prologue];
  *body_cost = cost[vect_body];
  *epilogue_cost = cost[vect_epilogue];
}

/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
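
/* Editorial sketch (not part of GCC): the three hooks above form one
   accumulator protocol -- init_cost allocates three buckets,
   add_stmt_cost adds count * per-statement cost into one bucket, and
   finish_cost reads the buckets back.  A hypothetical driver:  */
#if 0
static void
cost_protocol_example (void)
{
  unsigned prologue, body, epilogue;
  void *data = rs6000_init_cost (NULL);

  /* 4 vector statements in the loop body, no misalignment.  */
  rs6000_add_stmt_cost (data, 4, vector_stmt, NULL, 0, vect_body);
  rs6000_finish_cost (data, &prologue, &body, &epilogue);
  rs6000_destroy_cost_data (data);
}
#endif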
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case BUILT_IN_ATAN2:
	case BUILT_IN_HYPOT:
	case BUILT_IN_POW:
	  n_args = 2;
	  /* fall through */

	case BUILT_IN_ACOS:
	case BUILT_IN_ACOSH:
	case BUILT_IN_ASIN:
	case BUILT_IN_ASINH:
	case BUILT_IN_ATAN:
	case BUILT_IN_ATANH:
	case BUILT_IN_CBRT:
	case BUILT_IN_COS:
	case BUILT_IN_COSH:
	case BUILT_IN_ERF:
	case BUILT_IN_ERFC:
	case BUILT_IN_EXP2:
	case BUILT_IN_EXP:
	case BUILT_IN_EXPM1:
	case BUILT_IN_LGAMMA:
	case BUILT_IN_LOG10:
	case BUILT_IN_LOG1P:
	case BUILT_IN_LOG2:
	case BUILT_IN_LOG:
	case BUILT_IN_SIN:
	case BUILT_IN_SINH:
	case BUILT_IN_SQRT:
	case BUILT_IN_TAN:
	case BUILT_IN_TANH:
	  bdecl = builtin_decl_implicit (fn);
	  suffix = "d2";				/* pow -> powd2 */
	  if (el_mode != DFmode
	      || n != 2
	      || !bdecl)
	    return NULL_TREE;
	  break;

	case BUILT_IN_ATAN2F:
	case BUILT_IN_HYPOTF:
	case BUILT_IN_POWF:
	  n_args = 2;
	  /* fall through */

	case BUILT_IN_ACOSF:
	case BUILT_IN_ACOSHF:
	case BUILT_IN_ASINF:
	case BUILT_IN_ASINHF:
	case BUILT_IN_ATANF:
	case BUILT_IN_ATANHF:
	case BUILT_IN_CBRTF:
	case BUILT_IN_COSF:
	case BUILT_IN_COSHF:
	case BUILT_IN_ERFF:
	case BUILT_IN_ERFCF:
	case BUILT_IN_EXP2F:
	case BUILT_IN_EXPF:
	case BUILT_IN_EXPM1F:
	case BUILT_IN_LGAMMAF:
	case BUILT_IN_LOG10F:
	case BUILT_IN_LOG1PF:
	case BUILT_IN_LOG2F:
	case BUILT_IN_LOGF:
	case BUILT_IN_SINF:
	case BUILT_IN_SINHF:
	case BUILT_IN_SQRTF:
	case BUILT_IN_TANF:
	case BUILT_IN_TANHF:
	  bdecl = builtin_decl_implicit (fn);
	  suffix = "4";			/* powf -> powf4 */
	  if (el_mode != SFmode
	      || n != 4
	      || !bdecl)
	    return NULL_TREE;
	  break;

	default:
	  return NULL_TREE;
	}
    }
  else
    return NULL_TREE;

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
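
/* Editorial note: a worked example of the name construction above.  The
   "__builtin_" prefix is skipped with pointer arithmetic and the MASS
   suffix appended, so for a V2DF pow the declaration built is "powd2":

     "__builtin_pow" + sizeof ("__builtin_") - 1   ->  "pow"
     strcat ("pow", "d2")                          ->  "powd2"  */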
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
				    tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
	     IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || !TARGET_VECTORIZE_BUILTINS)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case BUILT_IN_COPYSIGN:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
	  break;
	case BUILT_IN_COPYSIGNF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
	  break;
	case BUILT_IN_SQRT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
	  break;
	case BUILT_IN_SQRTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
	  break;
	case BUILT_IN_CEIL:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
	  break;
	case BUILT_IN_CEILF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
	  break;
	case BUILT_IN_FLOOR:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
	  break;
	case BUILT_IN_FLOORF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
	  break;
	case BUILT_IN_FMA:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
	  break;
	case BUILT_IN_FMAF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
	  else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
	  break;
	case BUILT_IN_TRUNC:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
	  break;
	case BUILT_IN_TRUNCF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
	  break;
	case BUILT_IN_NEARBYINT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && flag_unsafe_math_optimizations
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
	  break;
	case BUILT_IN_NEARBYINTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && flag_unsafe_math_optimizations
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
	  break;
	case BUILT_IN_RINT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && !flag_trapping_math
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
	  break;
	case BUILT_IN_RINTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && !flag_trapping_math
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
	  break;
	default:
	  break;
	}
    }
  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn
	= (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case RS6000_BUILTIN_RSQRTF:
	  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
	  break;
	case RS6000_BUILTIN_RSQRT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
	  break;
	case RS6000_BUILTIN_RECIPF:
	  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
	  break;
	case RS6000_BUILTIN_RECIP:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
	  break;
	default:
	  break;
	}
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (fndecl, type_out, type_in);

  return NULL_TREE;
}
3915 static const char *rs6000_default_cpu
;
/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
	{
	  fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
	  start = "";
	}

      if (global_options_set.x_rs6000_cpu_index)
	{
	  fprintf (file, "%s -mcpu=%s", start,
		   processor_target_table[rs6000_cpu_index].name);
	  start = "";
	}

      if (global_options_set.x_rs6000_tune_index)
	{
	  fprintf (file, "%s -mtune=%s", start,
		   processor_target_table[rs6000_tune_index].name);
	  start = "";
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G %d", start,
		   g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

  if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
    {
      switch_to_section (toc_section);
      switch_to_section (text_section);
    }
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with {cal|addi} */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with {cau|addis} */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif
  else
    return 2;
}
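
/* For illustration, assuming a 64-bit target (the actual instruction
   sequences are chosen by the move expanders; these are just the counts
   this function computes):

     0x0000000000001234  -> 1 insn  (addi)
     0x0000000012340000  -> 1 insn  (addis)
     0x0000000012345678  -> 2 insns (addis; ori)
     0x1234567800000000  -> 3 insns (the 32-bit high part as above,
				     plus one shift via the "+ 1").  */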
int
num_insns_constant (rtx op, enum machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && mask64_operand (op, mode))
	return 2;
      else
#endif
	return num_insns_constant_wide (INTVAL (op));

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (rv, l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      if (mode == VOIDmode || mode == DImode)
	{
	  high = CONST_DOUBLE_HIGH (op);
	  low = CONST_DOUBLE_LOW (op);
	}
      else
	{
	  long l[2];
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
	  high = l[WORDS_BIG_ENDIAN == 0];
	  low = l[WORDS_BIG_ENDIAN != 0];
	}

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (mask64_operand (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, nunits - 1);
  splat_val = val;
  msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit, which can be
     done by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
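
/* A worked instance of the folding loop above (illustrative values only):
   for a V8HImode constant whose elements are all 0x0505 and COPIES == 2,
   one iteration halves bitsize to 8, checks that the two byte halves of
   0x0505 agree, and reduces splat_val to 0x05; since 5 fits the signed
   5-bit vspltis immediate range (EASY_VECTOR_15), the whole constant is
   reachable with a single vspltisb.  */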
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  if (mode == V2DImode)
    {
      /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
	 easy.  */
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
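
/* Illustrative walk-through of the STEP/COPIES ladder above (no new
   logic): for a V16QImode constant, STEP starts at 16/4 = 4 with
   COPIES = 1 (one meaningful byte per word, the vspltisw pattern), then
   STEP 2 (vspltish), then STEP 1 (vspltisb).  For V4SImode, STEP starts
   at 1, so each later try doubles COPIES instead, folding the 32-bit
   element into 16- and then 8-bit replicas.  */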
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      if (zero_constant (vec, mode))
	return "xxlxor %x0,%x0,%x0";

      if (mode == V2DImode
	  && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
	return "vspltisw %0,-1";
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     pattern of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
/* Initialize the paired-float vector TARGET to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }

  return;
}
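
/* Summary of the identities the recursion above relies on (restating
   the cases, not adding logic): only GE is lowered directly, as a
   subtraction feeding a select against 0.0.  LT is GE with the two
   select operands swapped; LE (a, b) is GE (b, a); EQ is built from
   both one-sided compares via the min/max trick (two subtractions and
   three selects); NE and the UN* codes recurse on their ordered
   counterparts with operands exchanged.  */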
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target,
				  gen_rtx_XOR (mode, target, target)));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx op1 = XVECEXP (vals, 0, 1);
      if (all_same)
	{
	  if (!MEM_P (op0) && !REG_P (op0))
	    op0 = force_reg (inner_mode, op0);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op0));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op0));
	}
      else
	{
	  op0 = force_reg (inner_mode, op0);
	  op1 = force_reg (inner_mode, op1);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op0, op1));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op0, op1));
	}
      return;
    }

  /* With single precision floating point on VSX, single precision is
     internally represented as a double, so either make 2 V2DF vectors
     and convert those vectors to single precision, or do one
     conversion and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
    {
      if (all_same)
	{
	  rtx freg = gen_reg_rtx (V4SFmode);
	  rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));

	  emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
	  emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	  emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	  emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	  emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	  rs6000_expand_extract_even (target, flt_even, flt_odd);
	}
      return;
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (VOIDmode,
							   target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx (*set_func) (rtx, rtx, rtx, rtx)
	= ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
      emit_insn (set_func (target, target, val, GEN_INT (elt)));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
  x = gen_rtx_UNSPEC (mode,
		      gen_rtvec (3, target, reg,
				 force_reg (V16QImode, x)),
		      UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode))
    {
      switch (mode)
	{
	default:
	  break;
	case V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
	  return;
	case V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
	  return;
	case V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
	  return;
	}
    }

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS    ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void) in;
  (void) out;
  gcc_unreachable ();
#endif
}
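
/* For the first worked example above (IN == 0x00fff000000fffff), the
   returned operands are a rotate count of 20 (64 - 44) with mask
   0x000000ffffffffff, then a rotate count of 44 with mask
   0x00ffffffffffffff: rotate so the inner run of zeros sits at the
   most-significant end, clear it, rotate back, and clear the outer
   zeros.  (This restates the annotated values above; the matching insn
   patterns emit the actual rotate-and-mask pair.)  */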
/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
	 subreg:TI and reg:TF.  Decimal float modes are like integer
	 modes (only low part of each register used) for this
	 purpose.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == SImode || mode == DImode || mode == TImode
	      || mode == DDmode || mode == TDmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DFmode
	      || GET_MODE (SUBREG_REG (op)) == TFmode))
	return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
	 reg:TI.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == DFmode || mode == TFmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DImode
	      || GET_MODE (SUBREG_REG (op)) == TImode
	      || GET_MODE (SUBREG_REG (op)) == DDmode
	      || GET_MODE (SUBREG_REG (op)) == TDmode))
	return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
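
/* For illustration (hypothetical types, not from this file): a record
   such as { double d; int i; } is bumped to 64-bit alignment because
   its first field has DFmode, while { int i; double d; } keeps the
   alignment the front end computed — the later double itself stays
   word aligned.  */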
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
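
/* For example, given (plus (reg) (const_int 8)) this returns
   (const_int 8); for (lo_sum (reg) (symbol_ref)) or a bare (reg) it
   returns NULL_RTX, since no constant term is present.  */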
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  */

bool
mem_operand_gpr (rtx op, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;

  op = address_offset (XEXP (op, 0));
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;
  else if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  return offset + 0x8000 < 0x10000u - extra;
}
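
/* Worked instances of the checks above (example values): for a DImode
   access on powerpc64, extra == 0 and any multiple-of-4 offset in
   [-0x8000, 0x7fff] is accepted; for a TImode access on a 32-bit
   target, extra == 12 shrinks the upper bound so the offset of the
   last word still fits the 16-bit displacement field.  */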
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return false;
      break;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      break;

    default:
      break;
    }

  return true;
}
static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if memory accesses to OP are known to never straddle
   a 32k boundary.  */

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     enum machine_mode mode)
{
  tree decl, type;
  unsigned HOST_WIDE_INT dsize, dalign;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  decl = SYMBOL_REF_DECL (op);
  if (decl == NULL)
    {
      if (GET_MODE_SIZE (mode) == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = 1;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);
	  HOST_WIDE_INT lsb, mask;

	  /* Given the alignment of the block..  */
	  dalign = block->alignment;
	  mask = dalign / BITS_PER_UNIT - 1;

	  /* ..and the combined offset of the anchor and any offset
	     to this block object..  */
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	  lsb = offset & -offset;

	  /* ..find how many bits of the alignment we know for the
	     object.  */
	  mask &= lsb - 1;
	  dalign = mask + 1;
	}
      return dalign >= GET_MODE_SIZE (mode);
    }

  if (DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL)
	return true;

      if (!DECL_SIZE_UNIT (decl))
	return false;

      if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
	return false;

      dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
      if (dsize > 32768)
	return false;

      dalign = DECL_ALIGN_UNIT (decl);
      return dalign >= dsize;
    }

  type = TREE_TYPE (decl);

  if (TREE_CODE (decl) == STRING_CST)
    dsize = TREE_STRING_LENGTH (decl);
  else if (TYPE_SIZE_UNIT (type)
	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
    dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  else
    return false;
  if (dsize > 32768)
    return false;

  dalign = TYPE_ALIGN (type);
  if (CONSTANT_CLASS_P (decl))
    dalign = CONSTANT_ALIGNMENT (decl, dalign);
  else
    dalign = DATA_ALIGNMENT (decl, dalign);
  dalign /= BITS_PER_UNIT;
  return dalign >= dsize;
}
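
/* The lsb = offset & -offset trick above isolates the lowest set bit
   of the combined offset: e.g. for offset 0x68 it yields 0x8, so at
   most 8 bytes of alignment can be assumed for the object no matter
   how strongly the containing block is aligned.  */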
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}

static const_rtx tocrel_base, tocrel_offset;
/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match high part or
   non-split -mcmodel=large/medium toc pointer relative addresses.  */

static bool
toc_relative_expr_p (const_rtx op, bool strict)
{
  if (!TARGET_TOC)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* Only match the low part.  */
      if (GET_CODE (op) == LO_SUM
	  && REG_P (XEXP (op, 0))
	  && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
	op = XEXP (op, 1);
      else if (strict)
	return false;
    }

  tocrel_base = op;
  tocrel_offset = const0_rtx;
  if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
				    bool strict)
{
  return (toc_relative_expr_p (x, strict)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || mode == QImode
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}
static bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
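
/* That is, the offset must be a non-negative multiple of 8 no larger
   than 0xf8: 0, 8, 16, ..., 248 all pass, while 4 (low bits set) and
   256 (bit outside the 5-bit field) do not.  */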
bool
rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

    case DFmode:
    case DDmode:
      /* On e500v2, we may have:

	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

	 Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case TFmode:
    case TDmode:
    case TImode:
      if (TARGET_E500_DOUBLE)
	return (SPE_CONST_OFFSET_OK (offset)
		&& SPE_CONST_OFFSET_OK (offset + 8));

      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
  if (!strict
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)
      && REG_P (op1))
    return true;

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (enum machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}
bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
	return false;
      if (TARGET_TOC)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
	       && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x);
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   enum machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS)
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case TFmode:
    case TDmode:
    case TImode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  */
      extra = 8;
      break;
    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra)
      && !(SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;

      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if (SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
    {
      if (mode == DImode)
	return x;
      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);

	  if (GET_CODE (op2) != REG
	      && (GET_CODE (op2) != CONST_INT
		  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
		  || (GET_MODE_SIZE (mode) > 8
		      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
	    op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
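
/* An illustration of the high/low split above (example values): for
   the address (plus (reg) (const_int 0x12344)), low_int becomes 0x2344
   and high_int 0x10000, so an addis adds the high part into a scratch
   register and the final address is (plus (scratch) 0x2344), whose
   displacement now fits the signed 16-bit D field.  */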
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  rtx ret;
  rtx insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
#ifdef ENABLE_CHECKING
      if (REG_P (XVECEXP (y, 0, 1))
	  && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
	{
	  /* All good.  */
	}
      else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
	{
	  /* Weirdness alert.  df_note_compute can replace r2 with a
	     debug_expr when this unspec is in a debug_insn.
	     Seen in gcc.dg/pr51957-1.c  */
	}
      else
	{
	  debug_rtx (orig_x);
	  abort ();
	}
#endif
      y = XVECEXP (y, 0, 0);
      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      enum machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}
/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab, last;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (Pmode, dest,
				  gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);
	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
/* Return 1 if X contains a thread-local symbol.  */

static bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  return rs6000_tls_referenced_p (x);
}
/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && !CONSTANT_POOL_ADDRESS_P (sym)
	      && SYMBOL_REF_LOCAL_P (sym)));
}
6059 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6060 replace the input X, or the original X if no replacement is called for.
6061 The output parameter *WIN is 1 if the calling macro should goto WIN,
6064 For RS/6000, we wish to handle large displacements off a base
6065 register by splitting the addend across an addiu/addis and the mem insn.
6066 This cuts number of extra insns needed from 3 to 1.
6068 On Darwin, we use this to generate code for floating point constants.
6069 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6070 The Darwin code is inside #if TARGET_MACHO because only then are the
6071 machopic_* functions defined. */
6073 rs6000_legitimize_reload_address (rtx x
, enum machine_mode mode
,
6074 int opnum
, int type
,
6075 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
6077 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
6079 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6080 DFmode/DImode MEM. */
6083 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
6084 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)))
6085 reg_offset_p
= false;
6087 /* We must recognize output that we have already generated ourselves. */
6088 if (GET_CODE (x
) == PLUS
6089 && GET_CODE (XEXP (x
, 0)) == PLUS
6090 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
6091 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
6092 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
6094 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6095 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6096 opnum
, (enum reload_type
) type
);
6101 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6102 if (GET_CODE (x
) == LO_SUM
6103 && GET_CODE (XEXP (x
, 0)) == HIGH
)
6105 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6106 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6107 opnum
, (enum reload_type
) type
);
6113 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
6114 && GET_CODE (x
) == LO_SUM
6115 && GET_CODE (XEXP (x
, 0)) == PLUS
6116 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
6117 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
6118 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
6119 && machopic_operand_p (XEXP (x
, 1)))
6121 /* Result of previous invocation of this function on Darwin
6122 floating point constant. */
6123 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6124 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6125 opnum
, (enum reload_type
) type
);
6131 if (TARGET_CMODEL
!= CMODEL_SMALL
6133 && small_toc_ref (x
, VOIDmode
))
6135 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
6136 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
6137 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6138 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6139 opnum
, (enum reload_type
) type
);
6144 /* Force ld/std non-word aligned offset into base register by wrapping
6146 if (GET_CODE (x
) == PLUS
6147 && GET_CODE (XEXP (x
, 0)) == REG
6148 && REGNO (XEXP (x
, 0)) < 32
6149 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
6150 && GET_CODE (XEXP (x
, 1)) == CONST_INT
6152 && (INTVAL (XEXP (x
, 1)) & 3) != 0
6153 && VECTOR_MEM_NONE_P (mode
)
6154 && GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
6155 && TARGET_POWERPC64
)
6157 x
= gen_rtx_PLUS (GET_MODE (x
), x
, GEN_INT (0));
6158 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6159 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6160 opnum
, (enum reload_type
) type
);
6165 if (GET_CODE (x
) == PLUS
6166 && GET_CODE (XEXP (x
, 0)) == REG
6167 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
6168 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
6169 && GET_CODE (XEXP (x
, 1)) == CONST_INT
6171 && !SPE_VECTOR_MODE (mode
)
6172 && !(TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
6173 || mode
== DDmode
|| mode
== TDmode
6175 && VECTOR_MEM_NONE_P (mode
))
6177 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
6178 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
6180 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6182 /* Check for 32-bit overflow. */
6183 if (high
+ low
!= val
)
6189 /* Reload the high part into a base reg; leave the low part
6190 in the mem directly. */
6192 x
= gen_rtx_PLUS (GET_MODE (x
),
6193 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
6197 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6198 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6199 opnum
, (enum reload_type
) type
);
  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && VECTOR_MEM_NONE_P (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't
         offsettable.  The same goes for DImode without 64-bit gprs and
         DFmode and DDmode without fprs.
         ??? Assume floating point reg based on mode?  This assumption is
         violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
         where reload ends up doing a DFmode load of a constant from
         mem using two gprs.  Unfortunately, at this point reload
         hasn't yet selected regs so poking around in reload data
         won't help and even if we could figure out the regs reliably,
         we'd still want to allow this transformation when the mem is
         naturally aligned.  Since we say the address is good here, we
         can't disable offsets from LO_SUMs in mem_operand_gpr.
         FIXME: Allow offset from lo_sum for other modes too, when
         mem is sufficiently aligned.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
          || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
        {
          rtx offset = machopic_gen_offset (x);
          x = gen_rtx_LO_SUM (GET_MODE (x),
                              gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
                                            gen_rtx_HIGH (Pmode, offset)),
                              offset);
        }
      else
#endif
        x = gen_rtx_LO_SUM (GET_MODE (x),
                            gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
        push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                     BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                     opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
                                        int opnum, int type,
                                        int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
                                              ind_levels, win);
  fprintf (stderr,
           "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
           "type = %d, ind_levels = %d, win = %d, original addr:\n",
           GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
           && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (virtual_stack_registers_memory_p (x))
    return 1;
  if (reg_offset_p && legitimate_small_data_p (mode, x))
    return 1;
  if (reg_offset_p
      && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
          || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
          || TARGET_POWERPC64
          || (mode != DFmode && mode != DDmode)
          || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
          || TARGET_POWERPC64
          || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
           && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
                                              reg_ok_strict, false)
          || (!avoiding_indexed_address_p (mode)
              && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
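
/* Illustrative RTL (editorial note, not from the original sources):
   addresses accepted above include, subject to the mode checks,

     (reg:SI 3)                               register indirect
     (plus:SI (reg:SI 3) (const_int -4))      reg + 16-bit signed offset
     (plus:SI (reg:SI 3) (reg:SI 4))          indexed (reg + reg)
     (lo_sum:SI (reg:SI 9) (symbol_ref:SI ("*.LC0")))
                                              low half of a HIGH/LO_SUM pair
     (pre_inc:SI (reg:SI 3))                  auto-increment, if TARGET_UPDATE.  */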
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
                                   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
           "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
           "strict = %d, code = %s\n",
           ret ? "true" : "false",
           GET_MODE_NAME (mode),
           reg_ok_strict,
           GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
         is considered a legitimate address before reload, so there
         are no offset restrictions in that case.  Note that this
         condition is safe in strict mode because any address involving
         virtual_stack_vars_rtx or arg_pointer_rtx would already have
         been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
          && XEXP (addr, 0) != arg_pointer_rtx
          && GET_CODE (XEXP (addr, 1)) == CONST_INT)
        {
          unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
          return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
        }
      break;

    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
         all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}
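
/* Worked example (editorial note, not from the original sources): in
   the PLUS case above with TARGET_POWERPC64, an offset of 32760
   (0x7ff8) is mode dependent because 32760 + 0x8000 == 65528 ==
   0x10000 - 8; the last doubleword of a multi-word access at that
   offset would overflow the 16-bit displacement.  Offsets of 32752
   and below are not mode dependent.  */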
/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
           ret ? "true" : "false");
  debug_rtx (addr);

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
        /* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
           for aliasing purposes.  */
        return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
                || GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
                                             true, worst_case);
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) if not POWER
     architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
     be allocated.  */
  if (! TARGET_POWER)
    fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
        = call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
         registers in prologues and epilogues.  We no longer use r14
         for FIXED_SCRATCH, but we're keeping r14 out of the allocation
         pool for link-compatibility with older versions of GCC.  Once
         "old" code has died out, we can return r14 to the allocation
         pool.  */
      fixed_regs[14]
        = call_used_regs[14]
        = call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
        fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
        call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
        for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
          fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
                       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
        dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
                              GEN_INT (INTVAL (source)
                                       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                              gen_rtx_IOR (SImode, copy_rtx (result),
                                           GEN_INT (INTVAL (source) & 0xffff))));
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
        {
        case CONST_INT:
          c0 = INTVAL (source);
          c1 = -(c0 < 0);
          break;

        case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
          c0 = CONST_DOUBLE_LOW (source);
          c1 = -(c0 < 0);
#else
          c0 = CONST_DOUBLE_LOW (source);
          c1 = CONST_DOUBLE_HIGH (source);
#endif
          break;

        default:
          gcc_unreachable ();
        }

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
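
/* Illustrative output (editorial note, not from the original sources):
   for SImode, the two SETs emitted above correspond to the usual
   lis/ori pair, e.g. for 0x12345678

        lis 3,0x1234            # r3 = 0x12340000
        ori 3,3,0x5678          # r3 |= 0x5678

   with a REG_EQUAL note recording the original constant.  */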
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
                                        DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
                                        DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
          || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
        {
          if (ud1 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
          else
            emit_move_insn (dest, GEN_INT (ud1));
        }

      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
               || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
        {
          if (ud2 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud2 << 16));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
      else if (ud3 == 0 && ud4 == 0)
        {
          gcc_assert (ud2 & 0x8000);
          emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
                                         - 0x80000000));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ZERO_EXTEND (DImode,
                                               gen_lowpart (SImode,
                                                            copy_rtx (dest))));
        }
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
               || (ud4 == 0 && ! (ud3 & 0x8000)))
        {
          if (ud3 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud3 << 16));

          if (ud2 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud2)));
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
                                          GEN_INT (16)));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
      else
        {
          if (ud4 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud4 << 16));

          if (ud3 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud3)));

          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
                                          GEN_INT (32)));
          if (ud2 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud2 << 16)));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
    }
  return dest;
}
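
/* Illustrative output (editorial note, not from the original sources):
   the final "else" arm above emits the full five-instruction sequence,
   e.g. for 0x123456789abcdef0

        lis 3,0x1234            # ud4 << 16
        ori 3,3,0x5678          # | ud3
        sldi 3,3,32             # move into the high 32 bits
        oris 3,3,0x9abc         # | ud2 << 16
        ori 3,3,0xdef0          # | ud1

   The earlier arms handle constants whose leading 16-bit groups are
   all zeros or all ones and need fewer insns.  */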
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (reload_in_progress)
    return;

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
                                               GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
                               copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
                                               GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
                               copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
               "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
               "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
               GET_MODE_NAME (mode),
               reload_in_progress,
               reload_completed,
               can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
         to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
              || FLOAT_MODE_P (mode)
              || ((CONST_DOUBLE_HIGH (operands[1]) != 0
                   || CONST_DOUBLE_LOW (operands[1]) < 0)
                  && (CONST_DOUBLE_HIGH (operands[1]) != -1
                      || CONST_DOUBLE_LOW (operands[1]) >= 0)));
  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
          || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
                                            ? 32 : MEM_ALIGN (operands[0])))
            || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
                                               ? 32
                                               : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
                      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
                      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  if (mode == SFmode && ! TARGET_POWERPC
      && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
      && GET_CODE (operands[0]) == MEM)
    {
      int regnum;

      if (reload_in_progress || reload_completed)
        regnum = true_regnum (operands[1]);
      else if (GET_CODE (operands[1]) == REG)
        regnum = REGNO (operands[1]);
      else
        regnum = -1;

      /* If operands[1] is a register, on POWER it may have
         double-precision data in it, so truncate it to single
         precision.  */
      if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
        {
          rtx newreg;
          newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
                    : gen_reg_rtx (mode));
          emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
          operands[1] = newreg;
        }
    }
6961 data and load its address to a register. */
6962 if (rs6000_tls_referenced_p (operands
[1]))
6964 enum tls_model model
;
6965 rtx tmp
= operands
[1];
6968 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
6970 addend
= XEXP (XEXP (tmp
, 0), 1);
6971 tmp
= XEXP (XEXP (tmp
, 0), 0);
6974 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
6975 model
= SYMBOL_REF_TLS_MODEL (tmp
);
6976 gcc_assert (model
!= 0);
6978 tmp
= rs6000_legitimize_tls_address (tmp
, model
);
6981 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
6982 tmp
= force_operand (tmp
, operands
[0]);
  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
          || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
                        simplify_gen_subreg (DFmode, operands[1], mode, 0),
                        DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
                                             GET_MODE_SIZE (DFmode)),
                        simplify_gen_subreg (DFmode, operands[1], mode,
                                             GET_MODE_SIZE (DFmode)),
                        DFmode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
        {
          rtx mem = adjust_address_nv (operands[0], DDmode, 0);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_store (mem, operands[1]));
        }
      else if (INT_REGNO_P (REGNO (operands[1])))
        {
          rtx mem = adjust_address_nv (operands[0], mode, 4);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_hardfloat (mem, operands[1]));
        }
      else
        gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
        {
          rtx mem = adjust_address_nv (operands[1], DDmode, 0);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_load (operands[0], mem));
        }
      else if (INT_REGNO_P (REGNO (operands[0])))
        {
          rtx mem = adjust_address_nv (operands[1], mode, 4);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_hardfloat (operands[0], mem));
        }
      else
        gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != CONST_INT)
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
          && ! easy_fp_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
    case V2DFmode:
    case V2DImode:
      if (CONSTANT_P (operands[1])
          && !easy_vector_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;
    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data.  */
      if (TARGET_ELF
          && mode == Pmode
          && DEFAULT_ABI == ABI_V4
          && (GET_CODE (operands[1]) == SYMBOL_REF
              || GET_CODE (operands[1]) == CONST)
          && small_data_operand (operands[1], mode))
        {
          emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
          return;
        }

      if (DEFAULT_ABI == ABI_V4
          && mode == Pmode && mode == SImode
          && flag_pic == 1 && got_operand (operands[1], mode))
        {
          emit_insn (gen_movsi_got (operands[0], operands[1]));
          return;
        }

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
          && TARGET_NO_TOC
          && ! flag_pic
          && mode == Pmode
          && CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != HIGH
          && GET_CODE (operands[1]) != CONST_INT)
        {
          rtx target = (!can_create_pseudo_p ()
                        ? operands[0]
                        : gen_reg_rtx (mode));

          /* If this is a function address on -mcall-aixdesc,
             convert it to the address of the descriptor.  */
          if (DEFAULT_ABI == ABI_AIX
              && GET_CODE (operands[1]) == SYMBOL_REF
              && XSTR (operands[1], 0)[0] == '.')
            {
              const char *name = XSTR (operands[1], 0);
              rtx new_ref;
              while (*name == '.')
                name++;
              new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
              CONSTANT_POOL_ADDRESS_P (new_ref)
                = CONSTANT_POOL_ADDRESS_P (operands[1]);
              SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
              SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
              SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
              operands[1] = new_ref;
            }

          if (DEFAULT_ABI == ABI_DARWIN)
            {
#if TARGET_MACHO
              if (MACHO_DYNAMIC_NO_PIC_P)
                {
                  /* Take care of any required data indirection.  */
                  operands[1] = rs6000_machopic_legitimize_pic_address (
                                  operands[1], mode, operands[0]);
                  if (operands[0] != operands[1])
                    emit_insn (gen_rtx_SET (VOIDmode,
                                            operands[0], operands[1]));
                  return;
                }
#endif
              emit_insn (gen_macho_high (target, operands[1]));
              emit_insn (gen_macho_low (operands[0], target, operands[1]));
              return;
            }

          emit_insn (gen_elf_high (target, operands[1]));
          emit_insn (gen_elf_low (operands[0], target, operands[1]));
          return;
        }
      /* If this is a SYMBOL_REF that refers to a constant pool entry,
         and we have put it in the TOC, we just need to make a TOC-relative
         reference to it.  */
      if (TARGET_TOC
          && GET_CODE (operands[1]) == SYMBOL_REF
          && use_toc_relative_ref (operands[1]))
        operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
               && CONSTANT_P (operands[1])
               && GET_CODE (operands[1]) != HIGH
               && ((GET_CODE (operands[1]) != CONST_INT
                    && ! easy_fp_constant (operands[1], mode))
                   || (GET_CODE (operands[1]) == CONST_INT
                       && (num_insns_constant (operands[1], mode)
                           > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
                   || (GET_CODE (operands[0]) == REG
                       && FP_REGNO_P (REGNO (operands[0]))))
               && !toc_relative_expr_p (operands[1], false)
               && (TARGET_CMODEL == CMODEL_SMALL
                   || can_create_pseudo_p ()
                   || (REG_P (operands[0])
                       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
        {

#if TARGET_MACHO
          /* Darwin uses a special PIC legitimizer.  */
          if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
            {
              operands[1] =
                rs6000_machopic_legitimize_pic_address (operands[1], mode,
                                                        operands[0]);
              if (operands[0] != operands[1])
                emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
              return;
            }
#endif

          /* If we are to limit the number of things we put in the TOC and
             this is a symbol plus a constant we can add in one insn,
             just put the symbol in the TOC and add the constant.  Don't do
             this if reload is in progress.  */
          if (GET_CODE (operands[1]) == CONST
              && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
              && GET_CODE (XEXP (operands[1], 0)) == PLUS
              && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
              && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
                  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
              && ! side_effects_p (operands[0]))
            {
              rtx sym =
                force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
              rtx other = XEXP (XEXP (operands[1], 0), 1);

              sym = force_reg (mode, sym);
              emit_insn (gen_add3_insn (operands[0], sym, other));
              return;
            }

          operands[1] = force_const_mem (mode, operands[1]);

          if (TARGET_TOC
              && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
              && constant_pool_expr_p (XEXP (operands[1], 0))
              && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
                        get_pool_constant (XEXP (operands[1], 0)),
                        get_pool_mode (XEXP (operands[1], 0))))
            {
              rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
                                                 operands[0]);
              operands[1] = gen_const_mem (mode, tocref);
              set_mem_alias_set (operands[1], get_TOC_alias_set ());
            }
        }
      break;
);
7256 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7258 gen_rtx_SET (VOIDmode
,
7259 operands
[0], operands
[1]),
7260 gen_rtx_CLOBBER (VOIDmode
,
7261 gen_rtx_SCRATCH (SImode
)))));
7267 fatal_insn ("bad move", gen_rtx_SET (VOIDmode
, dest
, source
));
7270 /* Above, we may have called force_const_mem which may have returned
7271 an invalid address. If we can, fix this up; otherwise, reload will
7272 have to deal with it. */
7273 if (GET_CODE (operands
[1]) == MEM
&& ! reload_in_progress
)
7274 operands
[1] = validize_mem (operands
[1]);
7277 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]));
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
         as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
        return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
          || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
        {
          warning (0, "GCC vector returned by reference: "
                   "non-standard ABI extension with no compatibility guarantee");
          warned_for_return_big_vectors = true;
        }
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
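
/* Worked example (editorial note, not from the original sources):
   with aix_struct_return set (AIX and Darwin), even
   "struct { int x; }" is returned in memory.  Under the SVR4 default
   the same struct comes back in r3, being an aggregate of at most
   8 bytes, while a 12-byte struct is returned in memory under all of
   these ABIs.  */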
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (cgraph_state == CGRAPH_STATE_EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
        return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
        return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_get_node (fndecl);
      c_node = cgraph_function_or_thunk_node (c_node, NULL);
      return !cgraph_only_called_directly_p (c_node);
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED, int incoming,
                      int libcall, int n_named_args,
                      tree fndecl ATTRIBUTE_UNUSED,
                      enum machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
                      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
          && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
          && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
        {
          tree ret_type = TREE_TYPE (fntype);
          fprintf (stderr, " ret code = %s,",
                   tree_code_name[(int) TREE_CODE (ret_type)]);
        }

      if (cum->call_cookie & CALL_LONG)
        fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
               cum->prototype, cum->nargs_prototype);
    }
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4)
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
        {
          tree return_type;

          if (fntype)
            {
              return_type = TREE_TYPE (fntype);
              return_mode = TYPE_MODE (return_type);
            }
          else
            return_type = lang_hooks.types.type_for_mode (return_mode, 0);

          if (return_type != NULL)
            {
              if (TREE_CODE (return_type) == RECORD_TYPE
                  && TYPE_TRANSPARENT_AGGR (return_type))
                {
                  return_type = TREE_TYPE (first_field (return_type));
                  return_mode = TYPE_MODE (return_type);
                }
              if (AGGREGATE_TYPE_P (return_type)
                  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
                      > 8))
                rs6000_returns_struct = true;
            }
          if (SCALAR_FLOAT_MODE_P (return_mode))
            rs6000_passes_float = true;
          else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
                   || SPE_VECTOR_MODE (return_mode))
            rs6000_passes_vector = true;
        }
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
             " altivec instructions are disabled, use -maltivec"
             " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
         if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
         i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
         passed padded downward, except that -mstrict-align further
         muddied the water in that multi-component structures of 2 and 4
         bytes in size were passed padded upward.

         The following arranges for best compatibility with previous
         versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT size = 0;

          if (mode == BLKmode)
            {
              if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
                size = int_size_in_bytes (type);
            }
          else
            size = GET_MODE_SIZE (mode);

          if (size == 1 || size == 2 || size == 4)
            return downward;
        }
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
        return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
          || (TARGET_HARD_FLOAT
              && TARGET_FPRS
              && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 8
               && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 16))
    return 128;
  else if (TARGET_MACHO
           && rs6000_darwin64_abi
           && mode == BLKmode
           && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
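
/* Worked example (editorial note, not from the original sources):
   under the rules above, a V.4 "long long" or "double" argument is
   aligned to 64 bits, an Altivec/VSX vector to 128 bits, and an
   8-byte SPE vector to 64 bits; everything else gets PARM_BOUNDARY.  */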
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
                   unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
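
/* Worked example (editorial note, not from the original sources):
   the expression above rounds the parameter offset up to the
   argument's alignment.  On 32-bit V.4 (parm_offset = 2 words), a
   doubleword-aligned argument (align = 64/32 - 1 = 1) with nwords = 1
   already used gives

     1 + (-(2 + 1) & 1) = 1 + 1 = 2

   so the argument starts at word 2 and word 1 is left as padding.  */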
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
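
/* Worked example (editorial note, not from the original sources):
   a DFmode argument (8 bytes) occupies (8 + 3) >> 2 = 2 words under
   TARGET_32BIT and (8 + 7) >> 3 = 1 word otherwise; a 12-byte
   BLKmode struct occupies 3 and 2 words respectively.  */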
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
                                          HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by an 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
          || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
                            MODE_INT, 0);
      if (mode == BLKmode)
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = intoffset & -BITS_PER_WORD;
        }
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
                                            const_tree type,
                                            HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        enum machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && host_integerp (bit_position (f), 1))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
        else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
          {
            unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->fregno += n_fpregs;
            /* Single-precision floats present a special problem for
               us, because they are smaller than an 8-byte GPR, and so
               the structure-packing rules combined with the standard
               varargs behavior mean that we want to pack float/float
               and float/int combinations into a single register's
               space.  This is complicated by the arg advance flushing,
               which works on arbitrarily large groups of int-type
               fields.  */
            if (mode == SFmode)
              {
                if (cum->floats_in_gpr == 1)
                  {
                    /* Two floats in a word; count the word and reset
                       the float count.  */
                    cum->words++;
                    cum->floats_in_gpr = 0;
                  }
                else if (bitpos % 64 == 0)
                  {
                    /* A float at the beginning of an 8-byte word;
                       count it and put off adjusting cum->words until
                       we see if an arg advance flush is going to do it
                       for us.  */
                    cum->floats_in_gpr++;
                  }
                else
                  {
                    /* The float is at the end of a word, preceded
                       by integer fields, so the arg advance flush
                       just above has already set cum->words and
                       everything is taken care of.  */
                  }
              }
            else
              cum->words += n_fpregs;
          }
        else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
          {
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->vregno++;
            cum->words += 2;
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
         && ((mode == BLKmode
              && TREE_CODE (type) == RECORD_TYPE
              && int_size_in_bytes (type) > 0)
             || (type && TREE_CODE (type) == RECORD_TYPE
                 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                               const_tree type, bool named, int depth)
{
  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        rs6000_passes_float = true;
      else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
        rs6000_passes_vector = true;
      else if (SPE_VECTOR_MODE (mode)
               && !cum->stdarg
               && cum->sysv_gregno <= GP_ARG_MAX_REG)
        rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
          || (type && TREE_CODE (type) == VECTOR_TYPE
              && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
        {
          cum->vregno++;
          if (!TARGET_ALTIVEC)
            error ("cannot pass argument in vector register because"
                   " altivec instructions are disabled, use -maltivec"
                   " to enable them");

          /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
             even if it is going to be passed in a vector register.
             Darwin does the same for variable-argument functions.  */
          if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
              || (cum->stdarg && DEFAULT_ABI != ABI_V4))
            stack = true;
        }
      else
        stack = true;

      if (stack)
        {
          int align;

          /* Vector parameters must be 16-byte aligned.  This places
             them at 2 mod 4 in terms of words in 32-bit mode, since
             the parameter save area starts at offset 24 from the
             stack.  In 64-bit mode, they just have to start on an
             even word, since the parameter save area is 16-byte
             aligned.  Space for GPRs is reserved even if the argument
             will be passed in memory.  */
          if (TARGET_32BIT)
            align = (2 - cum->words) & 3;
          else
            align = cum->words & 1;
          cum->words += align + rs6000_arg_size (mode, type);

          if (TARGET_DEBUG_ARG)
            {
              fprintf (stderr, "function_adv: words = %2d, align=%d, ",
                       cum->words, align);
              fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
                       cum->nargs_prototype, cum->prototype,
                       GET_MODE_NAME (mode));
            }
        }
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
           && !cum->stdarg
           && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
         treated as if consisting entirely of ints.
         Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
          && (cum->words % 2) != 0)
        cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
        cum->words += (size + 7) / 8;
      else
        {
          /* It is tempting to say int register count just goes up by
             sizeof(type)/8, but this is wrong in a case such as
             { int; double; int; } [powerpc alignment].  We have to
             grovel through the fields for these too.  */
          cum->intoffset = 0;
          cum->floats_in_gpr = 0;
          rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
          rs6000_darwin64_record_arg_advance_flush (cum,
                                                    size * BITS_PER_UNIT, 1);
        }
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
                   cum->words, TYPE_ALIGN (type), size);
          fprintf (stderr,
                   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
                   cum->nargs_prototype, cum->prototype,
                   GET_MODE_NAME (mode));
        }
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
          && ((TARGET_SINGLE_FLOAT && mode == SFmode)
              || (TARGET_DOUBLE_FLOAT && mode == DFmode)
              || (mode == TFmode && !TARGET_IEEEQUAD)
              || mode == SDmode || mode == DDmode || mode == TDmode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
          else
            {
              cum->fregno = FP_ARG_V4_MAX_REG + 1;
              if (mode == DFmode || mode == TFmode
                  || mode == DDmode || mode == TDmode)
                cum->words += cum->words & 1;
              cum->words += rs6000_arg_size (mode, type);
            }
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
             (r7,r8) or (r9,r10).  As does any other 2 word item such
             as complex int due to a historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            {
              /* Long long and SPE vectors are aligned on the stack.
                 So are other 2 word items such as complex int due to
                 a historical mistake.  */
              if (n_words == 2)
                cum->words += cum->words & 1;
              cum->words += n_words;
            }

          /* Note: continuing to accumulate gregno past when we've started
             spilling to the stack indicates the fact that we've started
             spilling to the stack to expand_builtin_saveregs.  */
          cum->sysv_gregno = gregno + n_words;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
                   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
          fprintf (stderr, "mode = %4s, named = %d\n",
                   GET_MODE_NAME (mode), named);
        }
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
          && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          /* _Decimal128 must be passed in an even/odd float register pair.
             This assumes that the register number is odd when fregno is
             odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;
          cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
                   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
          fprintf (stderr, "named = %d, align = %d, depth = %d\n",
                   named, align_words - start_words, depth);
        }
    }
}

static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
                             const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
                                 0);
}
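
/* Worked example (editorial note, not from the original sources):
   under the V.4 rules above, for f (int a, long long b), A lands in
   r3 and leaves sysv_gregno at r4; B has n_words == 2, so gregno is
   bumped to the next odd register and B occupies the (r5,r6) pair,
   with r4 left unused.  */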
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
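
/* Illustrative RTL (editorial note, not from the original sources):
   for DCmode at gregno = 5 the function returns

     (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   i.e. the real part in the (r5,r6) pair at offset 0 and the
   imaginary part in (r7,r8) at offset 8.  */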
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                         const_tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     are passed and returned in a pair of GPRs for ABI compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
                             || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
        gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
        return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
        gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
        {
          rtx r1, r2;
          enum machine_mode m = SImode;

          r1 = gen_rtx_REG (m, gregno);
          r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
          r2 = gen_rtx_REG (m, gregno + 1);
          r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
        }
      else
        return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
        return gen_rtx_REG (mode, gregno);
      else
        return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
                                  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
                            MODE_INT, 0);
      if (mode == BLKmode)
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = intoffset & -BITS_PER_WORD;
          mode = word_mode;
        }
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
        gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
                                    HOST_WIDE_INT startbitpos, rtx rvec[],
                                    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        enum machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && host_integerp (bit_position (f), 1))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
        else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
          {
            unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
            switch (mode)
              {
              case SCmode: mode = SFmode; break;
              case DCmode: mode = DFmode; break;
              case TCmode: mode = TFmode; break;
              default: break;
              }
#endif
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
              {
                gcc_assert (cum->fregno == FP_ARG_MAX_REG
                            && (mode == TFmode || mode == TDmode));
                /* Long double or _Decimal128 split over regs and memory.  */
                mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
                cum->use_stack = 1;
              }
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->fregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
            if (mode == TFmode || mode == TDmode)
              cum->fregno++;
          }
        else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
          {
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->vregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
				      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
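
/* Illustrative sketch (register numbers are indicative only, not from
   this file): for a first argument of type
     struct { double d; int a; int b; }
   the darwin64 code above would build roughly

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])

   routing the FP field to an FP register and the packed pair of ints,
   as one word-mode chunk, to a GP register, each element tagged with
   its byte offset in the in-memory layout.  */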
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
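
/* Illustrative sketch: with a 32-bit ABI on 64-bit hardware
   (-m32 -mpowerpc64), a DFmode argument starting at gpr word 2 would
   come back from the function above as roughly

     (parallel:DF [(expr_list (reg:SI r5) (const_int 0))
                   (expr_list (reg:SI r6) (const_int 4))])

   i.e. two SImode pieces rather than something like
   (subreg:SI (reg:DF) 4), which is exactly the ill-handled subreg the
   comment above warns about.  */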
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
					     /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
	/* Vector parameters get passed in vector register
	   and also in GPRs or memory, in absence of prototype.  */
	int align_words;
	rtx slot;
	align_words = (cum->words + 1) & ~1;

	if (align_words >= GP_ARG_NUM_REG)
	  slot = NULL_RTX;
	else
	  slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

	return gen_rtx_PARALLEL (mode,
		 gen_rtvec (2,
			    gen_rtx_EXPR_LIST (VOIDmode,
					       slot, const0_rtx),
			    gen_rtx_EXPR_LIST (VOIDmode,
					       gen_rtx_REG (mode, cum->vregno),
					       const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  This places them at
	     2 mod 4 in terms of words in 32-bit mode, since the parameter
	     save area starts at offset 24 from the stack.  In 64-bit mode,
	     they just have to start on an even word, since the parameter
	     save area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
	{
	  rtx rvec[GP_ARG_NUM_REG + 1];
	  rtx r;
	  int k;
	  bool needs_psave;
	  enum machine_mode fmode = mode;
	  unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	  if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	    {
	      /* Currently, we only ever need one reg here because complex
		 doubles are split.  */
	      gcc_assert (cum->fregno == FP_ARG_MAX_REG
			  && (fmode == TFmode || fmode == TDmode));

	      /* Long double or _Decimal128 split over regs and memory.  */
	      fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
	    }

	  /* Do we also need to pass this arg in the parameter save
	     area?  */
	  needs_psave = (type
			 && (cum->nargs_prototype <= 0
			     || (DEFAULT_ABI == ABI_AIX
				 && TARGET_XL_COMPAT
				 && align_words >= GP_ARG_NUM_REG)));

	  if (!needs_psave && mode == fmode)
	    return gen_rtx_REG (fmode, cum->fregno);

	  k = 0;
	  if (needs_psave)
	    {
	      /* Describe the part that goes in gprs or the stack.
		 This piece must come first, before the fprs.  */
	      if (align_words < GP_ARG_NUM_REG)
		{
		  unsigned long n_words = rs6000_arg_size (mode, type);

		  if (align_words + n_words > GP_ARG_NUM_REG
		      || (TARGET_32BIT && TARGET_POWERPC64))
		    {
		      /* If this is partially on the stack, then we only
			 include the portion actually in registers here.  */
		      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
		      rtx off;
		      int i = 0;
		      if (align_words + n_words > GP_ARG_NUM_REG)
			/* Not all of the arg fits in gprs.  Say that it
			   goes in memory too, using a magic NULL_RTX
			   component.  Also see comment in
			   rs6000_mixed_function_arg for why the normal
			   function_arg_partial_nregs scheme doesn't work
			   in this case.  */
			rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
						       const0_rtx);
		      do
			{
			  r = gen_rtx_REG (rmode,
					   GP_ARG_MIN_REG + align_words);
			  off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
			  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
			}
		      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
		    }
		  else
		    {
		      /* The whole arg fits in gprs.  */
		      r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
		      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
		    }
		}
	      else
		/* It's entirely in memory.  */
		rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  /* Describe where this piece goes in the fprs.  */
	  r = gen_rtx_REG (fmode, cum->fregno);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  if (mode == BLKmode)
	    mode = Pmode;

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
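
/* Illustrative note (register numbers indicative only): on AIX, an
   unprototyped call such as

     extern int f ();
     double d;
     ... f (d) ...

   makes needs_psave true in the FP branch above, so the double is
   described both in the gpr slot and in an FP register, roughly

     (parallel:DF [(expr_list (reg:DF r3) (const_int 0))
                   (expr_list (reg:DF f1) (const_int 0))])

   whereas a properly prototyped call returns (reg:DF f1) alone.  */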
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as fprs, then this function should
	 return the number of partial bytes passed in the parameter
	 save area rather than partial bytes passed in fprs.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || (DEFAULT_ABI == ABI_AIX
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
	       > FP_ARG_MAX_REG + 1)
	ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
	return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
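
/* Illustrative note: under the V.4 ABI the checks above make an
   aggregate such as  struct s { int a, b; }  pass by invisible
   reference (the caller builds a copy and passes its address), whereas
   the AIX ABI would pass those eight bytes directly in registers.  */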
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any needed actions for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	{
	  /* V4 va_list_gpr_size counts number of registers needed.  */
	  if (nregs > cfun->va_list_gpr_size)
	    nregs = cfun->va_list_gpr_size;
	}
      else
	{
	  /* char * va_list instead counts number of bytes needed.  */
	  if (nregs > cfun->va_list_gpr_size / reg_size)
	    nregs = cfun->va_list_gpr_size / reg_size;
	}

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			 ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			  ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
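
/* The record built above is equivalent to the following declaration
   (a sketch for the reader; the real type is constructed in tree form):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          -- gp argument registers consumed
       unsigned char fpr;          -- fp argument registers consumed
       unsigned short reserved;    -- named padding, keeps -Wpadded quiet
       void *overflow_arg_area;    -- next stack-passed argument
       void *reg_save_area;        -- block the prologue saved regs into
     } __va_list_tag;
     typedef __va_list_tag __builtin_va_list[1];

   matching the SVR4 PowerPC ABI layout of va_list.  */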
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
	rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
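
/* Illustrative note (approximate): for  void f (int n, ...)  under the
   V.4 ABI, va_start (ap, n) expands through the code above to roughly

     ap.gpr = 1;    (r3 is already consumed by n)
     ap.fpr = 0;
     ap.overflow_arg_area = <incoming-args base> + 1 * UNITS_PER_WORD;
     ap.reg_save_area = <frame base> + cfun->machine->varargs_save_offset;

   with individual stores elided when the va_list counter fields show
   they cannot be needed.  */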
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of relevance.
     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
	  || (TARGET_DOUBLE_FLOAT
	      && (TYPE_MODE (type) == DFmode
		  || TYPE_MODE (type) == TFmode
		  || TYPE_MODE (type) == SDmode
		  || TYPE_MODE (type) == DDmode
		  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
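
/* Illustrative sketch (approximate, not verbatim gimple): for
   va_arg (ap, int) under the V.4 ABI the code above emits roughly

     if (ap.gpr >= 8) goto lab_false;
     addr = ap.reg_save_area + ap.gpr * 4;
     ap.gpr += 1;
     goto lab_over;
   lab_false:
     addr = ap.overflow_arg_area;
     ap.overflow_arg_area = addr + 4;
   lab_over:
     <result> = *(int *) addr;

   The register-pair alignment, FP save-area offset, and _Decimal128
   odd-register adjustments above only kick in for wider types.  */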
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed",
		 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 global memory.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
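
/* Illustrative use (the names here stand in for the real tables built
   later in this file, and are not verbatim):

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
		  ALTIVEC_BUILTIN_VADDUBM);

   With -mdebug=builtin each registration is logged in the form
   "rs6000_builtin, code = <n>, <name><attrs>".  */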
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};
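
/* Each table here relies on the X-macro idiom above: rs6000-builtin.def
   invokes RS6000_BUILTIN_<kind> once per builtin, and only the kind
   whose #define is non-empty contributes entries to the current table.
   As a (hypothetical, for illustration) example, a def-file line

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
		       CODE_FOR_altivec_vmaddfp)

   would expand in bdesc_3arg to the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_altivec_vmaddfp,
       "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   and to nothing in every other table.  */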
/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* SPE predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_predicates[] =
{
#include "rs6000-builtin.def"
};

/* SPE evsel predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_evsel[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
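
/* Illustrative note: the 5-bit *signed* range check above is what
   rejects, e.g.,

     vector signed char v = __builtin_altivec_vspltisb (20);

   at compile time; vspltisb's immediate must lie in [-16, 15], so 20
   produces "argument 1 must be a 5-bit signed literal".  */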
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);

  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target,
			   bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			  gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);

  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail out before doing anything stupid.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
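
/* Editor's illustration (added; not part of the original GCC source):
   for a store such as

     __builtin_altivec_stvx (v, 0, p);

   ARG0 is the vector value, ARG1 the offset and ARG2 the pointer.
   Because ARG1 is const0_rtx here, the MEM address is just ARG2; a
   nonzero offset instead yields (plus:P op1 op2), mirroring the load
   path above.  */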
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
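
/* Editor's illustration (added; not part of the original GCC source):
   the literal checks above catch out-of-range immediates at compile
   time.  A vec_sld-style call whose third argument is not a literal in
   0..15, e.g.

     vector signed int s = vec_sld (a, b, 42);

   is rejected with "argument 3 must be a 4-bit unsigned literal",
   because vsldoi encodes its shift count in a 4-bit instruction
   field.  */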
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
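
/* Editor's illustration (added; not part of the original GCC source;
   builtin name illustrative): a vec_init-style call such as

     vector float v = __builtin_vec_init_v4sf (1.0f, 2.0f, 3.0f, 4.0f);

   has exactly GET_MODE_NUNITS (here 4) arguments; each argument is
   expanded, narrowed to the element mode with gen_lowpart, and handed
   to rs6000_expand_vector_init as one PARALLEL.  */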
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
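
/* Editor's illustration (added; not part of the original GCC source):
   for a V4SI vector TYPE_VECTOR_SUBPARTS is 4, so MAX is 3 and

     get_element_number (v4si_type, build_int_cst (integer_type_node, 5))

   reports "selector must be an integer constant in the range 0..3" and
   returns 0, keeping the vec_set/vec_ext expanders below
   well-defined.  */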
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
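
/* Editor's illustration (added; not part of the original GCC source;
   builtin name illustrative): a vec_ext-style call, e.g.

     int x = __builtin_vec_ext_v4si (v, 2);

   routes here.  Note that when optimizing, a fresh pseudo is always
   chosen as the destination of the extract, giving the later RTL
   passes a clean register to work with.  */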
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
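
/* Editor's note (added; not part of the original GCC source): each
   entry pairs a target mask, an insn code, the user-visible name and
   the builtin enum, so spe_expand_builtin below can expand all of
   these loads through the one generic rs6000_expand_binop_builtin via
   a linear table scan:

     for (i = 0, d = bdesc_2arg_spe; i < ARRAY_SIZE (bdesc_2arg_spe);
	  ++i, ++d)
       if (d->code == fcode)
	 return rs6000_expand_binop_builtin (d->icode, exp, target);
*/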
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
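
/* Editor's illustration (added; not part of the original GCC source;
   builtin name illustrative): per the comment above, a call such as

     e = __builtin_spe_evsel_gts (a, b, c, d);

   compares A and B element-wise and then selects, for each half, the
   corresponding element of C (compare true) or D (compare false)
   through the evsel instruction.  */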
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  unsigned mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
			   ? get_insn_name ((int)icode)
			   : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_EVSEL:	   name3 = "evsel";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int)icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (int) CODE_FOR_altivec_lvsr;
	enum machine_mode tmode = insn_data[icode].operand[0].mode;
	enum machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (VOIDmode, op,
				    gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	/*pat = gen_altivec_lvsr (target, op);*/
	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"	 : "",
	     (TARGET_SPE)	   ? ", spe"	 : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  void_type_internal_node = void_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector long", V2DI_type_node);
  TYPE_NAME (V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
  TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
  TYPE_NAME (bool_V2DI_type_node) = tdecl;

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
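
/* Editor's illustration (added; not part of the original GCC source):
   once initialization has run, the generic builtins registered at the
   end above are directly callable, e.g.

     double q = __builtin_recipdiv (x, y);   approximate x / y
     double r = __builtin_rsqrt (x);         approximate 1 / sqrt (x)

   subject to the target options that enable the underlying recip and
   rsqrt patterns.  */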
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  unsigned fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
				pushort_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
				signed_char_type_node,
				NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);

  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
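
  /* The __builtin_vec_* entries above are the overloaded forms that
     altivec.h maps the generic vec_ld/vec_st style intrinsics onto, e.g.
     (illustrative)

	 vector int v = vec_ld (0, pi);    expands to __builtin_vec_ld
	 vec_st (v, 0, pi);                expands to __builtin_vec_st

     The opaque argument and return types defer type checking until the
     overload is resolved against a specific vector mode.  */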
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);

  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */
  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
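
  /* For example (illustrative), the builtin just defined lets

	 vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);

     expand through the vec_init pattern rather than element-wise stores.  */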
  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
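
  /* Each vec_set builtin takes (vector, scalar, element number) and yields
     the updated vector, e.g. (illustrative)

	 v = __builtin_vec_set_v4si (v, 42, 0);

     replaces element 0 of v with 42 via the vec_set pattern.  */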
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
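
  /* Illustrative use of the extract builtins just defined:

	 int x = __builtin_vec_ext_v4si (v, 2);

     pulls element 2 of a V4SI vector into a scalar through the vec_extract
     pattern.  */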
  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */

static hashval_t
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned) MAX_MACHINE_MODE) + ((unsigned) bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
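
/* The loop above is in effect a mixed-radix encoding of the signature:
   after iteration I, ret == (... * MAX_MACHINE_MODE + mode[I]) * 2
   + uns_p[I], so (modulo wraparound) two signatures hash alike only when
   every mode and every unsignedness flag match, which is exactly what
   builtin_hash_eq below tests.  */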
/* Compare builtin hash entries H1 and H2 for equivalence.  */

static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */

static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
		       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
					  builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
      h.uns_p[0] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc_builtin_hash_struct ();
      *h2 = h;
      *found = (void *) h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *) (*found))->type;
}
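
/* For instance (illustrative), a binary vector builtin whose insn operands
   are all V4SImode reaches this function as

       builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			      code, name);

   the trailing VOIDmode trims num_args to 2, and the cached result is the
   tree for "V4SI f (V4SI, V4SI)" with signed element types.  */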
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  unsigned builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
rs6000_init_libfuncs (void)
{
  if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
      && !TARGET_POWER2 && !TARGET_POWERPC)
    {
      /* AIX library routines for float->int conversion.  */
      set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
      set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
    }

  if (!TARGET_IEEEQUAD)
      /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	  {
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	  }

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT || TARGET_POWER2)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  bool constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  clear_bytes = 8;
	  mode = DImode;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
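
/* A worked example of the sizing logic above (illustrative): clearing a
   32-byte block aligned to 128 bits with TARGET_ALTIVEC uses clear_step 16,
   so the loop emits two V4SImode stores of zero.  At -Os the same block is
   still inlined since 32 <= 3 * 16, while a 64-byte block (> 48) falls
   back to memset.  */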
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src	= operands[1];
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  if (bytes > rs6000_block_move_inline_limit)
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
	rtx (*movmemsi) (rtx, rtx, rtx, rtx);
	rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
	 when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
	{
	  move_bytes = 16;
	  mode = V4SImode;
	  gen_func.mov = gen_movv4si;
	}
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
	{
	  move_bytes = 8;
	  mode = V2SImode;
	  gen_func.mov = gen_movv2si;
	}
      else if (TARGET_STRING
	       && bytes > 24	/* move up to 32 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10]
	       && ! fixed_regs[11]
	       && ! fixed_regs[12])
	{
	  move_bytes = (bytes > 32) ? 32 : bytes;
	  gen_func.movmemsi = gen_movmemsi_8reg;
	}
      else if (TARGET_STRING
	       && bytes > 16	/* move up to 24 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10])
	{
	  move_bytes = (bytes > 24) ? 24 : bytes;
	  gen_func.movmemsi = gen_movmemsi_6reg;
	}
      else if (TARGET_STRING
	       && bytes > 8	/* move up to 16 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8])
	{
	  move_bytes = (bytes > 16) ? 16 : bytes;
	  gen_func.movmemsi = gen_movmemsi_4reg;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  move_bytes = 8;
	  mode = DImode;
	  gen_func.mov = gen_movdi;
	}
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
	{			/* move up to 8 bytes at a time */
	  move_bytes = (bytes > 8) ? 8 : bytes;
	  gen_func.movmemsi = gen_movmemsi_2reg;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  move_bytes = 4;
	  mode = SImode;
	  gen_func.mov = gen_movsi;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  move_bytes = 2;
	  mode = HImode;
	  gen_func.mov = gen_movhi;
	}
      else if (TARGET_STRING && bytes > 1)
	{			/* move up to 4 bytes at a time */
	  move_bytes = (bytes > 4) ? 4 : bytes;
	  gen_func.movmemsi = gen_movmemsi_1reg;
	}
      else /* move 1 byte at a time */
	{
	  move_bytes = 1;
	  mode = QImode;
	  gen_func.mov = gen_movqi;
	}

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
	{
	  rtx tmp_reg = gen_reg_rtx (mode);

	  emit_insn ((*gen_func.mov) (tmp_reg, src));
	  stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
	}

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
	{
	  int i;
	  for (i = 0; i < num_reg; i++)
	    emit_insn (stores[i]);
	  num_reg = 0;
	}

      if (mode == BLKmode)
	{
	  /* Move the address into scratch registers.  The movmemsi
	     patterns require zero offset.  */
	  if (!REG_P (XEXP (src, 0)))
	    {
	      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
	      src = replace_equiv_address (src, src_reg);
	    }
	  set_mem_size (src, move_bytes);

	  if (!REG_P (XEXP (dest, 0)))
	    {
	      rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
	      dest = replace_equiv_address (dest, dest_reg);
	    }
	  set_mem_size (dest, move_bytes);

	  emit_insn ((*gen_func.movmemsi) (dest, src,
					   GEN_INT (move_bytes & 31),
					   align_rtx));
	}
    }

  return 1;
}
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "{l|lwz} %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))
      {
	if (i == words-1)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = operands[2];
	    output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
	else if (i == 0)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	    output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
	    return "";
	  }
	else
	  {
	    for (j = 0; j < words; j++)
	      if (j != i)
		{
		  xop[0] = GEN_INT (j * 4);
		  xop[1] = operands[1];
		  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
		  output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
		}
	    xop[0] = GEN_INT (i * 4);
	    xop[1] = operands[1];
	    output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
      }

  return "{lsi|lswi} %2,%1,%N0";
}
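
/* In the common case where the base register is not also a destination,
   the single string-instruction template above is used; e.g. for a
   three-word vector it prints something like "lswi 5,9,12", loading 12
   bytes (r5, r6, r7) from the address in r9.  The exact %N0 expansion is
   handled by print_operand.  */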
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */

int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
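
/* Worked example (illustrative): for includes_lshift_p with SHIFTOP == 8,
   shift_mask is 0xffffff00, so ANDOP 0x0000ff00 qualifies (no bits outside
   the mask) while 0x000000ff does not; in the latter case the rotate
   cannot be rewritten as a plain shift left by 8.  */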
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */

int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, high, lsb;
      HOST_WIDE_INT shift_mask_low, shift_mask_high;

      low = CONST_DOUBLE_LOW (andop);
      if (HOST_BITS_PER_WIDE_INT < 64)
	high = CONST_DOUBLE_HIGH (andop);

      if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
	  || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  shift_mask_high = ~0;
	  if (INTVAL (shiftop) > 32)
	    shift_mask_high <<= INTVAL (shiftop) - 32;

	  lsb = high & -high;

	  if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
	    return 0;

	  high = ~high;
	  high &= -lsb;

	  lsb = high & -high;
	  return high == -lsb;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if (-lsb != shift_mask_low)
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64)
	high = ~high;
      low = ~low;
      low &= -lsb;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  lsb = high & -high;
	  return high == -lsb;
	}

      lsb = low & -low;
      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
    }
  else
    return 0;
}
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */

int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, lsb, shift_mask_low;

      low = CONST_DOUBLE_LOW (andop);

      if (HOST_BITS_PER_WIDE_INT < 64)
	{
	  HOST_WIDE_INT high, shift_mask_high;

	  high = CONST_DOUBLE_HIGH (andop);

	  if (low == 0)
	    {
	      shift_mask_high = ~0;
	      if (INTVAL (shiftop) > 32)
		shift_mask_high <<= INTVAL (shiftop) - 32;

	      lsb = high & -high;

	      if ((lsb & shift_mask_high) == 0)
		return 0;

	      return high == -lsb;
	    }
	  if (high != ~0)
	    return 0;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if ((lsb & shift_mask_low) == 0)
	return 0;

      return low == -lsb && lsb != 1;
    }
  else
    return 0;
}
/* Return 1 if operands will generate a valid arguments to rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of the instruction do not correspond to semantics of RTL insn.  */

int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
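
/* Example of a pair that passes (illustrative): mem1 with address
   (plus (reg 3) (const_int 8)) and mem2 with address
   (plus (reg 3) (const_int 16)) share base r3 and differ by exactly 8,
   so the peephole may fuse the two accesses into one lfq/stfq.  */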
static rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  rtx ret;

  if (mode != SDmode)
    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
	{
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  cfun->machine->sdmode_stack_slot = mem;
	  eliminated = true;
	}
      ret = mem;
    }

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
	       GET_MODE_NAME (mode));
      if (!ret)
	fprintf (stderr, "\tNULL_RTX\n");
      else
	debug_rtx (ret);
    }

  return ret;
}
/* Look for SDmode references in the tree being walked; used when scanning
   the function in rs6000_alloc_sdmode_stack_slot below.  */

static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case SSA_NAME:
    case REAL_CST:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
	return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}
enum reload_reg_type {
  GPR_REGISTER_TYPE,
  VECTOR_REGISTER_TYPE,
  OTHER_REGISTER_TYPE
};

static enum reload_reg_type
rs6000_reload_register_type (enum reg_class rclass)
{
  switch (rclass)
    {
    case GENERAL_REGS:
    case BASE_REGS:
      return GPR_REGISTER_TYPE;

    case FLOAT_REGS:
    case ALTIVEC_REGS:
    case VSX_REGS:
      return VECTOR_REGISTER_TYPE;

    default:
      return OTHER_REGISTER_TYPE;
    }
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+reg addressing.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 enum machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;

  sri->icode = CODE_FOR_nothing;

  /* Convert vector loads and stores into gprs to use an additional base
     register.  */
  icode = rs6000_vector_reload[mode][in_p != false];
  if (icode != CODE_FOR_nothing)
    {
      ret = NO_REGS;
      sri->icode = CODE_FOR_nothing;
      sri->extra_cost = 0;

      if (GET_CODE (x) == MEM)
	{
	  rtx addr = XEXP (x, 0);

	  /* Loads to and stores from gprs can do reg+offset, and wouldn't need
	     an extra register in that case, but it would need an extra
	     register if the addressing is reg+reg or (reg+reg)&(-16).  */
	  if (rclass == GENERAL_REGS || rclass == BASE_REGS)
	    {
	      if (!legitimate_indirect_address_p (addr, false)
		  && !rs6000_legitimate_offset_address_p (TImode, addr,
							  false, true))
		{
		  sri->icode = icode;
		  /* account for splitting the loads, and converting the
		     address from reg+reg to reg.  */
		  sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
				     + ((GET_CODE (addr) == AND) ? 1 : 0));
		}
	    }
	  /* Loads to and stores from vector registers can only do reg+reg
	     addressing.  Altivec registers can also do (reg+reg)&(-16).  */
	  else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
		   || rclass == FLOAT_REGS || rclass == NO_REGS)
	    {
	      if (!VECTOR_MEM_ALTIVEC_P (mode)
		  && GET_CODE (addr) == AND
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && INTVAL (XEXP (addr, 1)) == -16
		  && (legitimate_indirect_address_p (XEXP (addr, 0), false)
		      || legitimate_indexed_address_p (XEXP (addr, 0), false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
				     ? 2 : 1);
		}
	      else if (!legitimate_indirect_address_p (addr, false)
		       && (rclass == NO_REGS
			   || !legitimate_indexed_address_p (addr, false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = 1;
		}
	      else
		icode = CODE_FOR_nothing;
	    }
	  /* Any other loads, including to pseudo registers which haven't been
	     assigned to a register yet, default to require a scratch
	     register.  */
	  else
	    {
	      sri->icode = icode;
	      sri->extra_cost = 2;
	    }
	}
      else if (REG_P (x))
	{
	  int regno = true_regnum (x);

	  icode = CODE_FOR_nothing;
	  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	    default_p = true;
	  else
	    {
	      enum reg_class xclass = REGNO_REG_CLASS (regno);
	      enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
	      enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);

	      /* If memory is needed, use default_secondary_reload to create the
		 stack slot.  */
	      if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
		default_p = true;
	      else
		ret = NO_REGS;
	    }
	}
      else
	default_p = true;
    }
  else if (TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));

      if (off != NULL_RTX && (INTVAL (off) & 3) != 0)
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_di_load;
	  else
	    sri->icode = CODE_FOR_reload_di_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else if (!TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));

      if (off != NULL_RTX
	  && ((unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000
	      >= 0x10000u - (GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD)))
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_si_load;
	  else
	    sri->icode = CODE_FOR_reload_si_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (default_p)
	fprintf (stderr, ", default secondary reload");

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d\n",
		 insn_data[sri->icode].name, sri->extra_cost);
      else
	fprintf (stderr, "\n");

      debug_rtx (x);
    }

  return ret;
}
13718 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13719 to SP+reg addressing. */
13722 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
13724 int regno
= true_regnum (reg
);
13725 enum machine_mode mode
= GET_MODE (reg
);
13726 enum reg_class rclass
;
13728 rtx and_op2
= NULL_RTX
;
13731 rtx scratch_or_premodify
= scratch
;
13735 if (TARGET_DEBUG_ADDR
)
13737 fprintf (stderr
, "\nrs6000_secondary_reload_inner, type = %s\n",
13738 store_p
? "store" : "load");
13739 fprintf (stderr
, "reg:\n");
13741 fprintf (stderr
, "mem:\n");
13743 fprintf (stderr
, "scratch:\n");
13744 debug_rtx (scratch
);
13747 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
13748 gcc_assert (GET_CODE (mem
) == MEM
);
13749 rclass
= REGNO_REG_CLASS (regno
);
13750 addr
= XEXP (mem
, 0);
13754 /* GPRs can handle reg + small constant, all other addresses need to use
13755 the scratch register. */
13758 if (GET_CODE (addr
) == AND
)
13760 and_op2
= XEXP (addr
, 1);
13761 addr
= XEXP (addr
, 0);
13764 if (GET_CODE (addr
) == PRE_MODIFY
)
13766 scratch_or_premodify
= XEXP (addr
, 0);
13767 gcc_assert (REG_P (scratch_or_premodify
));
13768 gcc_assert (GET_CODE (XEXP (addr
, 1)) == PLUS
);
13769 addr
= XEXP (addr
, 1);
13772 if (GET_CODE (addr
) == PLUS
13773 && (and_op2
!= NULL_RTX
13774 || !rs6000_legitimate_offset_address_p (TImode
, addr
,
13777 addr_op1
= XEXP (addr
, 0);
13778 addr_op2
= XEXP (addr
, 1);
13779 gcc_assert (legitimate_indirect_address_p (addr_op1
, false));
13781 if (!REG_P (addr_op2
)
13782 && (GET_CODE (addr_op2
) != CONST_INT
13783 || !satisfies_constraint_I (addr_op2
)))
13785 if (TARGET_DEBUG_ADDR
)
13788 "\nMove plus addr to register %s, mode = %s: ",
13789 rs6000_reg_names
[REGNO (scratch
)],
13790 GET_MODE_NAME (mode
));
13791 debug_rtx (addr_op2
);
13793 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
13794 addr_op2
= scratch
;
13797 emit_insn (gen_rtx_SET (VOIDmode
,
13798 scratch_or_premodify
,
13799 gen_rtx_PLUS (Pmode
,
13803 addr
= scratch_or_premodify
;
13804 scratch_or_premodify
= scratch
;
13806 else if (!legitimate_indirect_address_p (addr
, false)
13807 && !rs6000_legitimate_offset_address_p (TImode
, addr
,
13810 if (TARGET_DEBUG_ADDR
)
13812 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
13813 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
13814 GET_MODE_NAME (mode
));
13817 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
13818 addr
= scratch_or_premodify
;
13819 scratch_or_premodify
= scratch
;
      /* Float/Altivec registers can only handle reg+reg addressing.  Move
	 other addresses into a scratch register.  */
    case FLOAT_REGS:
    case VSX_REGS:
    case ALTIVEC_REGS:

      /* With float regs, we need to handle the AND ourselves, since we can't
	 use the Altivec instruction with an implicit AND -16.  Allow scalar
	 loads to float registers to use reg+offset even if VSX.  */
      if (GET_CODE (addr) == AND
	  && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
	      || GET_CODE (XEXP (addr, 1)) != CONST_INT
	      || INTVAL (XEXP (addr, 1)) != -16
	      || !VECTOR_MEM_ALTIVEC_P (mode)))
	{
	  and_op2 = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}

      /* If we aren't using a VSX load, save the PRE_MODIFY register and use
	 it as the address later.  */
      if (GET_CODE (addr) == PRE_MODIFY
	  && (!VECTOR_MEM_VSX_P (mode)
	      || and_op2 != NULL_RTX
	      || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
	{
	  scratch_or_premodify = XEXP (addr, 0);
	  gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
						     false));
	  gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
	  addr = XEXP (addr, 1);
	}

      if (legitimate_indirect_address_p (addr, false)	/* reg */
	  || legitimate_indexed_address_p (addr, false)	/* reg+reg */
	  || GET_CODE (addr) == PRE_MODIFY		/* VSX pre-modify */
	  || (GET_CODE (addr) == AND			/* Altivec memory */
	      && GET_CODE (XEXP (addr, 1)) == CONST_INT
	      && INTVAL (XEXP (addr, 1)) == -16
	      && VECTOR_MEM_ALTIVEC_P (mode))
	  || (rclass == FLOAT_REGS			/* legacy float mem */
	      && GET_MODE_SIZE (mode) == 8
	      && and_op2 == NULL_RTX
	      && scratch_or_premodify == scratch
	      && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
	;

      else if (GET_CODE (addr) == PLUS)
	{
	  addr_op1 = XEXP (addr, 0);
	  addr_op2 = XEXP (addr, 1);
	  gcc_assert (REG_P (addr_op1));

	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch)],
		       GET_MODE_NAME (mode));
	      debug_rtx (addr_op2);
	    }
	  rs6000_emit_move (scratch, addr_op2, Pmode);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch_or_premodify,
				  gen_rtx_PLUS (Pmode,
						addr_op1,
						scratch)));
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
	       || GET_CODE (addr) == CONST_INT || REG_P (addr))
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch_or_premodify)],
		       GET_MODE_NAME (mode));
	      debug_rtx (addr);
	    }

	  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else
	gcc_unreachable ();

      break;

    default:
      gcc_unreachable ();
    }

  /* If the original address involved a pre-modify that we couldn't use with
     the VSX memory instruction with update, and which we haven't already
     taken care of, store the address in the pre-modify register and use that
     as the address.  */
  if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
      addr = scratch_or_premodify;
    }

  /* If the original address involved an AND -16 and we couldn't use an
     Altivec memory instruction, recreate the AND now, including the clobber
     which is generated by the general ANDSI3/ANDDI3 patterns for the
     andi. instruction.  */
  if (and_op2 != NULL_RTX)
    {
      if (! legitimate_indirect_address_p (addr, false))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
	  addr = scratch;
	}

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
		   rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
	  debug_rtx (and_op2);
	}

      and_rtx = gen_rtx_SET (VOIDmode,
			     scratch,
			     gen_rtx_AND (Pmode,
					  addr,
					  and_op2));

      cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2, and_rtx, cc_clobber)));
      addr = scratch;
    }

  /* Adjust the address if it changed.  */
  if (addr != XEXP (mem, 0))
    {
      mem = change_address (mem, mode, addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));

  return;
}
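
/* Illustrative sketch, not part of the original source: why an Altivec
   memory address can carry an explicit AND -16.  The lvx/stvx instructions
   ignore the low four address bits, which RTL models as
   (and (address) (const_int -16)); when that AND cannot be folded into the
   memory access, the code above re-materializes it with an andi. plus a CR0
   clobber.  The helper name below is hypothetical; the block is guarded out
   of the build and is for exposition only.  */
#if 0
#include <stdint.h>

static uintptr_t
altivec_effective_address (uintptr_t addr)
{
  /* Clearing the low 4 bits is exactly what AND -16 does.  */
  return addr & ~(uintptr_t) 15;
}
#endif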

/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

static void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      scratch_or_premodify = XEXP (addr, 0);
      gcc_assert (REG_P (scratch_or_premodify));
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));

  return;
}
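
/* Illustrative sketch, not part of the original source: the encoding limits
   that force the conversion above.  A 64-bit ld/std uses a DS-form offset
   (16-bit signed, low two bits zero), and a multi-gpr 32-bit move needs the
   offset of every word to fit a signed 16-bit D-form field.  The helper and
   its parameter names are hypothetical; guarded out of the build, for
   exposition only.  */
#if 0
static int
offset_fits_memory_form (long offset, int ds_form, int extra_words)
{
  long last = offset + 4L * extra_words;
  if (offset < -32768 || last > 32767)
    return 0;			/* too large for a 16-bit displacement */
  if (ds_form && (offset & 3) != 0)
    return 0;			/* DS-form requires a multiple of 4 */
  return 1;
}
#endif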

/* Allocate a 64-bit stack slot to be used for copying SDmode
   values through if this function has any SDmode references.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
	if (ret)
	  {
	    rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	    cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								  SDmode, 0);
	    return;
	  }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
	continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
	  || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
	{
	  rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	  cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								SDmode, 0);
	  return;
	}
    }
}

static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}

/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum machine_mode mode = GET_MODE (x);

  if (VECTOR_UNIT_VSX_P (mode)
      && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    return rclass;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && easy_vector_constant (x, mode))
    return ALTIVEC_REGS;

  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
    return NO_REGS;

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  /* For VSX, prefer the traditional registers for 64-bit values because we
     can use the non-VSX loads.  Prefer the Altivec registers if Altivec is
     handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
     prefer Altivec loads.  */
  if (rclass == VSX_REGS)
    {
      if (GET_MODE_SIZE (mode) <= 8)
	return FLOAT_REGS;

      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
	return ALTIVEC_REGS;

      return rclass;
    }

  return rclass;
}

/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}

/* If we are copying between FP or AltiVec registers and anything else, we
   need a memory location.  The exception is when we are targeting ppc64 and
   the move to/from fpr to gpr instructions are available.  Also, under VSX,
   you can copy vector registers from the FP register set to the Altivec
   register set and vice versa.  */

bool
rs6000_secondary_memory_needed (enum reg_class class1,
				enum reg_class class2,
				enum machine_mode mode)
{
  if (class1 == class2)
    return false;

  /* Under VSX, there are 3 register classes that values could be in
     (VSX_REGS, ALTIVEC_REGS, and FLOAT_REGS).  We don't need to use memory
     to copy between these classes.  But we need memory for other things that
     can go in FLOAT_REGS like SFmode.  */
  if (TARGET_VSX
      && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
      && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
	  || class1 == FLOAT_REGS))
    return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
	    && class2 != FLOAT_REGS);

  if (class1 == VSX_REGS || class2 == VSX_REGS)
    return true;

  if (class1 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class2 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
    return true;

  return false;
}

/* Debug version of rs6000_secondary_memory_needed.  */
bool
rs6000_debug_secondary_memory_needed (enum reg_class class1,
				      enum reg_class class2,
				      enum machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (class1, class2, mode);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
	   "class2 = %s, mode = %s\n",
	   ret ? "true" : "false", reg_class_names[class1],
	   reg_class_names[class2], GET_MODE_NAME (mode));

  return ret;
}
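
/* Illustrative sketch, not part of the original source: the decision above
   in miniature.  A direct GPR<->FPR copy has no instruction on most of
   these CPUs, so the value must bounce through memory unless -mmfpgpr on a
   64-bit target allows direct moves for 8-byte scalars.  Hypothetical,
   simplified model; guarded out of the build.  */
#if 0
enum toy_class { TOY_GPR, TOY_FPR };

static int
toy_needs_memory (enum toy_class c1, enum toy_class c2,
		  int mfpgpr_64bit, int mode_is_8_bytes)
{
  if (c1 == c2)
    return 0;
  return !(mfpgpr_64bit && mode_is_8_bytes);
}
#endif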

/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;

  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  */
  if (TARGET_VSX
      && (regno == -1 || VSX_REGNO_P (regno))
      && VSX_REG_CLASS_P (rclass))
    return NO_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}

/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     enum machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}

/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
				 enum machine_mode to,
				 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
      return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
	      && reg_classes_intersect_p (xclass, rclass));
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
	  || (((to) == TFmode) + ((from) == TFmode)) == 1
	  || (((to) == DDmode) + ((from) == DDmode)) == 1
	  || (((to) == TDmode) + ((from) == TDmode)) == 1
	  || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead
     of trying to check whether the modes are vector modes.  Otherwise it
     won't allow say DF and DI to change classes.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    return (from_size != 8 && from_size != 16);

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}

/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
				       enum machine_mode to,
				       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_cannot_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}

/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
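
/* Illustrative sketch, not part of the original source: the CCR bit
   numbering ccr_bit relies on.  Each CR field holds four bits, LT = 0,
   GT = 1, EQ = 2, SO/UNORDERED = 3, so e.g. the EQ bit of CR3 is
   4*3 + 2 = 14.  Hypothetical helper; guarded out of the build.  */
#if 0
static int
toy_ccr_bit (int cr_field, int bit_in_field)
{
  /* bit_in_field: 0 = LT, 1 = GT, 2 = EQ, 3 = SO/UNORDERED.  */
  return 4 * cr_field + bit_in_field;
}
/* toy_ccr_bit (3, 2) == 14, matching ccr_bit on an EQ test of CR3.  */
#endif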

/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_alloc_cleared_machine_function ();
}

/* These macros test for integers and extract the low-order bits.  */
#define INT_P(X)  \
((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)	\
 && GET_MODE (X) == VOIDmode)

#define INT_LOWPART(X) \
  (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))

int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}

int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
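
/* Illustrative sketch, not part of the original source: extract_MB and
   extract_ME on a wrap-around rlwinm mask.  For val = 0xff0000ff the mask
   begins (MB) at bit 24 and ends (ME) at bit 7 in IBM bit order, wrapping
   through bit 0.  The self-test below re-runs the two wrap-around loops
   above on that value; guarded out of the build.  */
#if 0
#include <assert.h>

static void
toy_mask_self_test (void)
{
  unsigned long val = 0xff0000ffUL;
  int mb, me;

  /* extract_MB wrap-around case: scan from the right for the first 0.  */
  mb = 31;
  while (((val >>= 1) & 1) != 0)
    mb--;
  val = 0xff0000ffUL;

  /* extract_ME wrap-around case: scan from the left for the first 0.  */
  me = 0;
  while (((val <<= 1) & 0x80000000UL) != 0)
    me++;

  assert (mb == 24 && me == 7);
}
#endif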

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_ld pattern.  */

static const char *
rs6000_get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
			 rs6000_get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

/* Helper function for rs6000_get_some_local_dynamic_name.  */

static int
rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      const char *str = XSTR (x, 0);
      if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  cfun->machine->some_ld_name = str;
	  return 1;
	}
    }

  return 0;
}

/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}

/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
    case '.':
      /* Write out an instruction after the call which may be replaced
	 with glue code by the loader.  This depends on the AIX version.  */
      asm_fprintf (file, RS6000_CALL_GLUE);
      return;

      /* %a is output_address.  */

    case 'A':
      /* If X is a constant integer whose low-order 5 bits are zero,
	 write 'l'.  Otherwise, write 'r'.  This is a kludge to fix a bug
	 in the AIX assembler where "sri" with a zero shift count
	 writes a trash instruction.  */
      if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
	putc ('l', file);
      else
	putc ('r', file);
      return;

    case 'b':
      /* If constant, low-order 16 bits of constant, unsigned.
	 Otherwise, write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
      else
	print_operand (file, x, 0);
      return;

    case 'B':
      /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
	 for 64-bit mask direction.  */
      putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
      return;

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'c':
      /* X is a CR register.  Print the number of the GT bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%c value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
      return;

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (REG_P (x));

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'I':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	}
      print_operand_address (file, x);
      fputs ("@l", file);
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode,
						     UNITS_PER_WORD),
				  0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'm':
      /* MB value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%m value");
      else
	fprintf (file, "%d", extract_MB (x));
      return;

    case 'M':
      /* ME value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%M value");
      else
	fprintf (file, "%d", extract_ME (x));
      return;

      /* %n outputs the negative of its operand.  */

    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || INT_LOWPART (x) < 0
	  || (i = exact_log2 (INT_LOWPART (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'Q':
      if (! TARGET_MFCRF)
	return;
      fputc (',', file);
      /* FALLTHRU */

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
      return;

    case 'S':
      /* PowerPC64 mask position.  All 0's is excluded.
	 CONST_INT 32-bit mask is considered sign-extended so any
	 transition must occur within the CONST_INT, not on the boundary.  */
      if (! mask64_operand (x, DImode))
	output_operand_lossage ("invalid %%S value");

      uval = INT_LOWPART (x);

      if (uval & 1)	/* Clear Left */
	{
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 64;
	}
      else		/* Clear Right */
	{
	  uval = ~uval;
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 63;
	}
      while (uval != 0)
	--i, uval >>= 1;
      gcc_assert (i >= 0);
      fprintf (file, "%d", i);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order 16 bits of constant for use in unsigned operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%u value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise,
	 write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'W':
      /* MB value for a PowerPC64 rldic operand.  */
      i = clz_hwi (GET_CODE (x) == CONST_INT
		   ? INTVAL (x) : CONST_DOUBLE_HIGH (x));

#if HOST_BITS_PER_WIDE_INT == 32
      if (GET_CODE (x) == CONST_INT && i > 0)
	i += 32;  /* zero-extend high-part was all 0's */
      else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
	i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
#endif

      fprintf (file, "%d", i);
      return;

    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;

    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* Mark the decl as referenced so that cgraph will output the
	 function.  */
      if (SYMBOL_REF_DECL (x))
	mark_decl_referenced (SYMBOL_REF_DECL (x));

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec or SPE memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	/* Ugly hack because %y is overloaded.  */
	if ((TARGET_SPE || TARGET_E500_DOUBLE)
	    && (GET_MODE_SIZE (GET_MODE (x)) == 8
		|| GET_MODE (x) == TFmode
		|| GET_MODE (x) == TImode))
	  {
	    /* Handle [reg].  */
	    if (REG_P (tmp))
	      {
		fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
		break;
	      }
	    /* Handle [reg+UIMM].  */
	    else if (GET_CODE (tmp) == PLUS &&
		     GET_CODE (XEXP (tmp, 1)) == CONST_INT)
	      {
		int x;

		gcc_assert (REG_P (XEXP (tmp, 0)));

		x = INTVAL (XEXP (tmp, 1));
		fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
		break;
	      }

	    /* Fall through.  Must be [reg+reg].  */
	  }
	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (XEXP (XEXP (x, 0), 1));
	  else
	    output_address (XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      assemble_name (file, rs6000_get_some_local_dynamic_name ());
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}

/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
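
/* Illustrative sketch, not part of the original source: why the indexed
   form above swaps its operands when the base is register 0.  In PowerPC
   indexed addressing ("rA,rB"), an rA of 0 is read as the literal value
   zero rather than as r0, so r0 must never be printed in the base slot.
   Hypothetical helper printing register numbers instead of names; guarded
   out of the build.  */
#if 0
#include <stdio.h>

static void
toy_print_indexed (FILE *f, int base_regno, int index_regno)
{
  /* Mirror of the REGNO () == 0 swap in print_operand_address.  */
  if (base_regno == 0)
    fprintf (f, "%d,%d", index_regno, base_regno);
  else
    fprintf (f, "%d,%d", base_regno, index_regno);
}
#endif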

/* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base && tocrel_offset != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}

/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the
	 text section.  */
      if (TARGET_RELOCATABLE
	  && in_section != toc_section
	  && !recurse
	  && GET_CODE (x) != CONST_INT
	  && GET_CODE (x) != CONST_DOUBLE
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "internal", "hidden", "protected"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif

enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
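
/* Illustrative sketch, not part of the original source: why reversing an FP
   compare must go through reverse_condition_maybe_unordered.  With a NaN
   operand, !(a < b) holds while (a >= b) does not, so the reverse of LT is
   UNGE, not GE.  Guarded out of the build; for exposition only.  */
#if 0
#include <assert.h>
#include <math.h>

static void
toy_reverse_lt_demo (void)
{
  double a = NAN, b = 1.0;
  assert (!(a < b));		/* reversed LT is taken ... */
  assert (!(a >= b));		/* ... but GE is not: the reverse is UNGE.  */
}
#endif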

/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value
     as an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* E500 FP compare instructions on the GPRs.  Yuck!  */
  if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
      && FLOAT_MODE_P (mode))
    {
      rtx cmp, or_result, compare_result2;
      enum machine_mode op_mode = GET_MODE (op0);

      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op1);

      /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
	 This explains the following mess.  */

      switch (code)
	{
	case EQ: case UNEQ: case NE: case LTGT:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result, op0, op1)
		: gen_cmpsfeq_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result, op0, op1)
		: gen_cmpdfeq_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result, op0, op1)
		: gen_cmptfeq_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfgt_gpr (compare_result, op0, op1)
		: gen_cmpsfgt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfgt_gpr (compare_result, op0, op1)
		: gen_cmpdfgt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfgt_gpr (compare_result, op0, op1)
		: gen_cmptfgt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsflt_gpr (compare_result, op0, op1)
		: gen_cmpsflt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdflt_gpr (compare_result, op0, op1)
		: gen_cmpdflt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttflt_gpr (compare_result, op0, op1)
		: gen_cmptflt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Synthesize LE and GE from LT/GT || EQ.  */
      if (code == LE || code == GE || code == LEU || code == GEU)
	{
	  emit_insn (cmp);

	  switch (code)
	    {
	    case LE: code = LT; break;
	    case GE: code = GT; break;
	    case LEU: code = LT; break;
	    case GEU: code = GT; break;
	    default: gcc_unreachable ();
	    }

	  compare_result2 = gen_reg_rtx (CCFPmode);

	  /* Do the EQ.  */
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result2, op0, op1)
		: gen_cmpsfeq_gpr (compare_result2, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result2, op0, op1)
		: gen_cmpdfeq_gpr (compare_result2, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result2, op0, op1)
		: gen_cmptfeq_gpr (compare_result2, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (cmp);

	  /* OR them together.  */
	  or_result = gen_reg_rtx (CCFPmode);
	  cmp = gen_e500_cr_ior_compare (or_result, compare_result,
					 compare_result2);
	  compare_result = or_result;
	  code = EQ;
	}
      else
	{
	  if (code == NE || code == LTGT)
	    code = NE;
	  else
	    code = EQ;
	}

      emit_insn (cmp);
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && GET_MODE (op0) == TFmode
	  && !TARGET_IEEEQUAD
	  && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (VOIDmode,
				  compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT; or2 = EQ; break;
	case GE: or1 = GT; or2 = EQ; break;
	case UNEQ: or1 = UNORDERED; or2 = EQ; break;
	case LTGT: or1 = LT; or2 = GT; break;
	case UNGT: or1 = UNORDERED; or2 = GT; break;
	case UNLT: or1 = UNORDERED; or2 = LT; break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
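
/* Illustrative sketch, not part of the original source: why the unsigned
   path above forces a negative immediate into a register.  An unsigned
   comparison views a sign-extended immediate such as -4 as a huge value
   (0xfffffffc), so the compare-immediate form of the instruction cannot be
   used.  Guarded out of the build; for exposition only.  */
#if 0
#include <assert.h>

static void
toy_unsigned_cmp_demo (void)
{
  unsigned int x = 2;
  assert (x < (unsigned int) -4);	/* -4 compares as 0xfffffffc */
}
#endif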

/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}

void
rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  if (TARGET_ISEL && (mode == SImode || mode == DImode))
    {
      rs6000_emit_sISEL (mode, operands);
      return;
    }

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (FLOAT_MODE_P (mode)
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
	emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}

/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}

/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
	 to the GT bit.  */
      switch (code)
	{
	case EQ:
	  /* Opposite of GT.  */
	  code = GT;
	  break;

	case NE:
	  code = UNLE;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.
     The old mnemonics don't have a way to specify this information.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
	 cpus as static prediction overrides processor dynamic
	 prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
  else
    s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
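
/* Illustrative sketch, not part of the original source: the
   prediction-hint rule above in isolation.  PROB is the note's probability
   minus half of REG_BR_PROB_BASE; a "+" or "-" suffix is attached only when
   the branch is far enough from even (or the cpu always hints), with "+"
   reserved for a strong bias in the branch's effective direction.
   Hypothetical constants and names; guarded out of the build.  */
#if 0
#include <stdlib.h>

#define TOY_PROB_BASE 10000

static const char *
toy_branch_hint (int prob, int need_longbranch, int always_hint)
{
  if (always_hint || abs (prob) > TOY_PROB_BASE / 100 * 48)
    {
      if (abs (prob) > TOY_PROB_BASE / 20 && ((prob > 0) ^ need_longbranch))
	return "+";
      else
	return "-";
    }
  return "";
}
#endif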

/* Return the string to flip the GT bit on a CR.  */
char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}

/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  enum machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode,
			      mask,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}

/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    enum machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;

	  case GEU:
	    new_code = GTU;
	    break;

	  case LE:
	    new_code = LT;
	    break;

	  case LEU:
	    new_code = LTU;
	    break;

	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	{
	  rtx tmp;
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
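
/* Illustrative sketch, not part of the original source: the identities the
   recursion above relies on.  The hardware offers EQ/GT/GTU vector
   compares, so LT is GT with swapped operands, LE is (LT || EQ), and NE is
   the complement of EQ.  The scalar self-test below checks the same
   identities; guarded out of the build.  */
#if 0
#include <assert.h>

static void
toy_vector_compare_identities (void)
{
  int a = 3, b = 7;

  /* LT is GT with the operands swapped.  */
  assert ((a < b) == (b > a));

  /* LE is (LT || EQ); NE is !(EQ).  */
  assert ((a <= b) == ((a < b) || (a == b)));
  assert ((a != b) == (!(a == b)));
}
#endif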

/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum machine_mode dest_mode = GET_MODE (dest);
  enum machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  enum machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  rtx tmp;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

      /* Mark unsigned tests with CCUNSmode.  */
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      cc_mode = CCUNSmode;
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    {
      tmp = op_true;
      op_true = op_false;
      op_false = tmp;
    }

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (VOIDmode,
			  dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
	   && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
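/* Worked example (illustrative, not part of the original source): for
   "dest = (a >= b) ? x : y" in DFmode with NaNs not honored, the code
   above reduces the test to "t = a - b; t >= 0", which maps onto a
   single fsel:

	fsub t,a,b
	fsel dest,t,x,y		# dest = (t >= 0.0) ? x : y  */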
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  enum machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	rtx t = true_cond;
	true_cond = false_cond;
	false_cond = t;
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
		  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
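/* Illustrative output (a sketch, not part of the original source):
   "isel %0,%2,%3,%j1" might print as "isel 3,4,5,0", i.e.
   r3 = (CR bit 0 set) ? r4 : r5.  For the reversed conditions above,
   operands 2 and 3 simply swap places in the template.  */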
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (VOIDmode,
			      dest,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_reg_note (x, REG_BR_PROB, very_unlikely);
}
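/* Typical use in the expanders below (a minimal sketch):

	x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
	emit_unlikely_jump (x, label);

   i.e. loop back to the load-locked label only when the store
   conditional failed, which we expect to be rare.  */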
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
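/* In effect this is the usual PowerPC mapping (a summary, not part of
   the original source): release and acq_rel place an lwsync before the
   sequence, seq_cst a full hwsync before, and any model with acquire
   semantics an isync after, while relaxed and consume add no fences.  */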
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
			       shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
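/* Worked example (illustrative, not part of the original source): a
   QImode access at address 0x1003 yields align = 0x1000; the rlwinm
   computes (0x1003 << 3) & 0x18 = 0x18, and the XOR with 0x18 gives
   shift = 0 -- the last byte of the big-endian word needs no shift --
   while the byte at 0x1000 would get shift = 24.  The insertion mask
   is then 0xFF << shift.  */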
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask),
				       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  enum machine_mode mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask OLDVAL into position within the word.  */
      oldval = convert_modes (SImode, mode, oldval, 1);
      oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Shift and mask NEWVAL into position within the word.  */
      newval = convert_modes (SImode, mode, newval, 1);
      newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  x = gen_rtx_COMPARE (CCmode, x, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
}
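/* The emitted sequence is the classic PowerPC compare-and-swap loop
   (a sketch, not part of the original source), here for SImode:

	loop:	lwarx   r,0,mem		# load and reserve
		cmpw    r,oldval
		bne-    fail		# mismatch: give up
		stwcx.  newval,0,mem	# store iff reservation held
		bne-    loop		# lost reservation: retry
	fail:

   bracketed by the fences from rs6000_pre/post_atomic_barrier.  */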
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  enum machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = (enum memmodel) INTVAL (model_rtx);
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      switch (code)
	{
	case IOR:
	case XOR:
	  /* We've already zero-extended VAL.  That is sufficient to
	     make certain that it does not affect other bits.  */
	  break;

	case AND:
	  /* If we make certain that all of the other bits in VAL are
	     set, that will be sufficient to not affect other bits.  */
	  x = gen_rtx_NOT (SImode, mask);
	  x = gen_rtx_IOR (SImode, x, val);
	  emit_insn (gen_rtx_SET (VOIDmode, val, x));
	  break;

	case NOT:
	case PLUS:
	case MINUS:
	  /* These will all affect bits outside the field and need
	     adjustment via MASK within the loop.  */
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Prepare to adjust the return value.  */
      before = gen_reg_rtx (SImode);
      if (after)
	after = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
				 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
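/* For example (a sketch, not part of the original source), a full-word
   SImode fetch-and-add expands to roughly

	loop:	lwarx   before,0,mem
		add     after,before,val
		stwcx.  after,0,mem
		bne-    loop

   while the NOT (nand) case is computed as ~(before & val), and the
   sub-word variants merge AFTER back through MASK so that neighboring
   bytes of the aligned word are left intact.  */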
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (VOIDmode,
				simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode, ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg,
						       delta_rtx, nsrc)
				: gen_movsi_update (breg, breg,
						    delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg,
						    delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode,
					      gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg,
						   offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  j++;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (VOIDmode,
				  simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
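/* Worked example (illustrative, not part of the original source): a
   DImode load into r3-r4 on 32-bit code emits two SImode moves at
   offsets 0 and 4; if the memory's base register is itself r3 or r4,
   the J rotation above loads that word last so the address survives
   the earlier load.  */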
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static inline bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}
/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.)  */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (GET_CODE (insn) == CALL_INSN
	    && SIBLING_CALL_P (insn))
	  {
	    info_ptr->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info_ptr->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
	info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info_ptr->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
  return;
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};
static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;
  bool lr_save_p;

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
      && info->first_gp_reg_save < 31
      && !global_regs_p (info->first_gp_reg_save, 32))
    strategy |= SAVRES_MULTIPLE;

  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
      || global_regs_p (info->first_fp_reg_save, 64))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_gp_reg_save == 32
      || (!(strategy & SAVRES_MULTIPLE)
	  && global_regs_p (info->first_gp_reg_save, 32)))
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
      || global_regs_p (info->first_altivec_reg_save,
			LAST_ALTIVEC_REGNO + 1))
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
      if (info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_FPRS;
	    break;
	  }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
	       || !(strategy & SAVE_INLINE_GPRS)
	       || !(strategy & SAVE_INLINE_FPRS)
	       || !(strategy & SAVE_INLINE_VRS)
	       || !(strategy & REST_INLINE_FPRS)
	       || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; in those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
	  || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE, so
     just punt here.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_GPRS;
	    break;
	  }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (P)		| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+X+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+X+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (P)		| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| SPE: area for 64-bit GP registers	|
		+---------------------------------------+
		| SPE alignment padding			|
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t stack_info;

static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
	cfun->machine->insn_chain_scanned_p
	  = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So...  since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
				 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! crtl->is_leaf
		       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
		       && info_ptr->spe_64bit_regs_used != 0
		       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size     = reg_size;
  info_ptr->fixed_size   = RS6000_SAVE_AREA;
  info_ptr->vars_size    = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size    = RS6000_ALIGN (crtl->outgoing_args_size,
					 TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
		       + info_ptr->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info_ptr->fixed_size + info_ptr->vars_size
	    + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset
				   - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = info_ptr->vrsave_save_offset & 0xF;
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;
	  gcc_assert (info_ptr->altivec_size == 0
		      || info_ptr->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
	}
      else
	info_ptr->ehrd_offset    = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset   = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset   = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset
				   - info_ptr->gp_size;
      info_ptr->cr_save_offset   = info_ptr->gp_save_offset
				   - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
	{
	  /* Align stack so SPE GPR save area is aligned on a
	     double-word boundary.  */
	  if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
	    info_ptr->spe_padding_size
	      = 8 - (-info_ptr->cr_save_offset % 8);
	  else
	    info_ptr->spe_padding_size = 0;

	  info_ptr->spe_gp_save_offset
	    = info_ptr->cr_save_offset
	    - info_ptr->spe_padding_size
	    - info_ptr->spe_gp_size;

	  /* Adjust for SPE case.  */
	  info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
	}
      else if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = 16 - (-info_ptr->vrsave_save_offset % 16);
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
	}
      else
	info_ptr->ehrd_offset    = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset      -= ehrd_size;
      info_ptr->lr_save_offset   = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size    = RS6000_ALIGN (info_ptr->fp_size
					 + info_ptr->gp_size
					 + info_ptr->altivec_size
					 + info_ptr->altivec_padding_size
					 + info_ptr->spe_gp_size
					 + info_ptr->spe_padding_size
					 + ehrd_size
					 + info_ptr->cr_size
					 + info_ptr->vrsave_size,
					 save_align);

  non_fixed_size	 = (info_ptr->vars_size
			    + info_ptr->parm_size
			    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
				       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info_ptr->calls_p
      || (DEFAULT_ABI == ABI_AIX
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || rs6000_ra_ever_killed ())
    info_ptr->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
						      using_static_chain_p);

  if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
      || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_VRS))
    info_ptr->lr_save_p = 1;

  if (info_ptr->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
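/* Worked example (illustrative, not part of the original source): a
   32-bit AIX-ABI function that must save r30-r31 and f31 gets
   gp_size = 4*2 = 8 and fp_size = 8*1 = 8, hence fp_save_offset = -8
   and gp_save_offset = -16; save_size is then rounded up to the 8- or
   16-byte save_align, and total_size to ABI_STACK_BOUNDARY.  */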
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx i;

	  /* FIXME: This should be implemented with attributes...

		 (set_attr "spe64" "true")....then,
		 if (get_spe64(insn)) return true;

	     It's the only reliable way to do the stuff below.  */

	  i = PATTERN (insn);
	  if (GET_CODE (i) == SET)
	    {
	      enum machine_mode mode = GET_MODE (SET_SRC (i));

	      if (SPE_VECTOR_MODE (mode))
		return true;
	      if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
		return true;
	    }
	}
    }

  return false;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";	break;
    case ABI_NONE:	 abi_string = "NONE";		break;
    case ABI_AIX:	 abi_string = "AIX";		break;
    case ABI_DARWIN:	 abi_string = "Darwin";		break;
    case ABI_V4:	 abi_string = "V.4";		break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n",
	     info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n",
	     info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
	     info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n",
	     info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
	     info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       = %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
             (Pmode,
              plus_constant (Pmode,
                             copy_to_reg
                               (gen_rtx_MEM (Pmode,
                                             memory_address (Pmode, frame))),
                             RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
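/* Illustrative note (a sketch, not part of the original sources): for
   __builtin_return_address (0) on AIX the second path above just reads the
   incoming LR, while __builtin_return_address (1) follows the back chain
   and loads the LR save slot, roughly:

       ld r9,0(r1)       # frame = *sp (back chain)
       ld r3,16(r9)      # *(frame + RETURN_ADDRESS_OFFSET), 64-bit AIX

   The slot offset (8 for 32-bit, 16 for 64-bit AIX) and load width follow
   from RETURN_ADDRESS_OFFSET and the selected ABI.  */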
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
         prototype, so the argument type info must be available
         here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg--;

      if (nvreg > 0)
        return false;
    }

  /* Under the AIX ABI we can't allow calls to non-local functions,
     because the callee may have a different TOC pointer to the
     caller and there's no way to ensure we restore the TOC when we
     return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || (DEFAULT_ABI == ABI_AIX
          && decl
          && !DECL_EXTERNAL (decl)
          && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
          && (!TARGET_SECURE_PLT
              || !flag_pic
              || (decl
                  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
          || lookup_attribute ("shortcall", attr_list))
        return true;
    }

  return false;
}
/* NULL if INSN insn is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
static int
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

        move LR->R0
        bcl to set PIC register
        move LR->R31
        move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            {
              if (!SIBLING_CALL_P (insn))
                return 1;
            }
          else if (find_regno_note (insn, REG_INC, LR_REGNO))
            return 1;
          else if (set_of (reg, insn) != NULL_RTX
                   && !prologue_epilogue_contains (insn))
            return 1;
        }
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
        got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
        got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
        {
          tmp1 = gen_reg_rtx (Pmode);
          tmp2 = gen_reg_rtx (Pmode);
        }
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
                   ? gen_rtx_REG (Pmode, 0)
                   : gen_reg_rtx (Pmode));

      if (fromprolog)
        {
          rtx symF, symL;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          emit_insn (gen_load_toc_v4_PIC_1 (symF));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
        }
      else
        {
          rtx tocsym, lab;

          tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
          lab = gen_label_rtx ();
          emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          if (TARGET_LINK_STACK)
            emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
          emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
        }
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
        emit_insn (gen_load_toc_aix_si (dest));
      else
        emit_insn (gen_load_toc_aix_di (dest));
    }
}
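/* Illustrative sketch (not from the original file): the flag_pic == 1 case
   above expands to a branch-and-link whose only purpose is to deposit the
   address of the next instruction in LR, followed by an mflr into the PIC
   register, roughly:

       bcl 20,31,.+4        # LR = address of next insn
       mflr 30              # r30 = PIC/GOT anchor

   The exact instruction forms come from the load_toc_v4_* patterns in
   rs6000.md, so treat this only as the shape of the output.  */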
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
          || cfun->calls_alloca
          || info->total_size > 32767)
        {
          tmp = gen_frame_mem (Pmode, frame_rtx);
          emit_move_insn (operands[1], tmp);
          frame_rtx = operands[1];
        }
      else if (info->push_p)
        sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
                           info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        rtx pat = PATTERN (insn);
        int i;

        if (GET_CODE (pat) == PARALLEL)
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              rtx sub = XVECEXP (pat, 0, i);
              if (GET_CODE (sub) == USE)
                {
                  sub = XEXP (sub, 0);
                  if (GET_CODE (sub) == UNSPEC
                      && XINT (sub, 1) == UNSPEC_TOC)
                    return 1;
                }
            }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
        fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
                 XSTR (symbol, 0));
      else
        {
          fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
                   GET_RTX_NAME (GET_CODE (symbol)));
          debug_rtx (symbol);
        }
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
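/* Illustrative example (a sketch, not original text): for the small code
   model the function returns the bare unspec,

       (unspec:DI [(symbol_ref "x") (reg:DI 2)] UNSPEC_TOCREL)

   which matches a single TOC-relative load, while for the medium/large
   models after register allocation it returns

       (lo_sum:DI (high:DI (unspec ...)) (unspec ...))

   so the address is materialized with an addis/addi (or addis/ld) pair
   against r2, per the HIGH/LO_SUM construction above.  */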
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
           (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
        || (hard_frame_needed
            && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
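/* For illustration (a sketch, not original source text): with both r1 and
   the hard frame pointer live, the tie emitted above looks like

       (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                  (set (mem:BLK (reg 31)) (const_int 0))])

   The BLKmode MEMs carry the frame alias set, so the scheduler cannot move
   frame saves and restores across the stack-pointer update.  */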
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
          && REGNO (stack_limit_rtx) > 1
          && REGNO (stack_limit_rtx) <= 31)
        {
          emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
               && TARGET_32BIT
               && DEFAULT_ABI == ABI_V4)
        {
          rtx toload = gen_rtx_CONST (VOIDmode,
                                      gen_rtx_PLUS (Pmode,
                                                    stack_limit_rtx,
                                                    GEN_INT (size)));

          emit_insn (gen_elf_high (tmp_reg, toload));
          emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else
        warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
        emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
        emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
        emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
                    ? gen_movsi_update_stack (stack_reg, stack_reg,
                                              todec, stack_reg)
                    : gen_movdi_di_update_stack (stack_reg, stack_reg,
                                                 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                gen_rtx_SET (VOIDmode, stack_reg,
                             gen_rtx_PLUS (Pmode, stack_reg,
                                           GEN_INT (-size))));
}
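/* Illustrative output (approximate; depends on -m32/-m64 and frame size):
   a small 32-bit frame allocates with a single update-form store that also
   writes the back chain,

       stwu r1,-64(r1)

   while a frame larger than 32767 bytes first materializes the negated
   size in r0 and uses the indexed update form,

       lis r0,-2            # r0 = -131072, example value
       stwux r1,r1,r0

   matching the gen_movsi_update_stack/gen_movdi_di_update_stack calls.  */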
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
         it exceeds SIZE.  If only one probe is needed, this will not
         generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                         -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, r12,
                              plus_constant (Pmode, stack_pointer_rtx,
                                             -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
        {
          emit_move_insn (r0, GEN_INT (-rounded_size));
          emit_insn (gen_rtx_SET (VOIDmode, r0,
                                  gen_rtx_PLUS (Pmode, r12, r0)));
        }
      else
        emit_insn (gen_rtx_SET (VOIDmode, r0,
                                plus_constant (Pmode, r12, -rounded_size)));

      /* Step 3: the loop

         while (TEST_ADDR != LAST_ADDR)
           {
             TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
             probe at TEST_ADDR
           }

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
        emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
      else
        emit_insn (gen_probe_stack_rangesi (r12, r12, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
         that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
        emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
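/* Worked example (illustrative): with PROBE_INTERVAL == 4096,
   first == 16384 and size == 10000, first + size == 26384 <= 32768, so the
   small case above emits probes at SP - 20480, SP - 24576 and finally
   SP - 26384.  In the loop case, the same size would give
   rounded_size == 8192, with the residual probed separately in step 4.
   All offsets stay within the 16-bit displacement range by construction.  */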
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("{cmp|cmpd} 0,%0,%1", xops);
  else
    output_asm_insn ("{cmp|cmpw} 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("{cal %0,%1(%0)|addi %0,%0,%1}", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("{st|stw} %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
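/* Illustrative expansion of the loop printed above, for a 64-bit target
   with PROBE_INTERVAL == 4096 (labels are generated, numbers vary):

   .LPSRL0:
        cmpd 0,r12,r0        # TEST_ADDR == LAST_ADDR ?
        beq 0,.LPSRE0
        addi r12,r12,-4096   # TEST_ADDR -= PROBE_INTERVAL
        stw r0,0(r12)        # probe the word at TEST_ADDR
        b .LPSRL0
   .LPSRE0:

   Note the probe store reuses r0 as its source, per the xops[1]
   assignment above.  */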
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
                      rtx reg2, rtx rreg)
{
  rtx real, temp;

  if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      int i;

      gcc_checking_assert (val == 0);
      real = PATTERN (insn);
      if (GET_CODE (real) == PARALLEL)
        for (i = 0; i < XVECLEN (real, 0); i++)
          if (GET_CODE (XVECEXP (real, 0, i)) == SET)
            {
              rtx set = XVECEXP (real, 0, i);

              RTX_FRAME_RELATED_P (set) = 1;
            }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    gcc_checking_assert (val == 0);
  else
    real = replace_rtx (real, reg,
                        gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
                                                          STACK_POINTER_REGNUM),
                                      GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
        SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
        SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
        {
          temp = simplify_rtx (XEXP (SET_DEST (set), 0));
          if (temp)
            XEXP (SET_DEST (set), 0) = temp;
        }
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
        if (GET_CODE (XVECEXP (real, 0, i)) == SET)
          {
            rtx set = XVECEXP (real, 0, i);

            temp = simplify_rtx (SET_SRC (set));
            if (temp)
              SET_SRC (set) = temp;
            temp = simplify_rtx (SET_DEST (set));
            if (temp)
              SET_DEST (set) = temp;
            if (GET_CODE (SET_DEST (set)) == MEM)
              {
                temp = simplify_rtx (XEXP (SET_DEST (set), 0));
                if (temp)
                  XEXP (SET_DEST (set), 0) = temp;
              }
            RTX_FRAME_RELATED_P (set) = 1;
          }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

  return insn;
}
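/* Example of the note attached above (illustrative): if r11 holds
   sp + 32 and the insn stores r31 at offset 8 from r11, the
   REG_FRAME_RELATED_EXPR becomes

       (set (mem (plus (reg 1) (const_int 40))) (reg 31))

   i.e. the r11-relative address rewritten in terms of r1, which is the
   form dwarf2out needs to emit the save rule relative to the CFA.  */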
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
                   vrsave,
                   gen_rtx_UNSPEC_VOLATILE (SImode,
                                            gen_rtvec (2, reg, vrsave),
                                            UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

        (set (reg 999) (mem))
        (parallel [ (set (reg vrsave) (unspec blah))
                    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
        if (!epiloguep || call_used_regs[i])
          clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
                                             gen_rtx_REG (V4SImode, i));
        else
          {
            rtx reg = gen_rtx_REG (V4SImode, i);

            clobs[nclobs++]
              = gen_rtx_SET (VOIDmode,
                             reg,
                             gen_rtx_UNSPEC (V4SImode,
                                             gen_rtvec (1, reg), 27));
          }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
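/* Usage sketch (illustrative; sp_reg is an assumed local name): saving r30
   at sp+8 and reloading it later is simply

       emit_insn (gen_frame_store (gen_rtx_REG (Pmode, 30), sp_reg, 8));
       emit_insn (gen_frame_load  (gen_rtx_REG (Pmode, 30), sp_reg, 8));

   Both expand to a SET with a frame-alias-set MEM on one side, per
   gen_frame_set above.  */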
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
                         || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
                         || (TARGET_E500_DOUBLE && mode == DFmode)
                         || (TARGET_SPE_ABI
                             && SPE_VECTOR_MODE (mode)
                             && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
                               NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - SPE targets use different prefixes for 32/64-bit registers, and
       neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
        prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
        prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
        goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = ((sel & SAVRES_SAVE)
                  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
                  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        {
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
          if ((sel & SAVRES_LR))
            prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
          else
#endif
            {
              prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
              suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
            }
        }
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
         compatible with code generated by the system toolchain.  There is a
         single symbol for the start of save sequence, and the code here
         embeds an offset into that code on the basis of the first register
         to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
                 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
                 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
                 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
                 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
        abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
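/* Examples of the names produced (illustrative): saving GPRs from r29 up
   yields "_savegpr_29" on 32-bit SVR4, "_savegpr0_29" (LR variant) or
   "_savegpr1_29" on 64-bit ELF, "_save32gpr_29" on SPE, and text like
   "*saveGPR+64 ; save r29-r31" on Darwin, where the byte offset into the
   single save-sequence symbol is embedded directly in the name.  */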
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save - 32
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
               : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
              && regno <= LAST_SAVRES_REGISTER
              && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
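/* Summary of the above (illustrative): AIX uses r1 for FPR and LR-variant
   routines and r12 otherwise; Darwin uses r1 for FPR routines and r11
   otherwise; 32-bit SVR4 always gets r11.  These choices must agree with
   the register conventions of the out-of-line routines themselves.  */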
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
                        rtx frame_reg_rtx, int save_area_offset, int lr_offset,
                        enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save
               : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
             ? 32
             : (sel & SAVRES_REG) == SAVRES_FPR
             ? 64
             : (sel & SAVRES_REG) == SAVRES_VR
             ? LAST_ALTIVEC_REGNO + 1
             : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
                   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
                   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
        = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
                       frame_reg_rtx, save_area_offset + reg_size * i,
                       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
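/* Shape of the emitted insn (illustrative), for an out-of-line GPR save of
   r29..r31 with the LR variant:

       (parallel [(clobber (reg:P 65))              ; LR
                  (use (symbol_ref "_savegpr0_29"))
                  (use (reg:P 12))                  ; pointer register
                  (set (mem ...) (reg 29)) ...      ; one SET per register
                  (set (mem ...) (reg 0))])         ; LR save via r0

   A restore with SAVRES_LR instead begins with a return rtx and is emitted
   as a jump_insn, per the two branches above.  */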
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
           && (!call_used_regs[reg]
               || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                   && !TARGET_SINGLE_PIC_BASE
                   && TARGET_TOC && TARGET_MINIMAL_TOC)))
          || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
              && !TARGET_SINGLE_PIC_BASE
              && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
                              && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
                              && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) == 0); \
    reg_inuse |= 1 << (R);                      \
  } while (0)
#define END_USE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) != 0); \
    reg_inuse &= ~(1 << (R));                   \
  } while (0)
#define NOT_INUSE(R) do \
  {                                             \
    gcc_assert ((reg_inuse & (1 << (R))) == 0); \
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif
  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
         address by modifying the first 5 instructions of the function
         to branch to the overriding function.  This is necessary to
         permit function pointers that point to the old function to
         actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }
  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0. */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
        {
          insn = emit_move_insn (reg0,
                                 gen_rtx_REG (Pmode, LR_REGNO));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
         assumptions about the offsets of various bits of the stack
         frame.  */
      gcc_assert (info->gp_save_offset == -220
                  && info->fp_save_offset == -144
                  && info->lr_save_offset == 8
                  && info->cr_save_offset == 4
                  && info->push_p
                  && info->lr_save_p
                  && (!crtl->calls_eh_return
                      || info->ehrd_offset == -432)
                  && info->vrsave_save_offset == -224
                  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
         in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
                                            gen_rtx_REG (SImode,
                                                         LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
                                        gen_rtx_SYMBOL_REF (Pmode,
                                                            "*save_world"));
      /* We do floats first so that the instruction pattern matches
         properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                                          ? DFmode : SFmode,
                                          info->first_fp_reg_save + i),
                             frame_reg_rtx,
                             info->fp_save_offset + frame_off + 8 * i);
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (V4SImode,
                                          info->first_altivec_reg_save + i),
                             frame_reg_rtx,
                             info->altivec_save_offset + frame_off + 16 * i);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, j++)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);

      /* CR register traditionally saved as CR2.  */
      RTVEC_ELT (p, j++)
        = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
                           frame_reg_rtx, info->cr_save_offset + frame_off);
      /* Explain about use of R0.  */
      if (info->lr_save_p)
        RTVEC_ELT (p, j++)
          = gen_frame_store (reg0,
                             frame_reg_rtx, info->lr_save_offset + frame_off);
      /* Explain what happens to the stack pointer.  */
      {
        rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
        RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            treg, GEN_INT (-info->total_size));
      sp_off = frame_off = info->total_size;
    }
  strategy = info->savres_strategy;

  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
          || crtl->calls_eh_return))
    {
      bool need_r11 = (TARGET_SPE
                       ? (!(strategy & SAVE_INLINE_GPRS)
                          && info->spe_64bit_regs_used == 0)
                       : (!(strategy & SAVE_INLINE_FPRS)
                          || !(strategy & SAVE_INLINE_GPRS)
                          || !(strategy & SAVE_INLINE_VRS)));
      int ptr_regno = -1;
      rtx ptr_reg = NULL_RTX;
      int ptr_off = 0;

      if (info->total_size < 32767)
        frame_off = info->total_size;
      else if (need_r11)
        ptr_regno = 11;
      else if (info->cr_save_p
               || info->lr_save_p
               || info->first_fp_reg_save < 64
               || info->first_gp_reg_save < 32
               || info->altivec_size != 0
               || info->vrsave_mask != 0
               || crtl->calls_eh_return)
        ptr_regno = 12;
      else
        {
          /* The prologue won't be saving any regs so there is no need
             to set up a frame register to access any frame save area.
             We also won't be using frame_off anywhere below, but set
             the correct value anyway to protect against future
             changes to this function.  */
          frame_off = info->total_size;
        }
      if (ptr_regno != -1)
        {
          /* Set up the frame offset to that needed by the first
             out-of-line save function.  */
          START_USE (ptr_regno);
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          frame_reg_rtx = ptr_reg;
          if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
            gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
          else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
            ptr_off = info->gp_save_offset + info->gp_size;
          else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
            ptr_off = info->altivec_save_offset + info->altivec_size;
          frame_off = -ptr_off;
        }
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
        rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
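  /* Illustrative shape of the code emitted above for a small V.4 frame
     (total_size 64): a single "stwu r1,-64(r1)" with frame_off == 64, so
     the register saves below address their slots as offset + frame_off
     from the new r1.  (Sketch only; large frames instead point r11/r12 at
     the save area and run with a negative frame_off.)  */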
  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
                        | SAVE_NOINLINE_FPRS_SAVES_LR)))
        {
          addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->lr_save_offset + frame_off));
          mem = gen_rtx_MEM (Pmode, addr);
          /* This should not be of rs6000_sr_alias_set, because of
             __builtin_return_address.  */

          insn = emit_move_insn (mem, reg);
          rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                                NULL_RTX, NULL_RTX);
          END_USE (0);
        }
    }
  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = (DEFAULT_ABI == ABI_AIX
                   && !(strategy & (SAVE_INLINE_GPRS
                                    | SAVE_NOINLINE_GPRS_SAVES_LR))
                   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11))
    {
      rtx set;

      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Now, there's no way that dwarf2out_frame_debug_expr is going
         to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
         But that's OK.  All we have to do is specify that _one_ condition
         code register is saved in this stack slot.  The thrower's epilogue
         will then restore all the call-saved registers.
         We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux.  */
      set = gen_rtx_SET (VOIDmode, cr_save_rtx,
                         gen_rtx_REG (SImode, CR2_REGNO));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
    }
  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int i;
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
        if (save_reg_p (info->first_fp_reg_save + i))
          emit_frame_save (frame_reg_rtx,
                           (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
                            ? DFmode : SFmode),
                           info->first_fp_reg_save + i,
                           info->fp_save_offset + frame_off + 8 * i,
                           sp_off - frame_off);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
        gcc_checking_assert (frame_off == 0);
      else
        {
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg,
                                    frame_reg_rtx, GEN_INT (frame_off)));
        }
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->fp_save_offset,
                                     info->lr_save_offset,
                                     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }
  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_SPE_ABI
      && info->spe_64bit_regs_used != 0
      && info->first_gp_reg_save != 32)
    {
      int i;
      rtx spe_save_area_ptr;
      HOST_WIDE_INT save_off;
      int ool_adjust = 0;

      /* Determine whether we can address all of the registers that need
         to be saved with an offset from frame_reg_rtx that fits in
         the small const field for SPE memory instructions.  */
      int spe_regs_addressable
        = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
                                + reg_size * (32 - info->first_gp_reg_save - 1))
           && (strategy & SAVE_INLINE_GPRS));

      if (spe_regs_addressable)
        {
          spe_save_area_ptr = frame_reg_rtx;
          save_off = frame_off;
        }
      else
        {
          /* Make r11 point to the start of the SPE save area.  We need
             to be careful here if r11 is holding the static chain.  If
             it is, then temporarily save it in r0.  */
          HOST_WIDE_INT offset;

          if (!(strategy & SAVE_INLINE_GPRS))
            ool_adjust = 8 * (info->first_gp_reg_save
                              - (FIRST_SAVRES_REGISTER + 1));
          offset = info->spe_gp_save_offset + frame_off - ool_adjust;
          spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
          save_off = frame_off - offset;

          if (using_static_chain_p)
            {
              rtx r0 = gen_rtx_REG (Pmode, 0);

              START_USE (0);
              gcc_assert (info->first_gp_reg_save > 11);

              emit_move_insn (r0, spe_save_area_ptr);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            START_USE (11);

          emit_insn (gen_addsi3 (spe_save_area_ptr,
                                 frame_reg_rtx, GEN_INT (offset)));
          if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
            frame_off = -info->spe_gp_save_offset + ool_adjust;
        }

      if ((strategy & SAVE_INLINE_GPRS))
        {
          for (i = 0; i < 32 - info->first_gp_reg_save; i++)
            if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
              emit_frame_save (spe_save_area_ptr, reg_mode,
                               info->first_gp_reg_save + i,
                               (info->spe_gp_save_offset + save_off
                                + reg_size * i),
                               sp_off - save_off);
        }
      else
        {
          insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
                                         info->spe_gp_save_offset + save_off,
                                         0, reg_mode,
                                         SAVRES_SAVE | SAVRES_GPR);

          rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
                                NULL_RTX, NULL_RTX);
        }

      /* Move the static chain pointer back.  */
      if (!spe_regs_addressable)
        {
          if (using_static_chain_p)
            {
              emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
              END_USE (0);
            }
          else if (REGNO (frame_reg_rtx) != 11)
            END_USE (11);
        }
    }
  else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
    {
      bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;
      bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
      int end_save = info->gp_save_offset + info->gp_size;
      int ptr_off;

      if (!ptr_set_up)
        ptr_reg = gen_rtx_REG (Pmode, ptr_regno);

      /* Need to adjust r11 (r12) if we saved any FPRs.  */
      if (end_save + frame_off != 0)
        {
          rtx offset = GEN_INT (end_save + frame_off);

          if (ptr_set_up)
            frame_off = -end_save;
          else
            NOT_INUSE (ptr_regno);
          emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
        }
      else if (!ptr_set_up)
        {
          NOT_INUSE (ptr_regno);
          emit_move_insn (ptr_reg, frame_reg_rtx);
        }
      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
                                     info->gp_save_offset + ptr_off,
                                     info->lr_save_offset + ptr_off,
                                     reg_mode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
                            NULL_RTX, NULL_RTX);
      if (lr)
        END_USE (0);
    }
  else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
    {
      rtvec p;
      int i;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        RTVEC_ELT (p, i)
          = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
                             frame_reg_rtx,
                             info->gp_save_offset + frame_off + reg_size * i);
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info))
    {
      int i;
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
        if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
          emit_frame_save (frame_reg_rtx, reg_mode,
                           info->first_gp_reg_save + i,
                           info->gp_save_offset + frame_off + reg_size * i,
                           sp_off - frame_off);
    }
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
        }

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
        {
          unsigned int regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;

          insn
            = gen_frame_store (gen_rtx_REG (reg_mode, regno),
                               sp_reg_rtx,
                               info->ehrd_offset + sp_off + reg_size * (int) i);
          RTVEC_ELT (p, i) = insn;
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }
  /* In AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx save_insn, join_insn, note;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
        {
          START_USE (0);
          emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
        }
      else
        START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at instruction to which this function returns.  If it's
         restoring r2, then we know we've already saved r2.  We can't
         unconditionally save r2 because the value we have will already
         be updated if we arrived at this function via a plt call or
         toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
      toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, compare_result,
                              gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                                   gen_rtx_EQ (VOIDmode, compare_result,
                                               const0_rtx),
                                   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
                                   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
                                   TOC_REGNUM, frame_off + 5 * reg_size,
                                   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
         have a CFG that has different saves along different paths.
         Move the note to a dummy blockage insn, which describes that
         R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
         containing both the branch and the store.  That might let the
         code that minimizes the number of DW_CFA_advance opcodes better
         freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
        remove_note (save_insn, note);
      else
        note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
                               copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
        {
          emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
          END_USE (0);
        }
      else
        END_USE (11);
    }
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                               GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);
      /* See the large comment above about why CR2_REGNO is used.  */
      rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
        {
          rtx set;

          START_USE (0);
          cr_save_rtx = gen_rtx_REG (SImode, 0);
          insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
          RTX_FRAME_RELATED_P (insn) = 1;
          set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
          add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
        }
      insn = emit_move_insn (mem, cr_save_rtx);
      END_USE (REGNO (cr_save_rtx));

      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
                            NULL_RTX, NULL_RTX);
    }
  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
         locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
          || (info->altivec_size != 0
              && (info->altivec_save_offset + info->altivec_size - 16
                  + info->total_size - frame_off) > 32767)
          || (info->vrsave_mask != 0
              && (info->vrsave_save_offset
                  + info->total_size - frame_off) > 32767))
        {
          int sel = SAVRES_SAVE | SAVRES_VR;
          unsigned ptr_regno = ptr_regno_for_savres (sel);

          if (using_static_chain_p
              && ptr_regno == STATIC_CHAIN_REGNUM)
            ptr_regno = 12;
          if (REGNO (frame_reg_rtx) != ptr_regno)
            START_USE (ptr_regno);
          ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
          frame_reg_rtx = ptr_reg;
          ptr_off = info->altivec_save_offset + info->altivec_size;
          frame_off = -ptr_off;
        }
      else if (REGNO (frame_reg_rtx) == 1)
        frame_off = info->total_size;
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
        rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
                             sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
         of the save area, then use r11 or r12 to load offsets for
         [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      NOT_INUSE (0);
      if (end_save + frame_off != 0)
        {
          rtx offset = GEN_INT (end_save + frame_off);

          emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
        }
      else
        emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
                                     info->altivec_save_offset + ptr_off,
                                     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
                            NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
        {
          /* The oddity mentioned above clobbered our frame reg.  */
          emit_move_insn (frame_reg_rtx, ptr_reg);
          frame_off = ptr_off;
        }
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    NOT_INUSE (0);
	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
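  /* Each AltiVec register is 16 bytes wide, hence the 16-byte stride in
     the save slot offsets above.  */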
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      save_regno = 12;
      if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
	save_regno = 0;
      else if (REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }

  /* Write .extern for AIX common mode routines, if needed.  */
  if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
    {
      fputs ("\t.extern __mulh\n", file);
      fputs ("\t.extern __mull\n", file);
      fputs ("\t.extern __divss\n", file);
      fputs ("\t.extern __divus\n", file);
      fputs ("\t.extern __quoss\n", file);
      fputs ("\t.extern __quous\n", file);
      common_mode_defined = 1;
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
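/* With the default of zero, AltiVec registers whose save area lies in
   the red zone are reloaded after the frame pop, which avoids setting
   up a separate frame register in the common case.  */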
/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
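	    /* mtcrf numbers CR fields from the most significant end,
	       so CR field i corresponds to mask bit 7 - i.  */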
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
					reg));

  if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);
  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment;  We don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
))
20515 const char *alloc_rname
;
20518 /* eh_rest_world_r10 will return to the location saved in the LR
20519 stack slot (which is not likely to be our caller.)
20520 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20521 rest_world is similar, except any R10 parameter is ignored.
20522 The exception-handling stuff that was here in 2.95 is no
20523 longer necessary. */
20527 + 32 - info
->first_gp_reg_save
20528 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
20529 + 63 + 1 - info
->first_fp_reg_save
);
20531 strcpy (rname
, ((crtl
->calls_eh_return
) ?
20532 "*eh_rest_world_r10" : "*rest_world"));
20533 alloc_rname
= ggc_strdup (rname
);
20536 RTVEC_ELT (p
, j
++) = ret_rtx
;
20537 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
20538 gen_rtx_REG (Pmode
,
20541 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
20542 /* The instruction pattern requires a clobber here;
20543 it is shared with the restVEC helper. */
20545 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
20548 /* CR register traditionally saved as CR2. */
20549 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
20551 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
20552 if (flag_shrink_wrap
)
20554 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
20555 gen_rtx_REG (Pmode
, LR_REGNO
),
20557 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
20561 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
20563 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
20565 = gen_frame_load (reg
,
20566 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
20567 if (flag_shrink_wrap
)
20568 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
20570 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
20572 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
20574 = gen_frame_load (reg
,
20575 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
20576 if (flag_shrink_wrap
)
20577 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
20579 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
20581 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
20582 ? DFmode
: SFmode
),
20583 info
->first_fp_reg_save
+ i
);
20585 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
20586 if (flag_shrink_wrap
)
20587 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
20590 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
20592 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
20594 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
20596 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
20598 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
20599 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
20601 if (flag_shrink_wrap
)
20603 REG_NOTES (insn
) = cfa_restores
;
20604 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
20605 RTX_FRAME_RELATED_P (insn
) = 1;
  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;
  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int i;
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
	{
	  int frame_regno = 11;

	  if ((strategy & REST_INLINE_VRS) == 0)
	    {
	      /* Of r11 and r12, select the one not clobbered by an
		 out-of-line restore function for the frame register.  */
	      frame_regno = 11 + 12 - scratch_regno;
	    }
	  frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (flag_shrink_wrap
		|| (offset_below_red_zone_p
		    (info->altivec_save_offset
		     + 16 * (i - info->first_altivec_reg_save)))))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
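  /* If one of the adds above popped the frame while frame_reg_rtx was
     still the stack pointer, the new CFA definition and any pending
     cfa_restores must hang off that insn so the unwinder state changes
     exactly at the pop.  */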
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->fp_save_offset + info->fp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
20896 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20898 && TARGET_ALTIVEC_VRSAVE
20899 && info
->vrsave_mask
!= 0
20900 && (DEFAULT_ABI
== ABI_V4
20901 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
20905 reg
= gen_rtx_REG (SImode
, 12);
20906 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
20907 info
->vrsave_save_offset
+ frame_off
));
20909 emit_insn (generate_set_vrsave (reg
, info
, 1));
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);
  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + 5 * reg_size));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  /* Note: possible use of r0 here to address SPE regs.  */
	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && restoring_GPRs_inline);

      if (!spe_regs_addressable)
	{
	  int ool_adjust = 0;
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */

	  if (!restoring_GPRs_inline)
	    ool_adjust = 8 * (info->first_gp_reg_save
			      - (FIRST_SAVRES_REGISTER + 1));
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + frame_off
					  - ool_adjust)));
	  /* Keep the invariant that frame_reg_rtx + frame_off points
	     at the top of the stack frame.  */
	  frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if (restoring_GPRs_inline)
	{
	  HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;

	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		emit_move_insn (reg, mem);
	      }
	}
      else
	rs6000_emit_savres_rtx (info, frame_reg_rtx,
				info->spe_gp_save_offset + frame_off,
				info->lr_save_offset + frame_off,
				reg_mode,
				SAVRES_GPR | SAVRES_LR);
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;

      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_insn (gen_frame_load
		     (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
		      frame_reg_rtx,
		      info->gp_save_offset + frame_off + reg_size * i));
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK;  A duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!restoring_GPRs_inline
	    || using_load_multiple
	    || rs6000_reg_live_or_pic_offset_p (i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);

	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }
  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if (save_reg_p (info->first_fp_reg_save + i))
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     info->fp_save_offset + frame_off + 8 * i));
	  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }
  if (!sibcall)
    {
      rtvec p;
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      if (! restoring_FPRs_inline)
	{
	  p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
	  RTVEC_ELT (p, 0) = ret_rtx;
	}
      else
	{
	  if (cfa_restores)
	    {
	      /* We can't hang the cfa_restores off a simple return,
		 since the shrink-wrap code sometimes uses an existing
		 return.  This means there might be a path from
		 pre-prologue code to this return, and dwarf2cfi code
		 wants the eh_frame unwinder state to be the same on
		 all paths to any point.  So we need to emit the
		 cfa_restores before the return.  For -m64 we really
		 don't need epilogue cfa_restores at all, except for
		 this irritating dwarf2cfi with shrink-wrap
		 requirement;  The stack red-zone means eh_frame info
		 from the prologue telling the unwinder to restore
		 from the stack is perfectly good right to the end of
		 the function.  */
	      emit_insn (gen_blockage ());
	      emit_cfa_restores (cfa_restores);
	      cfa_restores = NULL_RTX;
	    }
	  p = rtvec_alloc (2);
	  RTVEC_ELT (p, 0) = simple_return_rtx;
	}

      RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
			  ? gen_rtx_USE (VOIDmode,
					 gen_rtx_REG (Pmode, LR_REGNO))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, LR_REGNO)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  rtx sym;

	  if (flag_shrink_wrap)
	    cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

	  sym = rs6000_savres_routine_sym (info,
					   SAVRES_FPR | (lr ? SAVRES_LR : 0));
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (Pmode,
						       DEFAULT_ABI == ABI_AIX
						       ? 1 : 11));
	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	      RTVEC_ELT (p, i + 4)
		= gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	      if (flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}
      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyways, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
      if (! strcmp (language_string, "GNU C")
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);
      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}
      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      RS6000_OUTPUT_BASENAME (file, fname);
      fputs ("-", file);
      rs6000_output_function_entry (file, fname);
      fputs ("\n", file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
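      /* The test below checks whether vcall_offset fits in a signed
	 16-bit displacement; if not, it must first be added to the
	 vtable pointer in a register before the dependent load.  */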
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }
  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* A quick summary of the various types of 'constant-pool tables'
   used by the code above.

	Target	   Flags		Name		One table per
	AIX	   (none)		AIX TOC		object file
	AIX	   -mfull-toc		AIX TOC		object file
	AIX	   -mminimal-toc	AIX minimal TOC	translation unit
	SVR4/EABI  (none)		SVR4 SDATA	object file
	SVR4/EABI  -fpic		SVR4 pic	object file
	SVR4/EABI  -fPIC		SVR4 PIC	translation unit
	SVR4/EABI  -mrelocatable	EABI TOC	function
	SVR4/EABI  -maix		AIX TOC		object file
	SVR4/EABI  -maix -mminimal-toc
				AIX minimal TOC	translation unit

	Name			Reg.	Set by	entries	contains:
					made by	addrs?	fp?	sum?

	AIX TOC			2	crt0	as	Y	option	option
	AIX minimal TOC		30	prolog	gcc	Y	Y	option
	SVR4 SDATA		13	crt0	gcc	N	Y	N
	SVR4 pic		30	prolog	ld	Y	not yet	N
	SVR4 PIC		30	prolog	gcc	Y	option	option
	EABI TOC		30	prolog	gcc	Y	option	option

*/
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     CODE_LABELs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc_toc_hash_struct ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));
	  return;
	}
    }
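  /* From here on the constant is known to be new (or the linker will
     eliminate duplicates), so a fresh TOC entry is emitted under the
     internal LC..labelno label.  */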
  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
           (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
        REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
        REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
        {
          if (TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc FD_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff);
          fprintf (file, "0x%lx%08lx\n",
                   k[0] & 0xffffffff, k[1] & 0xffffffff);
          return;
        }
      else
        {
          if (TARGET_MINIMAL_TOC)
            fputs ("\t.long ", file);
          else
            fprintf (file, "\t.tc FD_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff);
          fprintf (file, "0x%lx,0x%lx\n",
                   k[0] & 0xffffffff, k[1] & 0xffffffff);
          return;
        }
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
           (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
        REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
        REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
        {
          if (TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
          fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
          return;
        }
      else
        {
          if (TARGET_MINIMAL_TOC)
            fputs ("\t.long ", file);
          else
            fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
          fprintf (file, "0x%lx\n", l & 0xffffffff);
          return;
        }
    }
  else if (GET_MODE (x) == VOIDmode
           && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
        {
          low = CONST_DOUBLE_LOW (x);
          high = CONST_DOUBLE_HIGH (x);
        }
      else
#if HOST_BITS_PER_WIDE_INT == 32
        {
          low = INTVAL (x);
          high = (low & 0x80000000) ? ~0 : 0;
        }
#else
        {
          low = INTVAL (x) & 0xffffffff;
          high = (HOST_WIDE_INT) INTVAL (x) >> 32;
        }
#endif

      /* TOC entries are always Pmode-sized, but since this
         is a bigendian machine then if we're putting smaller
         integer constants in the TOC we have to pad them.
         (This is still a win over putting the constants in
         a separate constant pool, because then we'd have
         to have both a TOC entry _and_ the actual constant.)

         For a 32-bit target, CONST_INT values are loaded and shifted
         entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
        {
#if HOST_BITS_PER_WIDE_INT == 32
          lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
                         POINTER_SIZE, &low, &high, 0);
#else
          low |= high << 32;
          low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
          high = (HOST_WIDE_INT) low >> 32;
          low &= 0xffffffff;
#endif
        }

      if (TARGET_64BIT)
        {
          if (TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc ID_%lx_%lx[TC],",
                     (long) high & 0xffffffff, (long) low & 0xffffffff);
          fprintf (file, "0x%lx%08lx\n",
                   (long) high & 0xffffffff, (long) low & 0xffffffff);
          return;
        }
      else
        {
          if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
            {
              if (TARGET_MINIMAL_TOC)
                fputs ("\t.long ", file);
              else
                fprintf (file, "\t.tc ID_%lx_%lx[TC],",
                         (long) high & 0xffffffff, (long) low & 0xffffffff);
              fprintf (file, "0x%lx,0x%lx\n",
                       (long) high & 0xffffffff, (long) low & 0xffffffff);
              return;
            }
          else
            {
              if (TARGET_MINIMAL_TOC)
                fputs ("\t.long ", file);
              else
                fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
              fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
              return;
            }
        }
    }
  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
                                   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
        fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
        fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
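/* Illustrative examples of the directives emitted above (values invented
   for this comment; they are not from the original source):

       .tc FD_3ff00000_0[TC],0x3ff0000000000000    64-bit double 1.0
       .tc IS_2a[TC],0x2a                          32-bit integer 42
       .tc foo.P8[TC],foo+8                        symbol foo plus offset 8

   With -mminimal-toc the ".tc" name is replaced by a bare .long (32-bit)
   or DOUBLE_INT_ASM_OP (64-bit) data directive, since those entries live
   in a private constant pool rather than the real TOC.  */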
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
        {
          if (for_string)
            fputs (for_string, file);
          putc (c, file);

          /* Write two quotes to get one.  */
          if (c == '"')
            {
              putc (c, file);
              ++count_string;
            }

          for_string = NULL;
          for_decimal = "\"\n\t.byte ";
          to_close = "\"\n";
          ++count_string;

          if (count_string >= 512)
            {
              fputs (to_close, file);

              for_string = "\t.byte \"";
              for_decimal = "\t.byte ";
              to_close = NULL;
              count_string = 0;
            }
        }
      else
        {
          if (for_decimal)
            fputs (for_decimal, file);
          fprintf (file, "%d", c);

          for_string = "\n\t.byte \"";
          for_decimal = ", ";
          to_close = "\n";
          count_string = 0;
        }
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
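/* For example, output_ascii (file, "Hi\n", 3) would emit (sketch):

       .byte "Hi"
       .byte 10

   printable characters go inside one quoted .byte string, while the
   newline (10) is written as a separate decimal .byte operand.  */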
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
                         const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
        after_last_slash = q + 1;
      else if (*q == '.')
        last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
        {
          strcpy (p, section_desc);
          p += strlen (section_desc);
          break;
        }

      else if (ISALNUM (*q))
        *p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
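/* For example (illustrative), filename "/src/foo.c" with SECTION_DESC
   ".bss_" (a typical descriptor used by the XCOFF support code) yields
   "_foo.bss_": leading directories are stripped, '_' is prepended, and
   the last period and everything after it are replaced by the
   descriptor.  */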
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
        emit_library_call (init_one_libfunc (RS6000_MCOUNT),
                           LCT_NORMAL, VOIDmode, 0);
      else
        {
          char buf[30];
          const char *label_name;
          rtx fun;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
          label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
          fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

          emit_library_call (init_one_libfunc (RS6000_MCOUNT),
                             LCT_NORMAL, VOIDmode, 1, fun, Pmode);
        }
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
         from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
          && crtl->uses_pic_offset_table)
        caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
                         LCT_NORMAL, VOIDmode, 1,
                         gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
        {
          warning (0, "no profiling of 64-bit code for this ABI");
          return;
        }
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
        {
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
        }
      else if (TARGET_SECURE_PLT && flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              asm_fprintf (file, "\tbl %s\n", name);
            }
          else
            asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\t{cau|addis} %s,%s,",
                       reg_names[12], reg_names[12]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
        }
      else if (flag_pic == 1)
        {
          fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@got(%s)\n", reg_names[12]);
        }
      else if (flag_pic > 1)
        {
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          /* Now, we need to get the address of the label.  */
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
              assemble_name (file, buf);
              fputs ("-.\n1:", file);
              asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
              asm_fprintf (file, "\taddi %s,%s,4\n",
                           reg_names[11], reg_names[11]);
            }
          else
            {
              fputs ("\tbcl 20,31,1f\n\t.long ", file);
              assemble_name (file, buf);
              fputs ("-.\n1:", file);
              asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
            }
          asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
                       reg_names[0], reg_names[11]);
          asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
                       reg_names[0], reg_names[0], reg_names[11]);
        }
      else
        {
          asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
          assemble_name (file, buf);
          fputs ("@ha\n", file);
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@l(%s)\n", reg_names[12]);
        }

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
               RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
        {
          /* Don't do anything, done in output_profile_hook ().  */
        }
      else
        {
          gcc_assert (!TARGET_32BIT);

          asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
          asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

          if (cfun->static_chain_decl != NULL)
            {
              asm_fprintf (file, "\tstd %s,24(%s)\n",
                           reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
              fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
              asm_fprintf (file, "\tld %s,24(%s)\n",
                           reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
            }
          else
            fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
        }
      break;
    }
}
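/* For TARGET_PROFILE_KERNEL on 64-bit AIX/Darwin, the branch above emits a
   sequence of the shape (illustrative, assuming no static chain is live):

       mflr 0
       std 0,16(1)
       bl <mcount>

   where <mcount> stands for whatever RS6000_MCOUNT names on the target.  */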
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue_1 (rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
        cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
        cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
        cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
        /* Data dependency; DEP_INSN writes a register that INSN reads
           some cycles later.  */

        /* Separate a load from a narrower, dependent store.  */
        if (rs6000_sched_groups
            && GET_CODE (PATTERN (insn)) == SET
            && GET_CODE (PATTERN (dep_insn)) == SET
            && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
            && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
            && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
                > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
          return cost + 14;

        attr_type = get_attr_type (insn);

        switch (attr_type)
          {
          case TYPE_JMPREG:
            /* Tell the first scheduling pass about the latency between
               a mtctr and bctr (and mtlr and br/blr).  The first
               scheduling pass will not know about this latency since
               the mtctr instruction, which has the latency associated
               to it, will be generated by reload.  */
            return TARGET_POWER ? 5 : 4;
          case TYPE_BRANCH:
            /* Leave some extra cycles between a compare and its
               dependent branch, to inhibit expensive mispredicts.  */
            if ((rs6000_cpu_attr == CPU_PPC603
                 || rs6000_cpu_attr == CPU_PPC604
                 || rs6000_cpu_attr == CPU_PPC604E
                 || rs6000_cpu_attr == CPU_PPC620
                 || rs6000_cpu_attr == CPU_PPC630
                 || rs6000_cpu_attr == CPU_PPC750
                 || rs6000_cpu_attr == CPU_PPC7400
                 || rs6000_cpu_attr == CPU_PPC7450
                 || rs6000_cpu_attr == CPU_PPCE5500
                 || rs6000_cpu_attr == CPU_PPCE6500
                 || rs6000_cpu_attr == CPU_POWER4
                 || rs6000_cpu_attr == CPU_POWER5
                 || rs6000_cpu_attr == CPU_POWER7
                 || rs6000_cpu_attr == CPU_CELL)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))

              switch (get_attr_type (dep_insn))
                {
                case TYPE_CMP:
                case TYPE_COMPARE:
                case TYPE_DELAYED_COMPARE:
                case TYPE_IMUL_COMPARE:
                case TYPE_LMUL_COMPARE:
                case TYPE_FPCOMPARE:
                case TYPE_CR_LOGICAL:
                case TYPE_DELAYED_CR:
                  return cost + 2;
                default:
                  break;
                }
            break;

          case TYPE_STORE:
          case TYPE_STORE_U:
          case TYPE_STORE_UX:
          case TYPE_FPSTORE:
          case TYPE_FPSTORE_U:
          case TYPE_FPSTORE_UX:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                if (GET_CODE (PATTERN (insn)) != SET)
                  /* If this happens, we have to extend this to schedule
                     optimally.  Return default for now.  */
                  return cost;

                /* Adjust the cost for the case where the value written
                   by a fixed point operation is used as the address
                   gen value on a store.  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_LOAD_U:
                  case TYPE_LOAD_UX:
                  case TYPE_CNTLZ:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 4;
                      break;
                    }
                  case TYPE_LOAD_EXT:
                  case TYPE_LOAD_EXT_U:
                  case TYPE_LOAD_EXT_UX:
                  case TYPE_VAR_SHIFT_ROTATE:
                  case TYPE_VAR_DELAYED_COMPARE:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 6;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_COMPARE:
                  case TYPE_FAST_COMPARE:
                  case TYPE_EXTS:
                  case TYPE_SHIFT:
                  case TYPE_INSERT_WORD:
                  case TYPE_INSERT_DWORD:
                  case TYPE_FPLOAD_U:
                  case TYPE_FPLOAD_UX:
                  case TYPE_STORE_U:
                  case TYPE_STORE_UX:
                  case TYPE_FPSTORE_U:
                  case TYPE_FPSTORE_UX:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_IMUL:
                  case TYPE_IMUL2:
                  case TYPE_IMUL3:
                  case TYPE_LMUL:
                  case TYPE_IMUL_COMPARE:
                  case TYPE_LMUL_COMPARE:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_IDIV:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 45;
                      break;
                    }
                  case TYPE_LDIV:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_LOAD:
          case TYPE_LOAD_U:
          case TYPE_LOAD_UX:
          case TYPE_LOAD_EXT:
          case TYPE_LOAD_EXT_U:
          case TYPE_LOAD_EXT_UX:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                /* Adjust the cost for the case where the value written
                   by a fixed point instruction is used within the address
                   gen portion of a subsequent load(u)(x).  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_LOAD_U:
                  case TYPE_LOAD_UX:
                  case TYPE_CNTLZ:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 4;
                      break;
                    }
                  case TYPE_LOAD_EXT:
                  case TYPE_LOAD_EXT_U:
                  case TYPE_LOAD_EXT_UX:
                  case TYPE_VAR_SHIFT_ROTATE:
                  case TYPE_VAR_DELAYED_COMPARE:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 6;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_COMPARE:
                  case TYPE_FAST_COMPARE:
                  case TYPE_EXTS:
                  case TYPE_SHIFT:
                  case TYPE_INSERT_WORD:
                  case TYPE_INSERT_DWORD:
                  case TYPE_FPLOAD_U:
                  case TYPE_FPLOAD_UX:
                  case TYPE_STORE_U:
                  case TYPE_STORE_UX:
                  case TYPE_FPSTORE_U:
                  case TYPE_FPSTORE_UX:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_IMUL:
                  case TYPE_IMUL2:
                  case TYPE_IMUL3:
                  case TYPE_LMUL:
                  case TYPE_IMUL_COMPARE:
                  case TYPE_LMUL_COMPARE:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_IDIV:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 45;
                      break;
                    }
                  case TYPE_LDIV:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_FPLOAD:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0)
                && (get_attr_type (dep_insn) == TYPE_MFFGPR))
              return 2;

          default:
            break;
          }

        /* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          attr_type = get_attr_type (insn);

          switch (attr_type)
            {
            case TYPE_FP:
              if (get_attr_type (dep_insn) == TYPE_FP)
                return 1;
              break;
            case TYPE_FPLOAD:
              if (get_attr_type (dep_insn) == TYPE_MFFGPR)
                return 2;
              break;
            default:
              break;
            }
        }
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
         cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

  if (ret != cost)
    {
      const char *dep;

      switch (REG_NOTE_KIND (link))
        {
        default:             dep = "unknown dependency"; break;
        case REG_DEP_TRUE:   dep = "data dependency";    break;
        case REG_DEP_OUTPUT: dep = "output dependency";  break;
        case REG_DEP_ANTI:   dep = "anti dependency";    break;
        }

      fprintf (stderr,
               "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
               "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
          || type == TYPE_LOAD_EXT_UX
          || type == TYPE_LOAD_UX
          || type == TYPE_STORE_UX
          || type == TYPE_MFCR)
        return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
          || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
          || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
          || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
          || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
          || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
          || type == TYPE_IDIV || type == TYPE_LDIV
          || type == TYPE_INSERT_WORD)
        return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
        return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */
static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
        return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
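/* E.g. (illustrative RTL, not from the original source): if OUT_INSN is
   "r9 = r3 + r4" and IN_INSN is "r5 = mem(r9 + 8)", then r9 feeds the
   address generation of the load and this function returns true.  */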
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
                  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if MEM_SIZE_KNOWN_P (mem)
    *size = MEM_SIZE (mem);
  else
    return false;

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)
    {
      *base = addr_rtx;
      *offset = 0;
    }
  else if (GET_CODE (addr_rtx) == PLUS
           && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
  else
    return false;

  return true;
}
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2, i.e. return 1 if the memory
   locations are adjacent.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && ((off1 + size1 == off2)
                || (off2 + size2 == off1)));

  return false;
}
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && (((off1 <= off2) && (off1 + size1 > off2))
                || ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
        break;

      case TYPE_IMUL:
      case TYPE_IDIV:
        fprintf (stderr, "priority was %#x (%d) before adjustment\n",
                 priority, priority);
        if (priority >= 0 && priority < 0x01000000)
          priority >>= 3;
        break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
         dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
        /* Attach highest priority to insn.  This means that in
           haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
           precede 'priority' (critical path) considerations.  */
        return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
        /* Increase priority of insn by a minimal amount.  This means that in
           haifa-sched.c:ready_sort(), only 'priority' (critical path)
           considerations precede dispatch-slot restriction considerations.  */
        return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
          || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_RIOS1:  /* ? */
  case CPU_RS64A:
  case CPU_PPC601: /* ? */
  case CPU_PPC7450:
    return 3;
  case CPU_PPC440:
  case CPU_PPC603:
  case CPU_PPC750:
  case CPU_PPC7400:
  case CPU_PPC8540:
  case CPU_CELL:
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:
  case CPU_PPCE5500:
  case CPU_PPCE6500:
  case CPU_TITAN:
    return 2;
  case CPU_RIOS2:
  case CPU_PPC476:
  case CPU_PPC604:
  case CPU_PPC604E:
  case CPU_PPC620:
  case CPU_PPC630:
    return 4;
  case CPU_POWER4:
  case CPU_POWER5:
  case CPU_POWER6:
  case CPU_POWER7:
    return 5;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_PPC8540:
      return 4;

    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */
static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem_ref (XEXP (pat, i), mem_ref))
            return true;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
          {
            if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
              return true;
          }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
          return true;
    }

  return false;
}
/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
          return true;
    }

  return false;
}
/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap(str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int)rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
        return NULL_RTX;

      if (CALL_P (insn)
          || JUMP_P (insn)
          || (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && INSN_CODE (insn) != CODE_FOR_stack_tie))
        break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx *ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a non-pipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        {
          rtx tmp = ready[n_ready - 1];
          ready[n_ready - 1] = ready[n_ready - 2];
          ready[n_ready - 2] = tmp;
        }
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
           load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.  */

  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp, load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;
                  /* Maintain the index of the first store found on the
                     list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  return cached_can_issue_more;
}
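/* Illustrative trace (not from the original source): if the previous two
   calls each recorded a store, the pendulum reaches -2, so this call
   boosts the priority of the first load on the ready list and pulls the
   pendulum back toward zero, alternating loads and stores the way Power6
   prefers.  */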
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
        case TYPE_MTJMPR:
        case TYPE_MFJMPR:
        case TYPE_IDIV:
        case TYPE_LDIV:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_ISYNC:
        case TYPE_SYNC:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_INSERT_DWORD:
        case TYPE_EXTS:
        case TYPE_CNTLZ:
        case TYPE_SHIFT:
        case TYPE_VAR_SHIFT_ROTATE:
        case TYPE_TRAP:
        case TYPE_IMUL:
        case TYPE_IMUL2:
        case TYPE_IMUL3:
        case TYPE_LMUL:
        case TYPE_IDIV:
        case TYPE_INSERT_WORD:
        case TYPE_DELAYED_COMPARE:
        case TYPE_IMUL_COMPARE:
        case TYPE_LMUL_COMPARE:
        case TYPE_FPCOMPARE:
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_LOAD_U:
        case TYPE_LOAD_UX:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_U:
        case TYPE_STORE_UX:
        case TYPE_FPLOAD_U:
        case TYPE_FPLOAD_UX:
        case TYPE_FPSTORE_U:
        case TYPE_FPSTORE_UX:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_IDIV:
        case TYPE_LDIV:
        case TYPE_COMPARE:
        case TYPE_DELAYED_COMPARE:
        case TYPE_VAR_DELAYED_COMPARE:
        case TYPE_ISYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_LOAD_U:
        case TYPE_LOAD_UX:
        case TYPE_LOAD_EXT:
        case TYPE_LOAD_EXT_U:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_U:
        case TYPE_STORE_UX:
        case TYPE_FPLOAD_U:
        case TYPE_FPLOAD_UX:
        case TYPE_FPSTORE_U:
        case TYPE_FPSTORE_UX:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
          return true;
        default:
          break;
        }
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_SHIFT:
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_TRAP:
      case TYPE_IMUL:
      case TYPE_IMUL2:
      case TYPE_IMUL3:
      case TYPE_LMUL:
      case TYPE_IDIV:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
        return true;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
        return true;
      default:
        break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in the
   last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Power6 and Power7 have special group ending nop.  */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                {
                  group_insns[i] = 0;
                }
            }

          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops' this
   function can force certain insns into separate groups or force a certain
   distance between them by inserting nops, for example, if there exists a
   "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form as
   follows:  It keeps track of how many vacant issue slots are available after
   each insn.  A subsequent insn will start a new group if one of the following
   4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
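/* For example (illustrative): with issue_rate == 4, reaching a
   scheduler-marked boundary with two vacant slots and a non-branch
   next_insn reserves the branch-only slot and emits a single nop to close
   out the group.  */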
24237 /* We're beginning a new block. Initialize data structures as necessary. */
24240 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED
,
24241 int sched_verbose ATTRIBUTE_UNUSED
,
24242 int max_ready ATTRIBUTE_UNUSED
)
24244 last_scheduled_insn
= NULL_RTX
;
24245 load_store_pendulum
= 0;
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("You cannot take the address of a nested function if you use "
                 "the -mno-pointers-to-nested-functions option.");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode, 4,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
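/* On AIX the trampoline is thus a 3-word function descriptor laid out as
   { function address, TOC pointer, static chain }, copied from the target
   function's own descriptor with the chain slot overwritten.  */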
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
24440 /* Handle the "altivec" attribute. The attribute may have
24441 arguments as follows:
24443 __attribute__((altivec(vector__)))
24444 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24445 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24447 and may appear more than once (e.g., 'vector bool char') in a
24448 given declaration. */
static tree
rs6000_handle_altivec_attribute (tree *node,
				 tree name ATTRIBUTE_UNUSED,
				 tree args,
				 int flags ATTRIBUTE_UNUSED,
				 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
	&& TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
	{
	  if (TARGET_64BIT)
	    error ("use of %<long%> in AltiVec types is invalid for "
		   "64-bit code without -mvsx");
	  else if (rs6000_warn_altivec_long)
	    warning (0, "use of %<long%> in AltiVec types is deprecated; "
		     "use %<int%>");
	}
      else if (type == long_long_unsigned_type_node
	       || type == long_long_integer_type_node)
	error ("use of %<long long%> in AltiVec types is invalid without "
	       "-mvsx");
      else if (type == double_type_node)
	error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
	{
	case DImode:
	  result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
	  break;
	case SImode:
	  result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	  break;
	case HImode:
	  result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	  break;
	case QImode:
	  result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	  break;
	case SFmode: result = V4SF_type_node; break;
	case DFmode: result = V2DF_type_node; break;
	  /* If the user says 'vector int bool', we may be handed the 'bool'
	     attribute _before_ the 'vector' attribute, and so select the
	     proper type in the 'b' case below.  */
	case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
	case V2DImode: case V2DFmode:
	  result = type;
	default: break;
	}
      break;
    case 'b':
      switch (mode)
	{
	case DImode: case V2DImode: result = bool_V2DI_type_node; break;
	case SImode: case V4SImode: result = bool_V4SI_type_node; break;
	case HImode: case V8HImode: result = bool_V8HI_type_node; break;
	case QImode: case V16QImode: result = bool_V16QI_type_node;
	default: break;
	}
      break;
    case 'p':
      switch (mode)
	{
	case V8HImode: result = pixel_V8HI_type_node;
	default: break;
	}
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
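/* Illustrative example (editor's note, not from the original file):
   a C++ function taking a 'vector bool int' parameter has the element
   type above mangled as "U6__booli", which then appears inside the
   vector type's mangling produced by the language-independent code.  */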
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
	  !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
	{
	  toc_initialized = 1;
	  fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
	  (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
	  fprintf (asm_out_file, "\t.tc ");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, "\n");

	  fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	}
      else
	fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
	{
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	  toc_initialized = 1;
	}
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
			   SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
			       unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}
static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

DEF_VEC_O(branch_island);
DEF_VEC_ALLOC_O(branch_island,gc);

static VEC(branch_island,gc) *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
			    int line_number)
{
  branch_island *bi = VEC_safe_push (branch_island, gc, branch_islands, NULL);

  bi->function_name = function_name;
  bi->label_name = label_name;
  bi->line_number = line_number;
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
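/* For orientation, the non-PIC island assembled below has this shape
   (an editor's rendering of the strcat sequence, with 'foo' and 'L42'
   standing in for the real names):

	L42:	lis r12,hi16(foo)
		ori r12,r12,lo16(foo)
		mtctr r12
		bctr

   i.e. the full address is materialized in r12 and the jump goes
   through the count register, which has no displacement limit.  */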
static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!VEC_empty (branch_island, branch_islands))
    {
      branch_island *bi = VEC_last (branch_island, branch_islands);
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
	strcpy (name_buf, name+1);
      else
	{
	  name_buf[0] = '_';
	  strcpy (name_buf+1, name);
	}
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
	      strcat (tmp_buf, name);
	      strcat (tmp_buf, "\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }
	  else
	    {
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }

	  strcat (tmp_buf, "\taddis r11,r11,ha16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtlr r0\n");

	  strcat (tmp_buf, "\taddi r12,r11,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
	}
      else
	{
	  strcat (tmp_buf, ":\nlis r12,hi16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	}
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      VEC_pop (branch_island, branch_islands);
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
	     int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
	{
	  rtx label_rtx = gen_label_rtx ();
	  char *label_buf, temp_buf[256];
	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
				       CODE_LABEL_NUMBER (label_rtx));
	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
	  labelname = get_identifier (label_buf);
	  add_compiler_branch_island (labelname, funname, insn_line (insn));
	}
      else
	labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
	 instruction will reach 'foo', otherwise link as 'bl L42'".
	 "L42" should be a 'branch island', that will do a far jump to
	 'foo'.  Branch islands are generated in
	 macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
	       dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
	{
	  char name[32];
	  get_ppc476_thunk_name (name);
	  fprintf (file, "\tbl %s\n", name);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      else
	{
	  fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
	   (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
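/* The macro above is a worked range check: adding 0x8000 shifts the
   signed 16-bit interval [-0x8000, 0x7fff] onto [0x0, 0xffff], so the
   single unsigned comparison against 0x10000 accepts exactly the
   offsets that fit in a signed halfword displacement.  */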
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
					rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
	 it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
						     Pmode, reg_temp);
      offset =
	rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
						Pmode, reg_temp);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (Pmode, base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    {
	      rtx mem = force_const_mem (Pmode, orig);
	      return machopic_legitimize_pic_address (mem, Pmode, reg);
	    }
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
	{
	  fputs ("\t.size\t", file);
	  assemble_name (file, name);
	  fputs (",24\n\t.type\t.", file);
	  assemble_name (file, name);
	  fputs (",@function\n", file);
	  if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
	    {
	      fputs ("\t.globl\t.", file);
	      assemble_name (file, name);
	      putc ('\n', file);
	    }
	}
      else
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
	desc_name++;

      if (TREE_PUBLIC (decl))
	fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
	fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
	fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
		 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
		  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
		  : 2));
      if (rs6000_passes_vector)
	fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
		 (TARGET_ALTIVEC_ABI ? 2
		  : TARGET_SPE_ABI ? 3
		  : 1));
      if (rs6000_returns_struct)
	fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
		 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
#endif
}
#endif /* TARGET_ELF */
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[3] = { "PR", "RO", "RW" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
	return data_section;
      else
	return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE || !decl)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))
		  || mask_operand (x, mode)
		  || (mode == DImode
		      && mask64_operand (x, DImode))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
	{
	  if ((outer_code == IOR || outer_code == XOR)
	      && CONST_DOUBLE_HIGH (x) == 0
	      && (CONST_DOUBLE_LOW (x)
		  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
	    {
	      *total = 0;
	      return true;
	    }
	  else if ((outer_code == AND && and64_2_operand (x, DImode))
		   || ((outer_code == SET
			|| outer_code == IOR
			|| outer_code == XOR)
		       && CONST_DOUBLE_HIGH (x) == 0))
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD. */
      if (code == MOD || code == UMOD)
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = 0;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode)
	       && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      if (TARGET_ISEL && !TARGET_MFCRF)
		*total = COSTS_N_INSNS (8);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (mode == Pmode)
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  if (TARGET_ISEL && !TARGET_MFCRF)
	    *total = COSTS_N_INSNS (8);
	  else
	    *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* Debug form of r6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
			bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_RTX_NAME (code),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /*  Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
		|| rs6000_cpu == PROCESSOR_POWER7)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_UNIT_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
			   bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
	return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
/* Newton-Raphson approximation of floating point divide with just 2 passes
   (either single precision floating point, or newer machines with higher
   accuracy estimates).  Support both scalar and vector divide.  Assumes no
   trapping math and finite arguments.  */
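/* Editor's note on the math below: with x0 ~= 1/d and e0 = 1 - d*x0,
   the exact reciprocal is 1/d = x0 / (1 - e0) = x0*(1 + e0 + e0^2 + ...).
   Computing e1 = e0 + e0*e0 and y1 = x0 + e1*x0 keeps this series
   through the e0^2 term; the final u0 = n*y1, v0 = n - d*u0,
   dst = u0 + v0*y1 steps fold in the remaining residual.  */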
static void
rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, y1, u0, v0;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
  rtx one = rs6000_load_constant_and_splat (mode, dconst1);

  gcc_assert (code != CODE_FOR_nothing);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - (d * x0) */

  e1 = gen_reg_rtx (mode);
  rs6000_emit_madd (e1, e0, e0, e0);		/* e1 = (e0 * e0) + e0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e1, x0, x0);		/* y1 = (e1 * x0) + x0 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y1));		/* u0 = n * y1 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - (d * u0) */

  rs6000_emit_madd (dst, v0, y1, u0);		/* dst = (v0 * y1) + u0 */
}
/* Newton-Raphson approximation of floating point divide that has a low
   precision estimate.  Assumes no trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - d * x0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e0, x0, x0);		/* y1 = x0 + e0 * x0 */

  e1 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e1, e0, e0));		/* e1 = e0 * e0 */

  y2 = gen_reg_rtx (mode);
  rs6000_emit_madd (y2, e1, y1, y1);		/* y2 = y1 + e1 * y1 */

  e2 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e2, e1, e1));		/* e2 = e1 * e1 */

  y3 = gen_reg_rtx (mode);
  rs6000_emit_madd (y3, e2, y2, y2);		/* y3 = y2 + e2 * y2 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y3));		/* u0 = n * y3 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - d * u0 */

  rs6000_emit_madd (dst, v0, y3, u0);		/* dst = u0 + v0 * y3 */
}
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);

  if (RS6000_RECIP_HIGH_PRECISION_P (mode))
    rs6000_emit_swdiv_high_precision (dst, n, d);
  else
    rs6000_emit_swdiv_low_precision (dst, n, d);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */
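/* Editor's note: each pass below is the Newton step for
   f(x) = 1/x^2 - src, namely x1 = x0*(1.5 - 0.5*src*x0^2).  With
   y = 0.5*src precomputed (as 1.5*src - src, saving a constant), one
   pass costs a multiply (u = x0*x0), an fnmsub (v = 1.5 - y*u) and a
   multiply (x1 = x0*v).  */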
void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);
  int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					  UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
}
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */
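/* Editor's note on the fallback below: popcntb leaves each byte of
   TMP1 holding the population count of the corresponding input byte.
   Multiplying by 0x01010101 sums all byte counts into the top byte,
   and the shift by 24 (56 for DImode) extracts that sum.  E.g. for
   SImode input 0x01020304 popcntb gives 0x01010201, and the
   multiply/shift produce 1+1+2+1 = 5.  */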
void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */
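/* Editor's note: parity is the low bit of the population count, so
   the non-prtyw path below xor-folds the popcntb result onto itself
   in halves (>>16 then >>8 for SImode) until the low byte holds the
   xor of all byte counts; its bit 0 is the parity, extracted by the
   final 'and' with 1.  The cost checks pick this shift/xor ladder only
   when the multiply used by rs6000_emit_popcount would be slower.  */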
void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */
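/* For example, the byte selector { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20,
   5, 21, 6, 22, 7, 23 } matches the vmrghb entry in the table below,
   so the permutation is emitted as one merge-high instruction rather
   than a vperm, which would additionally need the selector loaded
   into a register (editor's illustration).  */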
26620 altivec_expand_vec_perm_const (rtx operands
[4])
26622 struct altivec_perm_insn
{
26623 enum insn_code impl
;
26624 unsigned char perm
[16];
26626 static const struct altivec_perm_insn patterns
[] = {
26627 { CODE_FOR_altivec_vpkuhum
,
26628 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26629 { CODE_FOR_altivec_vpkuwum
,
26630 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26631 { CODE_FOR_altivec_vmrghb
,
26632 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26633 { CODE_FOR_altivec_vmrghh
,
26634 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26635 { CODE_FOR_altivec_vmrghw
,
26636 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26637 { CODE_FOR_altivec_vmrglb
,
26638 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26639 { CODE_FOR_altivec_vmrglh
,
26640 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26641 { CODE_FOR_altivec_vmrglw
,
26642 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26645 unsigned int i
, j
, elt
, which
;
26646 unsigned char perm
[16];
26647 rtx target
, op0
, op1
, sel
, x
;
26650 target
= operands
[0];
26655 /* Unpack the constant selector. */
26656 for (i
= which
= 0; i
< 16; ++i
)
26658 rtx e
= XVECEXP (sel
, 0, i
);
26659 elt
= INTVAL (e
) & 31;
26660 which
|= (elt
< 16 ? 1 : 2);
26664 /* Simplify the constant selector based on operands. */
26668 gcc_unreachable ();
26672 if (!rtx_equal_p (op0
, op1
))
26677 for (i
= 0; i
< 16; ++i
)
26689 /* Look for splat patterns. */
26694 for (i
= 0; i
< 16; ++i
)
26695 if (perm
[i
] != elt
)
26699 emit_insn (gen_altivec_vspltb (target
, op0
, GEN_INT (elt
)));
26705 for (i
= 0; i
< 16; i
+= 2)
26706 if (perm
[i
] != elt
|| perm
[i
+ 1] != elt
+ 1)
26710 x
= gen_reg_rtx (V8HImode
);
26711 emit_insn (gen_altivec_vsplth (x
, gen_lowpart (V8HImode
, op0
),
26712 GEN_INT (elt
/ 2)));
26713 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
26720 for (i
= 0; i
< 16; i
+= 4)
26722 || perm
[i
+ 1] != elt
+ 1
26723 || perm
[i
+ 2] != elt
+ 2
26724 || perm
[i
+ 3] != elt
+ 3)
26728 x
= gen_reg_rtx (V4SImode
);
26729 emit_insn (gen_altivec_vspltw (x
, gen_lowpart (V4SImode
, op0
),
26730 GEN_INT (elt
/ 4)));
26731 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
26737 /* Look for merge and pack patterns. */
26738 for (j
= 0; j
< ARRAY_SIZE (patterns
); ++j
)
26742 elt
= patterns
[j
].perm
[0];
26743 if (perm
[0] == elt
)
26745 else if (perm
[0] == elt
+ 16)
26749 for (i
= 1; i
< 16; ++i
)
26751 elt
= patterns
[j
].perm
[i
];
26753 elt
= (elt
>= 16 ? elt
- 16 : elt
+ 16);
26754 else if (one_vec
&& elt
>= 16)
26756 if (perm
[i
] != elt
)
26761 enum insn_code icode
= patterns
[j
].impl
;
26762 enum machine_mode omode
= insn_data
[icode
].operand
[0].mode
;
26763 enum machine_mode imode
= insn_data
[icode
].operand
[1].mode
;
26766 x
= op0
, op0
= op1
, op1
= x
;
26767 if (imode
!= V16QImode
)
26769 op0
= gen_lowpart (imode
, op0
);
26770 op1
= gen_lowpart (imode
, op1
);
26772 if (omode
== V16QImode
)
26775 x
= gen_reg_rtx (omode
);
26776 emit_insn (GEN_FCN (icode
) (x
, op0
, op1
));
26777 if (omode
!= V16QImode
)
26778 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
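
/* Worked example (GCC vector extensions; illustrative): a two-operand
   byte shuffle whose constant mask equals the CODE_FOR_altivec_vmrghb
   row of the table above is emitted as a single vmrghb rather than a
   vperm with a loaded control vector:

     typedef unsigned char v16qi __attribute__ ((vector_size (16)));

     v16qi
     merge_high (v16qi a, v16qi b)
     {
       v16qi m = { 0, 16, 1, 17, 2, 18, 3, 19,
                   4, 20, 5, 21, 6, 22, 7, 23 };
       return __builtin_shuffle (a, b, m);
     }  */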
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  else if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success!  */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}
bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}
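
/* Worked example (illustrative): for a two-element vector, the selector
   { 1, 2 } asks for element 1 of op0 followed by element 0 of op1.
   Here perm0 = 1 and perm1 = 2; none of the folding cases in
   rs6000_expand_vec_perm_const_1 applies, so the VEC_SELECT of the
   VEC_CONCAT built there describes that lane pick directly (e.g. a
   single doubleword-permute on VSX).  */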
/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
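
/* Worked example (illustrative): for a four-element vector the loop
   above builds the selector { 0, 2, 4, 6 }, i.e. the even-numbered
   lanes of the op0:op1 concatenation.  */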
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
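
/* Worked example (illustrative): for a four-element vector with HIGHP
   selecting the high half on a big-endian target (high == 0), the
   selector built above is { 0, 4, 1, 5 }, pairing lane i of op0 with
   lane i of op1.  */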
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
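
/* Worked example (illustrative): for SCmode under 64-bit hard float
   (inner == SFmode, inner_bytes == 4), neither early return triggers,
   so the result is the PARALLEL pairing the first FP return register
   at byte offset 0 with the next one at byte offset 4.  */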
/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value needs to be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
        gen_rtvec (4,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 2),
                                      GEN_INT (8)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 3),
                                      GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
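
/* Worked example (illustrative): under -m32 -mpowerpc64 a long long
   return value takes the first branch above and comes back as the
   PARALLEL { r3 at byte offset 0, r4 at byte offset 4 }.  */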
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value needs to be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
            ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
            : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
        }
      else
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
        }
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == MQ_REGNO)
    return 100;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
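
/* Worked example (illustrative): with DWARF2 debugging enabled, the
   CR fields map to 86 + (regno - CR0_REGNO), so CR0 becomes column 86
   and CR7 column 93, while the AltiVec registers v0-v31 land in the
   1124-1155 range via FIRST_ALTIVEC_REGNO above.  */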
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}
void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = locator_location (INSN_LOCATOR (insn));

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
        return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;             /* option name */
  int mask;                     /* mask to set */
  bool invert;                  /* invert sense of mask */
  bool valid_target;            /* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",          MASK_ALTIVEC,           false, true  },
  { "cmpb",             MASK_CMPB,              false, true  },
  { "dlmzb",            MASK_DLMZB,             false, true  },
  { "fprnd",            MASK_FPRND,             false, true  },
  { "hard-dfp",         MASK_DFP,               false, true  },
  { "isel",             MASK_ISEL,              false, true  },
  { "mfcrf",            MASK_MFCRF,             false, true  },
  { "mfpgpr",           MASK_MFPGPR,            false, true  },
  { "mulhw",            MASK_MULHW,             false, true  },
  { "multiple",         MASK_MULTIPLE,          false, true  },
  { "update",           MASK_NO_UPDATE,         true , true  },
  { "popcntb",          MASK_POPCNTB,           false, true  },
  { "popcntd",          MASK_POPCNTD,           false, true  },
  { "powerpc-gfxopt",   MASK_PPC_GFXOPT,        false, true  },
  { "powerpc-gpopt",    MASK_PPC_GPOPT,         false, true  },
  { "recip-precision",  MASK_RECIP_PRECISION,   false, true  },
  { "string",           MASK_STRING,            false, true  },
  { "vsx",              MASK_VSX,               false, true  },
#ifdef MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",            MASK_64BIT,             false, false },
  { "aix32",            MASK_64BIT,             true,  false },
#else
  { "64",               MASK_64BIT,             false, false },
  { "32",               MASK_64BIT,             true,  false },
#endif
#endif
#ifdef MASK_EABI
  { "eabi",             MASK_EABI,              false, false },
#endif
#ifdef MASK_LITTLE_ENDIAN
  { "little",           MASK_LITTLE_ENDIAN,     false, false },
  { "big",              MASK_LITTLE_ENDIAN,     true,  false },
#endif
#ifdef MASK_RELOCATABLE
  { "relocatable",      MASK_RELOCATABLE,       false, false },
#endif
#ifdef MASK_STRICT_ALIGN
  { "strict-align",     MASK_STRICT_ALIGN,      false, false },
#endif
  { "power",            MASK_POWER,             false, false },
  { "power2",           MASK_POWER2,            false, false },
  { "powerpc",          MASK_POWERPC,           false, false },
  { "soft-float",       MASK_SOFT_FLOAT,        false, false },
  { "string",           MASK_STRING,            false, false },
};

/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",          RS6000_BTM_ALTIVEC,     false, false },
  { "vsx",              RS6000_BTM_VSX,         false, false },
  { "spe",              RS6000_BTM_SPE,         false, false },
  { "paired",           RS6000_BTM_PAIRED,      false, false },
  { "fre",              RS6000_BTM_FRE,         false, false },
  { "fres",             RS6000_BTM_FRES,        false, false },
  { "frsqrte",          RS6000_BTM_FRSQRTE,     false, false },
  { "frsqrtes",         RS6000_BTM_FRSQRTES,    false, false },
  { "popcntd",          RS6000_BTM_POPCNTD,     false, false },
  { "powerpc",          RS6000_BTM_POWERPC,     false, false },
  { "cell",             RS6000_BTM_CELL,        false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;             /* option name */
  size_t global_offset;         /* offset of the option in global_options.  */
  size_t target_offset;         /* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};
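
/* Usage example (illustrative): given the tables above, a declaration
   such as

     void f (void) __attribute__ ((__target__ ("vsx,friz")));

   flips MASK_VSX through rs6000_opt_masks (which also turns on
   MASK_ALTIVEC; see rs6000_inner_target_options below) and sets
   x_TARGET_FRIZ through rs6000_opt_vars.  */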
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    int mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        target_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec.  */
                        if (mask == MASK_VSX && !invert)
                          mask |= MASK_ALTIVEC;

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          target_flags &= ~mask;
                        else
                          target_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  old_optimize = build_optimization_node ();
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node ();
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node ();

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node ();
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  unsigned prev_bumask, cur_bumask, diff_bumask;
  int prev_flags, cur_flags, diff_flags;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node ()) == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_target_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_target_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}
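
/* Usage example (illustrative): in C source,

     #pragma GCC target ("cpu=power7,vsx")

   reaches this hook with ARGS holding the string constant.  A failed
   parse prints "invalid pragma" when -mdebug=target or -mdebug=builtin
   is in effect and returns false.  */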
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  tree old_tree = (rs6000_previous_fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
                   : NULL_TREE);

  tree new_tree = (fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
                   : NULL_TREE);

  if (TARGET_DEBUG_TARGET)
    {
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
          print_final = true;
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
          print_final = true;
        }

      if (print_final)
        fprintf (stderr, "--------------------\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
    {
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)
        ;

      else if (new_tree)
        {
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          target_reinit ();
        }

      else if (old_tree)
        {
          struct cl_target_option *def
            = TREE_TARGET_OPTION (target_option_current_node);

          cl_target_option_restore (&global_options, def);
          target_reinit ();
        }
    }
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr)
{
  ptr->rs6000_target_flags_explicit = target_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct cl_target_option *ptr)
{
  target_flags_explicit = ptr->rs6000_target_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  size_t i;
  int flags = ptr->x_target_flags;
  unsigned bu_mask = ptr->x_rs6000_builtin_mask;

  /* Print the various mask options.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
    if ((flags & rs6000_opt_masks[i].mask) != 0)
      {
        flags &= ~ rs6000_opt_masks[i].mask;
        fprintf (file, "%*s-m%s%s\n", indent, "",
                 rs6000_opt_masks[i].invert ? "no-" : "",
                 rs6000_opt_masks[i].name);
      }

  /* Print the various options that are variables.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
    {
      size_t j = rs6000_opt_vars[i].target_offset;
      if (((signed char *) ptr)[j])
        fprintf (file, "%*s-m%s\n", indent, "",
                 rs6000_opt_vars[i].name);
    }

  /* Print the various builtin flags.  */
  fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
  for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
    if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
      fprintf (file, "%*s%s builtins supported\n", indent, "",
               rs6000_builtin_mask_names[i].name);
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
         function can inline an altivec function but a non-vsx function
         can't inline a vsx function.  */
      if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
          == callee_opts->x_target_flags)
        ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
             (DECL_NAME (caller)
              ? IDENTIFIER_POINTER (DECL_NAME (caller))
              : "<unknown>"),
             (DECL_NAME (callee)
              ? IDENTIFIER_POINTER (DECL_NAME (callee))
              : "<unknown>"),
             (ret ? "can" : "cannot"));

  return ret;
}
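
/* Worked example (illustrative): under the subset test above, a caller
   compiled with -mvsx (whose flags include MASK_VSX and MASK_ALTIVEC)
   may inline a callee marked __attribute__ ((target ("altivec"))),
   but a plain -maltivec caller may not inline a VSX callee.  */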
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
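
/* Worked example (illustrative): the AND with -16 built above clears
   the low four address bits, e.g. 0x1003 & -16 == 0x1000, which is how
   the AltiVec load/store patterns express the hardware's implicit
   16-byte alignment of the effective address.  */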
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
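
/* Illustrative sketch of the data area described above, for the 64-bit
   case (field offsets correspond to the AIX_FUNC_DESC_* constants;
   the struct and field names are ours, not from any system header):

       struct aix_func_desc
       {
         void *code;           -- actual function address, offset 0
         void *toc;            -- callee's TOC pointer, offset 8
         void *static_chain;   -- value for r11, offset 16
       };  */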
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"