/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "tree-flow.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack
{
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
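
/* A minimal sketch of how a single -mrecip=xxx keyword could be folded into
   a mask via the table above.  The helper name and the linear scan are
   illustrative assumptions only; the real parsing (including comma-separated
   lists and "!" negation) is done elsewhere in this file.  */
#if 0
static unsigned int
recip_mask_for_keyword_sketch (const char *arg)
{
  unsigned int mask = 0;
  size_t i;

  for (i = 0; i < sizeof (recip_options) / sizeof (recip_options[0]); i++)
    if (strcmp (arg, recip_options[i].string) == 0)
      mask |= recip_options[i].mask;	/* e.g. "divf" sets the SF and
					   V4SF divide bits.  */

  return mask;
}
#endif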
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
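
/* COSTS_N_INSNS (N) is GCC's standard rtx-cost scale: N times the cost of a
   single fast integer instruction, so every entry below is a latency
   relative to an add.  A minimal sketch of how a selected table would be
   consulted, assuming rs6000_cost has already been pointed at the table for
   the -mtune target:  */
#if 0
static int
simode_mul_cost_sketch (void)
{
  return rs6000_cost->mulsi;	/* e.g. COSTS_N_INSNS (3) on POWER4.  */
}
#endif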
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction size costs on 64bit processors.  */
static struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction costs on RS64A processors.  */
static struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on MPCCORE processors.  */
static struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC403 processors.  */
static struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC405 processors.  */
static struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC440 processors.  */
static struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC476 processors.  */
static struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
};

/* Instruction costs on PPC601 processors.  */
static struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC603 processors.  */
static struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC604 processors.  */
static struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC604e processors.  */
static struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC620 processors.  */
static struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on PPC630 processors.  */
static struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC7450 processors.  */
static struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC8540 processors.  */
static struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
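
/* Each RS6000_BUILTIN_* macro above flattens one entry of rs6000-builtin.def
   into an initializer of the form { NAME, ICODE, MASK, ATTR }.  As a purely
   hypothetical example (these names are not taken from the def file), a line
   such as

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
                       CODE_FOR_addv16qi3)

   would expand to

     { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST },

   matching the field order of rs6000_builtin_info_type below.  */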
struct rs6000_builtin_info_type
{
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6", "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
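
/* For example, with the definition above:

     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)      == 0x80000000   (%v0, MSB)
     ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001   (%v31, LSB)

   matching the bit ordering used by the VRSAVE register itself.  */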
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,anchor

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_BASE_MASK = MASK_NEW_MNEMONICS,
  POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
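
/* RS6000_CPU flattens each entry of rs6000-cpus.def into one row of the
   table.  As a hypothetical illustration (the real flags for each CPU live
   in the def file), a line such as

     RS6000_CPU ("power7", PROCESSOR_POWER7, POWERPC_7400_MASK)

   would become

     { "power7", PROCESSOR_POWER7, POWERPC_7400_MASK },  */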
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs point register holds 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
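
/* The return expression above is the usual round-up-division idiom,
   i.e. ceil (GET_MODE_SIZE (mode) / reg_size).  For example, a 16-byte
   mode held in 8-byte registers needs (16 + 8 - 1) / 8 = 2 hard registers,
   while a 4-byte mode in the same registers needs (4 + 8 - 1) / 8 = 1.  */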
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
#define DEBUG_FMT_D "%-32s= %d\n"
#define DEBUG_FMT_X "%-32s= 0x%x\n"
#define DEBUG_FMT_S "%-32s= %s\n"
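
/* These formats left-justify the label in a 32-column field so the
   -mdebug=reg dump lines up, e.g.

     fprintf (stderr, DEBUG_FMT_S, "abi", "aix");

   prints the label "abi" padded to 32 columns, then "= aix".  */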
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  char costly_num[20];
  char nop_num[20];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;

  /* Map enum rs6000_vector to string.  */
  static const char *rs6000_debug_vector_unit[] = {
    "none",
    "altivec",
    "vsx",
    "paired",
    "spe",
    "other"
  };

  fprintf (stderr, "Register information: (last virtual reg = %d)\n",
	   LAST_VIRTUAL_REGISTER);
  rs6000_debug_reg_print (0, 31, "gr");
  rs6000_debug_reg_print (32, 63, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "ws reg_class = %s\n\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
      {
	nl = "\n";
	fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
		 GET_MODE_NAME (m),
		 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
		 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
      }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  fprintf (stderr,
		   "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		   GET_MODE_NAME (m),
		   (RS6000_RECIP_AUTO_RE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		   (RS6000_RECIP_AUTO_RSQRTE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "cpu",
	     processor_target_table[rs6000_cpu_index].name);

  if (rs6000_tune_index >= 0)
    fprintf (stderr, DEBUG_FMT_S, "tune",
	     processor_target_table[rs6000_tune_index].name);

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

#ifdef USING_ELFOS_H
  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }
#endif

  switch (rs6000_traceback)
    {
    case traceback_default:  trace_str = "default"; break;
    case traceback_none:     trace_str = "none";    break;
    case traceback_part:     trace_str = "part";    break;
    case traceback_full:     trace_str = "full";    break;
    default:                 trace_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:   cmodel_str = "small";   break;
    case CMODEL_MEDIUM:  cmodel_str = "medium";  break;
    case CMODEL_LARGE:   cmodel_str = "large";   break;
    default:             cmodel_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:    abi_str = "none";    break;
    case ABI_AIX:     abi_str = "aix";     break;
    case ABI_V4:      abi_str = "V4";      break;
    case ABI_DARWIN:  abi_str = "darwin";  break;
    default:          abi_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_spe_abi)
    fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  if (rs6000_float_gprs)
    fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");

  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);
  fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
}
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  int r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = CA_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate vector information, this must be set up before the
     rs6000_hard_regno_nregs_internal below.  */
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
      rs6000_vector_reload[m][0] = CODE_FOR_nothing;
      rs6000_vector_reload[m][1] = CODE_FOR_nothing;
    }

  for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
    rs6000_constraints[c] = NO_REGS;

  /* The VSX hardware allows native alignment for vectors, but control whether
     the compiler believes it can use native alignment or still uses 128-bit
     alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
     Altivec doesn't have 64-bit support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode] = VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
	= (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }

  /* TODO add SPE and paired floating point vector support.  */

  /* Register class constraints for the constraints that depend on compile
     switches.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;

  if (TARGET_VSX)
    {
      /* At present, we just use VSX_REGS, but we have different constraints
	 based on the use, in case we want to fine tune the default register
	 class used.  wa = any VSX register, wf = register class to use for
	 V4SF, wd = register class to use for V2DF, and ws = register class to
	 use for DF scalars.  */
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
						  ? VSX_REGS
						  : FLOAT_REGS);
    }

  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  /* Set up the reload helper functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
	  rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
	  rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_di_store;
	  rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_di_load;
	  rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_di_store;
	  rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_di_load;
	  rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_di_store;
	  rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_di_load;
	  rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_di_store;
	  rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_di_load;
	  rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_di_store;
	  rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_di_load;
	  if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
	    {
	      rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
	      rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
	    }
	}
      else
	{
	  rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
	  rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
	  rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_si_store;
	  rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_si_load;
	  rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_si_store;
	  rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_si_load;
	  rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_si_store;
	  rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_si_load;
	  rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_si_store;
	  rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_si_load;
	  rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_si_store;
	  rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_si_load;
	  if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
	    {
	      rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
	      rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
	    }
	}
    }

  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	rs6000_class_max_nregs[m][c]
	  = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
    }

  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;

  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate
     is not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
	warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost        = %d\n"
		 "SImode constant mult cost        = %d\n"
		 "SImode short constant mult cost  = %d\n"
		 "DImode multiplication cost       = %d\n"
		 "SImode division cost             = %d\n"
		 "DImode division cost             = %d\n"
		 "Simple fp operation cost         = %d\n"
		 "DFmode multiplication cost       = %d\n"
		 "SFmode division cost             = %d\n"
		 "DFmode division cost             = %d\n"
		 "cache line size                  = %d\n"
		 "l1 cache size                    = %d\n"
		 "l2 cache size                    = %d\n"
		 "simultaneous prefetches          = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
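
/* Worked example (illustrative, not part of the original source): the
   CLASS_MAX_NREGS table above is a ceiling division,

     nregs = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size.

   For V4SImode (16 bytes) held in 8-byte GPRs that gives
   (16 + 8 - 1) / 8 = 2 registers, while in 16-byte VSX registers it gives
   (16 + 16 - 1) / 16 = 1 register.  */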
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }
  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    {
      target_flags |= MASK_ALTIVEC;
    }
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */

static unsigned
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		   ? RS6000_BTM_ALTIVEC	 : 0)
	  | ((TARGET_VSX)		   ? RS6000_BTM_VSX	 : 0)
	  | ((TARGET_SPE)		   ? RS6000_BTM_SPE	 : 0)
	  | ((TARGET_PAIRED_FLOAT)	   ? RS6000_BTM_PAIRED	 : 0)
	  | ((TARGET_FRE)		   ? RS6000_BTM_FRE	 : 0)
	  | ((TARGET_FRES)		   ? RS6000_BTM_FRES	 : 0)
	  | ((TARGET_FRSQRTE)		   ? RS6000_BTM_FRSQRTE	 : 0)
	  | ((TARGET_FRSQRTES)		   ? RS6000_BTM_FRSQRTES : 0)
	  | ((TARGET_POPCNTD)		   ? RS6000_BTM_POPCNTD	 : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	 : 0));
}
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */

static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  int set_masks;
  int cpu_index;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;

  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else
    {
      const char *default_cpu =
	(implicit_cpu ? implicit_cpu
	 : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = implicit_cpu != 0;
    }

  gcc_assert (cpu_index >= 0);

  target_flags &= ~set_masks;
  target_flags |= (processor_target_table[cpu_index].target_enable
		   & set_masks);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;

  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
	(rs6000_cpu == PROCESSOR_PPC8540 ? 1
	 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
	 : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
	  || (main_target_opt->x_rs6000_spe != rs6000_spe)
	  || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }

  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
				  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  target_flags &= ~MASK_MULTIPLE;
	  if ((target_flags_explicit & MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  target_flags &= ~MASK_STRING;
	  if ((target_flags_explicit & MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }

  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
	  || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (target_flags_explicit & MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    target_flags &= ~ MASK_VSX;
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      /* The hardware will allow VSX and little endian, but until we make sure
	 things like vector select, etc. work don't allow VSX on little endian
	 systems at this point.  */
      else if (!BYTES_BIG_ENDIAN)
	msg = N_("-mvsx used with little endian code");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
	{
	  if (target_flags_explicit & MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  target_flags &= ~ MASK_VSX;
	  target_flags_explicit |= MASK_VSX;
	}
    }

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_VSX)
    target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_POPCNTD)
    target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_DFP)
    target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_CMPB)
    target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_FPRND)
    target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
  else if (TARGET_POPCNTB)
    target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
  else if (TARGET_ALTIVEC)
    target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);

  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then prints
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
	= rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
	= rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	{
	  error ("unknown vectorization library ABI type (%s) for "
		 "-mveclibabi= switch", rs6000_veclibabi_name);
	  ret = false;
	}
    }

  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
	{
	  if (main_target_opt != NULL &&
	      !main_target_opt->x_rs6000_altivec_abi)
	    error ("target attribute or pragma changes AltiVec ABI");
	  else
	    rs6000_altivec_abi = 1;
	}
    }

  /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden.  */
  if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
    TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;

  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:

      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;

      target_flags &= ~MASK_STRING;

      break;

    default:

      if (have_cpu && !(target_flags_explicit & MASK_ISEL))
	target_flags &= ~MASK_ISEL;

      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }

  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_POWER7
			&& rs6000_cpu != PROCESSOR_PPCA2
			&& rs6000_cpu != PROCESSOR_CELL
			&& rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5
			 || rs6000_cpu == PROCESSOR_POWER7);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6
				 || rs6000_cpu == PROCESSOR_POWER7
				 || rs6000_cpu == PROCESSOR_PPCE500MC
				 || rs6000_cpu == PROCESSOR_PPCE500MC64
				 || rs6000_cpu == PROCESSOR_PPCE5500
				 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);

  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }

  if (global_init_p)
    {
#ifdef TARGET_REGNAMES
      /* If the user desires alternate register names, copy in the
	 alternate names now.  */
      if (TARGET_REGNAMES)
	memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

      /* Set aix_struct_return last, after the ABI is determined.
	 If -maix-struct-return or -msvr4-struct-return was explicitly
	 used, don't override with the ABI default.  */
      if (!global_options_set.x_aix_struct_return)
	aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

      /* IBM XL compiler defaults to unsigned bitfields.  */
      if (TARGET_XL_COMPAT)
	flag_signed_bitfields = 0;

      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
	REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

      ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

      /* We can only guarantee the availability of DI pseudo-ops when
	 assembling for 64-bit targets.  */
      if (!TARGET_64BIT)
	{
	  targetm.asm_out.aligned_op.di = NULL;
	  targetm.asm_out.unaligned_op.di = NULL;
	}

      /* Set branch target alignment, if not optimizing for size.  */
      if (!optimize_size)
	{
	  /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	     aligned 8byte to avoid misprediction by the branch predictor.  */
	  if (rs6000_cpu == PROCESSOR_TITAN
	      || rs6000_cpu == PROCESSOR_CELL)
	    {
	      if (align_functions <= 0)
		align_functions = 8;
	      if (align_jumps <= 0)
		align_jumps = 8;
	      if (align_loops <= 0)
		align_loops = 8;
	    }
	  if (rs6000_align_branch_targets)
	    {
	      if (align_functions <= 0)
		align_functions = 16;
	      if (align_jumps <= 0)
		align_jumps = 16;
	      if (align_loops <= 0)
		{
		  can_override_loop_align = 1;
		  align_loops = 16;
		}
	    }
	  if (align_jumps_max_skip <= 0)
	    align_jumps_max_skip = 15;
	  if (align_loops_max_skip <= 0)
	    align_loops_max_skip = 15;
	}

      /* Arrange to save and restore machine status around nested functions.  */
      init_machine_status = rs6000_init_machine_status;

      /* We should always be splitting complex arguments, but we can't break
	 Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
      if (DEFAULT_ABI != ABI_AIX)
	targetm.calls.split_complex_arg = NULL;
    }

  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;

      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;

      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;

      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;

      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;

      default:
	gcc_unreachable ();
      }

  if (global_init_p)
    {
      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			     rs6000_cost->simultaneous_prefetches,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			     rs6000_cost->cache_line_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			     global_options.x_param_values,
			     global_options_set.x_param_values);

      /* If using typedef char *va_list, signal that
	 __builtin_va_start (&ap, 0) can be optimized to
	 ap = __builtin_next_arg (0).  */
      if (DEFAULT_ABI != ABI_V4)
	targetm.expand_builtin_va_start = NULL;
    }

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);

  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for -mrecip=%s", q);
		  invert = false;
		  mask = 0;
		  ret = false;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }

  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
	     (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
	     (rs6000_builtin_mask & RS6000_BTM_VSX)     ? ", vsx"     : "",
	     (rs6000_builtin_mask & RS6000_BTM_PAIRED)  ? ", paired"  : "",
	     (rs6000_builtin_mask & RS6000_BTM_SPE)     ? ", spe"     : "");

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node ();

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
static int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector; otherwise
     return the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
	  || rs6000_cpu == PROCESSOR_POWER5
	  || rs6000_cpu == PROCESSOR_POWER6
	  || rs6000_cpu == PROCESSOR_POWER7))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
	return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
	return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
	return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}

/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
      return 1;

    case vec_perm:
      if (TARGET_VSX)
	return 3;
      else
	return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
	return 4;
      else
	return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 22;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned loads are not supported.  */
	gcc_unreachable ();

      return 2;

    case unaligned_store:
      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 23;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned stores are not supported.  */
	gcc_unreachable ();

      return 2;

    case vec_construct:
      elements = TYPE_VECTOR_SUBPARTS (vectype);
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
	 precision, so we need n/2 converts in addition to the usual
	 n/2 merges to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
	  && TYPE_PRECISION (elem_type) == 32)
	return elements + 1;
      else
	return elements / 2 + 1;

    default:
      gcc_unreachable ();
    }
}
/* Implement targetm.vectorize.preferred_simd_mode.  */

static enum machine_mode
rs6000_preferred_simd_mode (enum machine_mode mode)
{
  if (TARGET_VSX)
    switch (mode)
      {
      case DFmode:
	return V2DFmode;
      default:;
      }
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case SFmode:
	return V4SFmode;
      case DImode:
	return V2DImode;
      case SImode:
	return V4SImode;
      case HImode:
	return V8HImode;
      case QImode:
	return V16QImode;
      default:;
      }
  if (TARGET_SPE)
    switch (mode)
      {
      case SFmode:
	return V2SFmode;
      case SImode:
	return V2SImode;
      default:;
      }
  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;
  return word_mode;
}

typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
	    not_vec_cost++;
	}
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump,
		 "density %d%%, cost %d exceeds threshold, penalizing "
		 "loop body cost by %d%%", density_pct,
		 vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
/* Implement targetm.vectorize.init_cost.  */

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		      struct _stmt_vec_info *stmt_info, int misalign,
		      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
							 misalign);
      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;
    }

  return retval;
}
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
		    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}

/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
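
/* Usage sketch (illustrative, not part of the original source): the
   vectorizer drives these hooks as init -> add_stmt (repeatedly) ->
   finish -> destroy.  The compiled-out block below sketches that
   sequence; `loop' and `info' stand for a loop and stmt_vec_info the
   vectorizer would supply.  */
#if 0
void *data = rs6000_init_cost (loop);
rs6000_add_stmt_cost (data, 1, vector_stmt, info, 0, vect_body);
unsigned pro, body, epi;
rs6000_finish_cost (data, &pro, &body, &epi);
rs6000_destroy_cost_data (data);
#endif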
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case BUILT_IN_ATAN2:
	case BUILT_IN_HYPOT:
	case BUILT_IN_POW:
	  n_args = 2;
	  /* fall through */

	case BUILT_IN_ACOS:
	case BUILT_IN_ACOSH:
	case BUILT_IN_ASIN:
	case BUILT_IN_ASINH:
	case BUILT_IN_ATAN:
	case BUILT_IN_ATANH:
	case BUILT_IN_CBRT:
	case BUILT_IN_COS:
	case BUILT_IN_COSH:
	case BUILT_IN_ERF:
	case BUILT_IN_ERFC:
	case BUILT_IN_EXP2:
	case BUILT_IN_EXP:
	case BUILT_IN_EXPM1:
	case BUILT_IN_LGAMMA:
	case BUILT_IN_LOG10:
	case BUILT_IN_LOG1P:
	case BUILT_IN_LOG2:
	case BUILT_IN_LOG:
	case BUILT_IN_SIN:
	case BUILT_IN_SINH:
	case BUILT_IN_SQRT:
	case BUILT_IN_TAN:
	case BUILT_IN_TANH:
	  bdecl = builtin_decl_implicit (fn);
	  suffix = "d2";				/* pow -> powd2 */
	  if (el_mode != DFmode
	      || n != 2)
	    return NULL_TREE;
	  break;

	case BUILT_IN_ATAN2F:
	case BUILT_IN_HYPOTF:
	case BUILT_IN_POWF:
	  n_args = 2;
	  /* fall through */

	case BUILT_IN_ACOSF:
	case BUILT_IN_ACOSHF:
	case BUILT_IN_ASINF:
	case BUILT_IN_ASINHF:
	case BUILT_IN_ATANF:
	case BUILT_IN_ATANHF:
	case BUILT_IN_CBRTF:
	case BUILT_IN_COSF:
	case BUILT_IN_COSHF:
	case BUILT_IN_ERFF:
	case BUILT_IN_ERFCF:
	case BUILT_IN_EXP2F:
	case BUILT_IN_EXPF:
	case BUILT_IN_EXPM1F:
	case BUILT_IN_LGAMMAF:
	case BUILT_IN_LOG10F:
	case BUILT_IN_LOG1PF:
	case BUILT_IN_LOG2F:
	case BUILT_IN_LOGF:
	case BUILT_IN_SINF:
	case BUILT_IN_SINHF:
	case BUILT_IN_SQRTF:
	case BUILT_IN_TANF:
	case BUILT_IN_TANHF:
	  bdecl = builtin_decl_implicit (fn);
	  suffix = "4";					/* powf -> powf4 */
	  if (el_mode != SFmode
	      || n != 4)
	    return NULL_TREE;
	  break;

	default:
	  return NULL_TREE;
	}
    }
  else
    return NULL_TREE;

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
				    tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
	     IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || !TARGET_VECTORIZE_BUILTINS)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case BUILT_IN_COPYSIGN:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
	  break;
	case BUILT_IN_COPYSIGNF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
	  break;
	case BUILT_IN_SQRT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
	  break;
	case BUILT_IN_SQRTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
	  break;
	case BUILT_IN_CEIL:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
	  break;
	case BUILT_IN_CEILF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
	  break;
	case BUILT_IN_FLOOR:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
	  break;
	case BUILT_IN_FLOORF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
	  break;
	case BUILT_IN_FMA:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
	  break;
	case BUILT_IN_FMAF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
	  else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
	  break;
	case BUILT_IN_TRUNC:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
	  break;
	case BUILT_IN_TRUNCF:
	  if (out_mode != SFmode || out_n != 4
	      || in_mode != SFmode || in_n != 4)
	    break;
	  if (VECTOR_UNIT_VSX_P (V4SFmode))
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
	  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
	  break;
	case BUILT_IN_NEARBYINT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && flag_unsafe_math_optimizations
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
	  break;
	case BUILT_IN_NEARBYINTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && flag_unsafe_math_optimizations
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
	  break;
	case BUILT_IN_RINT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && !flag_trapping_math
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
	  break;
	case BUILT_IN_RINTF:
	  if (VECTOR_UNIT_VSX_P (V4SFmode)
	      && !flag_trapping_math
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
	  break;
	default:
	  break;
	}
    }
  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn
	= (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
      switch (fn)
	{
	case RS6000_BUILTIN_RSQRTF:
	  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
	  break;
	case RS6000_BUILTIN_RSQRT:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
	  break;
	case RS6000_BUILTIN_RECIPF:
	  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	      && out_mode == SFmode && out_n == 4
	      && in_mode == SFmode && in_n == 4)
	    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
	  break;
	case RS6000_BUILTIN_RECIP:
	  if (VECTOR_UNIT_VSX_P (V2DFmode)
	      && out_mode == DFmode && out_n == 2
	      && in_mode == DFmode && in_n == 2)
	    return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
	  break;
	default:
	  break;
	}
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (fndecl, type_out, type_in);

  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
	{
	  fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
	  start = "";
	}

      if (global_options_set.x_rs6000_cpu_index)
	{
	  fprintf (file, "%s -mcpu=%s", start,
		   processor_target_table[rs6000_cpu_index].name);
	  start = "";
	}

      if (global_options_set.x_rs6000_tune_index)
	{
	  fprintf (file, "%s -mtune=%s", start,
		   processor_target_table[rs6000_tune_index].name);
	  start = "";
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G %d", start,
		   g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

  if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
    {
      switch_to_section (toc_section);
      switch_to_section (text_section);
    }
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with {cal|addi} */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with {cau|addis} */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif

  else
    return 2;
}
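/* Worked example (illustrative): 0x7fff fits the signed 16-bit addi
   immediate, so it costs 1 insn; 0x12340000 has a zero low half and
   costs 1 insn via addis/lis; 0x12345678 needs an addis/ori pair, so
   the function returns 2 for it.  */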
int
num_insns_constant (rtx op, enum machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && mask64_operand (op, mode))
	return 2;
      else
#endif
	return num_insns_constant_wide (INTVAL (op));

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (rv, l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      if (mode == VOIDmode || mode == DImode)
	{
	  high = CONST_DOUBLE_HIGH (op);
	  low  = CONST_DOUBLE_LOW (op);
	}
      else
	{
	  long l[2];
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
	  high = l[WORDS_BIG_ENDIAN == 0];
	  low  = l[WORDS_BIG_ENDIAN != 0];
	}

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (mask64_operand (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
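/* For example, a V4SFmode element holding 1.0f comes back as the SImode
   value 0x3f800000, the IEEE single-precision bit pattern of 1.0.  */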
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, nunits - 1);
  splat_val = val;
  msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit which can be
     done by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
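/* Illustration of STEP vs. COPIES: vspltisw 5 writes 0x00000005 into each
   word; viewed as V8HImode (big endian) that is { 0, 5, 0, 5, 0, 5, 0, 5 },
   i.e. one in STEP==2 halfwords holds the operand and the rest hold its
   msb (0).  Conversely vspltish 5 viewed as V4SImode gives 0x00050005 in
   every word: the COPIES==2 case, two replicas of the operand per element.  */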
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  if (mode == V2DImode)
    {
      /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
	 easy.  */
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      if (zero_constant (vec, mode))
	return "xxlxor %x0,%x0,%x0";

      if (mode == V2DImode
	  && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
	return "vspltisw %0,-1";
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     pattern of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target,
				  gen_rtx_XOR (mode, target, target)));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx op1 = XVECEXP (vals, 0, 1);
      if (all_same)
	{
	  if (!MEM_P (op0) && !REG_P (op0))
	    op0 = force_reg (inner_mode, op0);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op0));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op0));
	}
      else
	{
	  op0 = force_reg (inner_mode, op0);
	  op1 = force_reg (inner_mode, op1);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op0, op1));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op0, op1));
	}
      return;
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
    {
      if (all_same)
	{
	  rtx freg = gen_reg_rtx (V4SFmode);
	  rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));

	  emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
	  emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd  = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd  = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	  emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	  emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	  emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	  rs6000_expand_extract_even (target, flt_even, flt_odd);
	}
      return;
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (VOIDmode,
							   target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx (*set_func) (rtx, rtx, rtx, rtx)
	= ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
      emit_insn (set_func (target, target, val, GEN_INT (elt)));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
  x = gen_rtx_UNSPEC (mode,
		      gen_rtvec (3, target, reg,
				 force_reg (V16QImode, x)),
		      UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode))
    {
      switch (mode)
	{
	default:
	  break;
	case V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
	  return;
	case V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
	  return;
	case V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
	  return;
	}
    }

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS    ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void)in;
  (void)out;
  gcc_unreachable ();
#endif
}
/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
	 subreg:TI and reg:TF.  Decimal float modes are like integer
	 modes (only low part of each register used) for this
	 purpose.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == SImode || mode == DImode || mode == TImode
	      || mode == DDmode || mode == TDmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DFmode
	      || GET_MODE (SUBREG_REG (op)) == TFmode))
	return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
	 reg:TI.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == DFmode || mode == TFmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DImode
	      || GET_MODE (SUBREG_REG (op)) == TImode
	      || GET_MODE (SUBREG_REG (op)) == DDmode
	      || GET_MODE (SUBREG_REG (op)) == TDmode))
	return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
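/* For example, for (plus (reg) (const_int 8)) this returns (const_int 8);
   for a plain (reg) or a reg+reg sum there is no constant term and the
   function returns NULL_RTX.  */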
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  if (GET_CODE (addr) == LO_SUM)
    /* We know by alignment that ABI_AIX medium/large model toc refs
       will not cross a 32k boundary, since all entries in the
       constant pool are naturally aligned and we check alignment for
       other medium model toc-relative addresses.  For ABI_V4 and
       ABI_DARWIN lo_sum addresses, we just check that 64-bit
       offsets are 4-byte aligned.  */
    return true;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  gcc_assert (extra >= 0);
  return offset + 0x8000 < 0x10000u - extra;
}
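/* E.g. on powerpc64 the ld/std instructions use the DS form: a 16-bit
   signed displacement whose low two bits must be zero.  That is why the
   (offset & 3) test rejects misaligned offsets, and the 0x8000-biased
   comparison checks the signed 16-bit range.  */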
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return false;
      break;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      break;

    default:
      break;
    }

  return true;
}
static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if memory accesses to OP are known to never straddle
   a 32k boundary.  */

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     enum machine_mode mode)
{
  tree decl, type;
  unsigned HOST_WIDE_INT dsize, dalign;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  decl = SYMBOL_REF_DECL (op);
  if (!decl)
    {
      if (GET_MODE_SIZE (mode) == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = BITS_PER_UNIT;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);
	  HOST_WIDE_INT lsb, mask;

	  /* Given the alignment of the block..  */
	  dalign = block->alignment;
	  mask = dalign / BITS_PER_UNIT - 1;

	  /* ..and the combined offset of the anchor and any offset
	     to this block object..  */
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	  lsb = offset & -offset;

	  /* ..find how many bits of the alignment we know for the
	     object.  */
	  mask &= lsb - 1;
	  dalign = mask + 1;
	}
      return dalign >= GET_MODE_SIZE (mode);
    }

  if (DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL)
	return true;

      if (!DECL_SIZE_UNIT (decl))
	return false;

      if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
	return false;

      dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
      if (dsize > 32768)
	return false;

      dalign = DECL_ALIGN_UNIT (decl);
      return dalign >= dsize;
    }

  type = TREE_TYPE (decl);

  if (TREE_CODE (decl) == STRING_CST)
    dsize = TREE_STRING_LENGTH (decl);
  else if (TYPE_SIZE_UNIT (type)
	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
    dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  else
    return false;
  if (dsize > 32768)
    return false;

  dalign = TYPE_ALIGN (type);
  if (CONSTANT_CLASS_P (decl))
    dalign = CONSTANT_ALIGNMENT (decl, dalign);
  else
    dalign = DATA_ALIGNMENT (decl, dalign);
  dalign /= BITS_PER_UNIT;
  return dalign >= dsize;
}
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
static const_rtx tocrel_base, tocrel_offset;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match high part or
   non-split -mcmodel=large/medium toc pointer relative addresses.  */

bool
toc_relative_expr_p (const_rtx op, bool strict)
{
  if (!TARGET_TOC)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* Only match the low part.  */
      if (GET_CODE (op) == LO_SUM
	  && REG_P (XEXP (op, 0))
	  && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
	op = XEXP (op, 1);
      else if (strict)
	return false;
    }

  tocrel_base = op;
  tocrel_offset = const0_rtx;
  if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
				    bool strict)
{
  return (toc_relative_expr_p (x, strict)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || (GET_CODE (XVECEXP (tocrel_base, 0, 0)) == SYMBOL_REF
		  && offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
						  INTVAL (tocrel_offset),
						  mode))));
}
static bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
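/* The mask ~0xf8 leaves only bits 3..7 free, so a valid SPE offset is a
   multiple of 8 in the range [0, 248]: five bits worth of double words.  */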
bool
rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

    case DFmode:
    case DDmode:
      /* On e500v2, we may have:

	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

	 Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case TFmode:
    case TDmode:
      if (TARGET_E500_DOUBLE)
	return (SPE_CONST_OFFSET_OK (offset)
		&& SPE_CONST_OFFSET_OK (offset + 8));
      /* fall through */

    case TImode:
      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
  if (!strict
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)
      && REG_P (op1))
    return true;

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (enum machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}
bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
	       && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x);
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   enum machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS)
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case TFmode:
    case TDmode:
      extra = 8;
      break;

    case TImode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  */
      extra = 8;
      break;

    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra)
      && !(SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if (SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
    {
      if (mode == DImode)
	return x;
      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);

	  if (GET_CODE (op2) != REG
	      && (GET_CODE (op2) != CONST_INT
		  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
		  || (GET_MODE_SIZE (mode) > 8
		      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
	    op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
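/* Worked example of the high/low split above: for reg + 0x12345678 the
   sign-extended low part is 0x5678 and the high part 0x12340000; for
   reg + 0x1234fff0 the low part is -0x10 and the high part 0x12350000.
   In both cases high + low reconstructs the original displacement, so
   an addis of the high part plus a 16-bit memory offset suffices.  */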
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  rtx ret;
  rtx insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
#ifdef ENABLE_CHECKING
      if (REG_P (XVECEXP (y, 0, 1))
	  && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
	{
	  /* All good.  */
	}
      else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
	{
	  /* Weirdness alert.  df_note_compute can replace r2 with a
	     debug_expr when this unspec is in a debug_insn.
	     Seen in gcc.dg/pr51957-1.c  */
	}
      else
	gcc_unreachable ();
#endif
      y = XVECEXP (y, 0, 0);
      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      enum machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab, last;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (Pmode, dest,
				  gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);
	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
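/* Background note: in the PowerPC ELF TLS ABI handled here, the thread
   pointer lives in r13 for 64-bit code and in r2 for 32-bit code, which
   is why the local-exec paths above materialize those registers directly
   and add the tprel offset to them.  */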
/* Return 1 if X contains a thread-local symbol.  */

static bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  return rs6000_tls_referenced_p (x);
}

/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && !CONSTANT_POOL_ADDRESS_P (sym)
	      && SYMBOL_REF_LOCAL_P (sym)));
}
6082 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
6083 replace the input X, or the original X if no replacement is called for.
6084 The output parameter *WIN is 1 if the calling macro should goto WIN,
6087 For RS/6000, we wish to handle large displacements off a base
6088 register by splitting the addend across an addiu/addis and the mem insn.
6089 This cuts number of extra insns needed from 3 to 1.
6091 On Darwin, we use this to generate code for floating point constants.
6092 A movsf_low is generated so we wind up with 2 instructions rather than 3.
6093 The Darwin code is inside #if TARGET_MACHO because only then are the
6094 machopic_* functions defined. */
6096 rs6000_legitimize_reload_address (rtx x
, enum machine_mode mode
,
6097 int opnum
, int type
,
6098 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
6100 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
6102 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
6103 DFmode/DImode MEM. */
6106 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
6107 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)))
6108 reg_offset_p
= false;
6110 /* We must recognize output that we have already generated ourselves. */
6111 if (GET_CODE (x
) == PLUS
6112 && GET_CODE (XEXP (x
, 0)) == PLUS
6113 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
6114 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
6115 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
6117 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6118 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6119 opnum
, (enum reload_type
) type
);
6124 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
6125 if (GET_CODE (x
) == LO_SUM
6126 && GET_CODE (XEXP (x
, 0)) == HIGH
)
6128 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6129 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6130 opnum
, (enum reload_type
) type
);
6136 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
6137 && GET_CODE (x
) == LO_SUM
6138 && GET_CODE (XEXP (x
, 0)) == PLUS
6139 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
6140 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
6141 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
6142 && machopic_operand_p (XEXP (x
, 1)))
6144 /* Result of previous invocation of this function on Darwin
6145 floating point constant. */
6146 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6147 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6148 opnum
, (enum reload_type
) type
);
6154 if (TARGET_CMODEL
!= CMODEL_SMALL
6156 && small_toc_ref (x
, VOIDmode
))
6158 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
6159 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
6160 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6161 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
6162 opnum
, (enum reload_type
) type
);
6167 /* Force ld/std non-word aligned offset into base register by wrapping
6169 if (GET_CODE (x
) == PLUS
6170 && GET_CODE (XEXP (x
, 0)) == REG
6171 && REGNO (XEXP (x
, 0)) < 32
6172 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
6173 && GET_CODE (XEXP (x
, 1)) == CONST_INT
6175 && (INTVAL (XEXP (x
, 1)) & 3) != 0
6176 && VECTOR_MEM_NONE_P (mode
)
6177 && GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
6178 && TARGET_POWERPC64
)
6180 x
= gen_rtx_PLUS (GET_MODE (x
), x
, GEN_INT (0));
6181 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6182 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6183 opnum
, (enum reload_type
) type
);
6188 if (GET_CODE (x
) == PLUS
6189 && GET_CODE (XEXP (x
, 0)) == REG
6190 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
6191 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
6192 && GET_CODE (XEXP (x
, 1)) == CONST_INT
6194 && !SPE_VECTOR_MODE (mode
)
6195 && !(TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
6196 || mode
== DDmode
|| mode
== TDmode
6198 && VECTOR_MEM_NONE_P (mode
))
6200 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
6201 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
6203 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
6205 /* Check for 32-bit overflow. */
6206 if (high
+ low
!= val
)
6212 /* Reload the high part into a base reg; leave the low part
6213 in the mem directly. */
6215 x
= gen_rtx_PLUS (GET_MODE (x
),
6216 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
6220 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
6221 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
6222 opnum
, (enum reload_type
) type
);
  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && VECTOR_MEM_NONE_P (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
         The same goes for DImode without 64-bit gprs and DFmode and DDmode
         without fprs.
         ??? Assume floating point reg based on mode?  This assumption is
         violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
         where reload ends up doing a DFmode load of a constant from
         mem using two gprs.  Unfortunately, at this point reload
         hasn't yet selected regs so poking around in reload data
         won't help and even if we could figure out the regs reliably,
         we'd still want to allow this transformation when the mem is
         naturally aligned.  Since we say the address is good here, we
         can't disable offsets from LO_SUMs in mem_operand_gpr.
         FIXME: Allow offset from lo_sum for other modes too, when
         mem is sufficiently aligned.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
          || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
        {
          rtx offset = machopic_gen_offset (x);
          x = gen_rtx_LO_SUM (GET_MODE (x),
                              gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
                                            gen_rtx_HIGH (Pmode, offset)),
                              offset);
        }
      else
#endif
        x = gen_rtx_LO_SUM (GET_MODE (x),
                            gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
        push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                     BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                     opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
                                        int opnum, int type,
                                        int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
                                              ind_levels, win);
  fprintf (stderr,
           "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
           "type = %d, ind_levels = %d, win = %d, original addr:\n",
           GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */

static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
           && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (virtual_stack_registers_memory_p (x))
    return 1;
  if (reg_offset_p && legitimate_small_data_p (mode, x))
    return 1;
  if (reg_offset_p
      && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
          || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
          || TARGET_POWERPC64
          || (mode != DFmode && mode != DDmode)
          || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
          || TARGET_POWERPC64
          || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
           && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
                                              reg_ok_strict, false)
          || (!avoiding_indexed_address_p (mode)
              && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
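
/* Illustrative examples (a sketch, not from the original source) of
   address forms the predicate above can accept for SImode:
     (reg:SI 3)                               register indirect
     (plus:SI (reg:SI 3) (const_int 16))      register + 16-bit offset
     (plus:SI (reg:SI 3) (reg:SI 4))          indexed (register + register)
     (lo_sum:SI (reg:SI 3) (symbol_ref:SI))   lo_sum, when reg_offset_p
   Which forms are legal for a given mode still depends on the target
   flags tested above.  */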
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
                                   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
           "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
           "strict = %d, code = %s\n",
           ret ? "true" : "false",
           GET_MODE_NAME (mode),
           reg_ok_strict,
           GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
         is considered a legitimate address before reload, so there
         are no offset restrictions in that case.  Note that this
         condition is safe in strict mode because any address involving
         virtual_stack_vars_rtx or arg_pointer_rtx would already have
         been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
          && XEXP (addr, 0) != arg_pointer_rtx
          && GET_CODE (XEXP (addr, 1)) == CONST_INT)
        {
          unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
          return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
        }
      break;

    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
         all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}
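
/* Worked example for the PLUS case above (illustrative, not part of
   the original source): with TARGET_POWERPC64 the limit is
   0x10000 - 8 = 65528.  An offset of 32752 gives 32752 + 0x8000 =
   65520 < 65528, so the address is not mode-dependent; an offset of
   32760 gives exactly 65528 and is flagged, because the last
   doubleword of a multi-word access would need displacement
   32760 + 8 = 0x8000, out of range for a signed 16-bit field.  */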
/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
           ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
        /* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
           for aliasing purposes.  */
        return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
                || GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
                                             true, worst_case);
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */
  fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
        = call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
         registers in prologues and epilogues.  We no longer use r14
         for FIXED_SCRATCH, but we're keeping r14 out of the allocation
         pool for link-compatibility with older versions of GCC.  Once
         "old" code has died out, we can return r14 to the allocation
         pool.  */
      fixed_regs[14]
        = call_used_regs[14]
        = call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
        fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
        call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
        for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
          fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
                       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
        dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
                              GEN_INT (INTVAL (source)
                                       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                              gen_rtx_IOR (SImode, copy_rtx (result),
                                           GEN_INT (INTVAL (source) & 0xffff))));
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
        {
        case CONST_INT:
          c0 = INTVAL (source);
          c1 = -(c0 < 0);
          break;

        case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
          c0 = CONST_DOUBLE_LOW (source);
          c1 = -(c0 < 0);
#else
          c0 = CONST_DOUBLE_LOW (source);
          c1 = CONST_DOUBLE_HIGH (source);
#endif
          break;

        default:
          gcc_unreachable ();
        }

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
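
/* Illustrative example (not part of the original source): for the
   SImode constant 0x12345678 the code above emits roughly
     (set (reg tmp) (const_int 0x12340000))                  ; lis
     (set (reg dest) (ior:SI (reg tmp) (const_int 0x5678)))  ; ori
   and then attaches a REG_EQUAL note holding the original constant,
   so later passes can still see the full value.  */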
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
                                        DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
                                        DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
          || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
        {
          if (ud1 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
          else
            emit_move_insn (dest, GEN_INT (ud1));
        }
      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
               || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
        {
          if (ud2 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud2 << 16));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
      else if (ud3 == 0 && ud4 == 0)
        {
          gcc_assert (ud2 & 0x8000);
          emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
                                         - 0x80000000));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ZERO_EXTEND (DImode,
                                               gen_lowpart (SImode,
                                                            copy_rtx (dest))));
        }
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
               || (ud4 == 0 && ! (ud3 & 0x8000)))
        {
          if (ud3 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud3 << 16));

          if (ud2 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud2)));
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
                                          GEN_INT (16)));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
      else
        {
          if (ud4 & 0x8000)
            emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
                                           - 0x80000000));
          else
            emit_move_insn (dest, GEN_INT (ud4 << 16));

          if (ud3 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud3)));

          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
                                          GEN_INT (32)));
          if (ud2 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud2 << 16)));
          if (ud1 != 0)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud1)));
        }
    }
  return dest;
}
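
/* Worked example of the decomposition above (illustrative, not part
   of the original source): c = 0x123456789abcdef0 splits into
   ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc, ud1 = 0xdef0.  Since ud4
   is nonzero, the final branch emits the five-instruction pattern
   lis (ud4), ori (ud3), sldi 32, oris (ud2), ori (ud1).  */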
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (reload_in_progress)
    return;

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
                                               GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
                               copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
                                               GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
                               copy_addr_to_reg (XEXP (operands[1], 0)));
}
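
/* Illustrative example (not part of the original source): given
   (mem:TI (plus:DI (reg:DI 3) (reg:DI 4))), the address is copied to
   a fresh pseudo and the operand becomes (mem:TI (reg:DI tmp)),
   since the modes named above have no [r+r] form.  */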
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
               "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
               "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
               GET_MODE_NAME (mode),
               reload_in_progress,
               reload_completed,
               can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
         to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
              || FLOAT_MODE_P (mode)
              || ((CONST_DOUBLE_HIGH (operands[1]) != 0
                   || CONST_DOUBLE_LOW (operands[1]) < 0)
                  && (CONST_DOUBLE_HIGH (operands[1]) != -1
                      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
          || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
                                            ? 32 : MEM_ALIGN (operands[0])))
            || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
                                               ? 32
                                               : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
                      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
                      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
        {
          addend = XEXP (XEXP (tmp, 0), 1);
          tmp = XEXP (XEXP (tmp, 0), 0);
        }

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
        {
          tmp = gen_rtx_PLUS (mode, tmp, addend);
          tmp = force_operand (tmp, operands[0]);
        }
      operands[1] = tmp;
    }

  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
          || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
                        simplify_gen_subreg (DFmode, operands[1], mode, 0),
                        DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
                                             GET_MODE_SIZE (DFmode)),
                        simplify_gen_subreg (DFmode, operands[1], mode,
                                             GET_MODE_SIZE (DFmode)),
                        DFmode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);

  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
        {
          rtx mem = adjust_address_nv (operands[0], DDmode, 0);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_store (mem, operands[1]));
        }
      else if (INT_REGNO_P (REGNO (operands[1])))
        {
          rtx mem = adjust_address_nv (operands[0], mode, 4);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_hardfloat (mem, operands[1]));
        }
      else
        gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
        {
          rtx mem = adjust_address_nv (operands[1], DDmode, 0);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_load (operands[0], mem));
        }
      else if (INT_REGNO_P (REGNO (operands[0])))
        {
          rtx mem = adjust_address_nv (operands[1], mode, 4);
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          emit_insn (gen_movsd_hardfloat (operands[0], mem));
        }
      else
        gcc_unreachable ();
      return;
    }

  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != CONST_INT)
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
          && ! easy_fp_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
    case V2DFmode:
    case V2DImode:
      if (CONSTANT_P (operands[1])
          && !easy_vector_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
          && mode == Pmode
          && DEFAULT_ABI == ABI_V4
          && (GET_CODE (operands[1]) == SYMBOL_REF
              || GET_CODE (operands[1]) == CONST)
          && small_data_operand (operands[1], mode))
        {
          emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
          return;
        }

      if (DEFAULT_ABI == ABI_V4
          && mode == Pmode && mode == SImode
          && flag_pic == 1 && got_operand (operands[1], mode))
        {
          emit_insn (gen_movsi_got (operands[0], operands[1]));
          return;
        }

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
          && TARGET_NO_TOC
          && ! flag_pic
          && mode == Pmode
          && CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != HIGH
          && GET_CODE (operands[1]) != CONST_INT)
        {
          rtx target = (!can_create_pseudo_p ()
                        ? operands[0]
                        : gen_reg_rtx (mode));

          /* If this is a function address on -mcall-aixdesc,
             convert it to the address of the descriptor.  */
          if (DEFAULT_ABI == ABI_AIX
              && GET_CODE (operands[1]) == SYMBOL_REF
              && XSTR (operands[1], 0)[0] == '.')
            {
              const char *name = XSTR (operands[1], 0);
              rtx new_ref;
              while (*name == '.')
                name++;
              new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
              CONSTANT_POOL_ADDRESS_P (new_ref)
                = CONSTANT_POOL_ADDRESS_P (operands[1]);
              SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
              SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
              SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
              operands[1] = new_ref;
            }

          if (DEFAULT_ABI == ABI_DARWIN)
            {
#if TARGET_MACHO
              if (MACHO_DYNAMIC_NO_PIC_P)
                {
                  /* Take care of any required data indirection.  */
                  operands[1] = rs6000_machopic_legitimize_pic_address (
                                  operands[1], mode, operands[0]);
                  if (operands[0] != operands[1])
                    emit_insn (gen_rtx_SET (VOIDmode,
                                            operands[0], operands[1]));
                  return;
                }
#endif
              emit_insn (gen_macho_high (target, operands[1]));
              emit_insn (gen_macho_low (operands[0], target, operands[1]));
              return;
            }

          emit_insn (gen_elf_high (target, operands[1]));
          emit_insn (gen_elf_low (operands[0], target, operands[1]));
          return;
        }

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
         and we have put it in the TOC, we just need to make a TOC-relative
         reference to it.  */
      if (TARGET_TOC
          && GET_CODE (operands[1]) == SYMBOL_REF
          && use_toc_relative_ref (operands[1]))
        operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
               && CONSTANT_P (operands[1])
               && GET_CODE (operands[1]) != HIGH
               && ((GET_CODE (operands[1]) != CONST_INT
                    && ! easy_fp_constant (operands[1], mode))
                   || (GET_CODE (operands[1]) == CONST_INT
                       && (num_insns_constant (operands[1], mode)
                           > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
                   || (GET_CODE (operands[0]) == REG
                       && FP_REGNO_P (REGNO (operands[0]))))
               && !toc_relative_expr_p (operands[1], false)
               && (TARGET_CMODEL == CMODEL_SMALL
                   || can_create_pseudo_p ()
                   || (REG_P (operands[0])
                       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
        {

#if TARGET_MACHO
          /* Darwin uses a special PIC legitimizer.  */
          if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
            {
              operands[1] =
                rs6000_machopic_legitimize_pic_address (operands[1], mode,
                                                        operands[0]);
              if (operands[0] != operands[1])
                emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
              return;
            }
#endif

          /* If we are to limit the number of things we put in the TOC and
             this is a symbol plus a constant we can add in one insn,
             just put the symbol in the TOC and add the constant.  Don't do
             this if reload is in progress.  */
          if (GET_CODE (operands[1]) == CONST
              && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
              && GET_CODE (XEXP (operands[1], 0)) == PLUS
              && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
              && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
                  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
              && ! side_effects_p (operands[0]))
            {
              rtx sym =
                force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
              rtx other = XEXP (XEXP (operands[1], 0), 1);

              sym = force_reg (mode, sym);
              emit_insn (gen_add3_insn (operands[0], sym, other));
              return;
            }

          operands[1] = force_const_mem (mode, operands[1]);

          if (TARGET_TOC
              && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
              && constant_pool_expr_p (XEXP (operands[1], 0))
              && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
                        get_pool_constant (XEXP (operands[1], 0)),
                        get_pool_mode (XEXP (operands[1], 0))))
            {
              rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
                                                 operands[0]);
              operands[1] = gen_const_mem (mode, tocref);
              set_mem_alias_set (operands[1], get_TOC_alias_set ());
            }
        }
      break;

    case TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
        rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))

/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
         as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
        return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
          || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
        {
          warning (0, "GCC vector returned by reference: "
                   "non-standard ABI extension with no compatibility guarantee");
          warned_for_return_big_vectors = true;
        }
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
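
/* Illustrative examples (not part of the original source): under the
   SVR4 rules above, a 12-byte struct is returned in memory (12 > 8),
   while an 8-byte struct is returned in r3/r4 when aix_struct_return
   is clear.  A variable-sized struct has int_size_in_bytes == -1,
   which the unsigned cast turns into a huge value, forcing memory.  */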
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (cgraph_state == CGRAPH_STATE_EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
        return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
        return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_get_node (fndecl);
      c_node = cgraph_function_or_thunk_node (c_node, NULL);
      return !cgraph_only_called_directly_p (c_node);
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED, int incoming,
                      int libcall, int n_named_args,
                      tree fndecl ATTRIBUTE_UNUSED,
                      enum machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
                      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
          && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
          && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
        {
          tree ret_type = TREE_TYPE (fntype);
          fprintf (stderr, " ret code = %s,",
                   tree_code_name[ (int)TREE_CODE (ret_type) ]);
        }

      if (cum->call_cookie & CALL_LONG)
        fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
               cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4)
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
        {
          tree return_type;

          if (fntype)
            {
              return_type = TREE_TYPE (fntype);
              return_mode = TYPE_MODE (return_type);
            }
          else
            return_type = lang_hooks.types.type_for_mode (return_mode, 0);

          if (return_type != NULL)
            {
              if (TREE_CODE (return_type) == RECORD_TYPE
                  && TYPE_TRANSPARENT_AGGR (return_type))
                {
                  return_type = TREE_TYPE (first_field (return_type));
                  return_mode = TYPE_MODE (return_type);
                }
              if (AGGREGATE_TYPE_P (return_type)
                  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
                      <= 8))
                rs6000_returns_struct = true;
            }
          if (SCALAR_FLOAT_MODE_P (return_mode))
            rs6000_passes_float = true;
          else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
                   || SPE_VECTOR_MODE (return_mode))
            rs6000_passes_vector = true;
        }
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
             " altivec instructions are disabled, use -maltivec"
             " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
         if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
         i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
         passed padded downward, except that -mstrict-align further
         muddied the water in that multi-component structures of 2 and 4
         bytes in size were passed padded upward.

         The following arranges for best compatibility with previous
         versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT size = 0;

          if (mode == BLKmode)
            {
              if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
                size = int_size_in_bytes (type);
            }
          else
            size = GET_MODE_SIZE (mode);

          if (size == 1 || size == 2 || size == 4)
            return downward;
        }
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
        return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
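
/* Illustrative examples (not part of the original source): on a
   big-endian target a 2-byte struct is padded downward (it sits in
   the low-order end of its word), while a 3-byte struct falls
   through to the `upward' return, matching historical GCC
   behavior described in the comment above.  */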
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
          || (TARGET_HARD_FLOAT
              && TARGET_FPRS
              && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 8
               && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 16))
    return 128;
  else if (TARGET_MACHO
           && rs6000_darwin64_abi
           && mode == BLKmode
           && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
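
/* Illustrative examples (not part of the original source): under the
   V.4 ABI a double (GET_MODE_SIZE == 8) gets a 64-bit boundary, an
   AltiVec vector gets 128, and everything else falls back to
   PARM_BOUNDARY.  */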
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
                   unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
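
/* Worked example (illustrative, not part of the original source):
   for a 64-bit-aligned parm on V.4 with PARM_BOUNDARY == 32, align
   is 64/32 - 1 = 1 and parm_offset is 2.  With nwords = 1, the
   result is 1 + (-(2 + 1) & 1) = 2, skipping one word so the
   argument starts on a doubleword boundary of the save area.  */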
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
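
/* Illustrative example (not part of the original source): a 10-byte
   BLKmode struct occupies (10 + 3) >> 2 = 3 words under the 32-bit
   ABI and (10 + 7) >> 3 = 2 words under the 64-bit ABI.  */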
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
                                          HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
          || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
                            MODE_INT, 0);
      if (mode == BLKmode)
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = intoffset & -BITS_PER_WORD;
        }
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
    {
      int pad = (endbit/BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
                                            const_tree type,
                                            HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        enum machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && host_integerp (bit_position (f), 1))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
        else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
          {
            unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->fregno += n_fpregs;
            /* Single-precision floats present a special problem for
               us, because they are smaller than an 8-byte GPR, and so
               the structure-packing rules combined with the standard
               varargs behavior mean that we want to pack float/float
               and float/int combinations into a single register's
               space.  This is complicated by the arg advance flushing,
               which works on arbitrarily large groups of int-type
               fields.  */
            if (mode == SFmode)
              {
                if (cum->floats_in_gpr == 1)
                  {
                    /* Two floats in a word; count the word and reset
                       the float count.  */
                    cum->words++;
                    cum->floats_in_gpr = 0;
                  }
                else if (bitpos % 64 == 0)
                  {
                    /* A float at the beginning of an 8-byte word;
                       count it and put off adjusting cum->words until
                       we see if a arg advance flush is going to do it
                       for us.  */
                    cum->floats_in_gpr++;
                  }
                else
                  {
                    /* The float is at the end of a word, preceded
                       by integer fields, so the arg advance flush
                       just above has already set cum->words and
                       everything is taken care of.  */
                  }
              }
            else
              cum->words += n_fpregs;
          }
        else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
          {
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->vregno++;
            cum->words += 2;
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
         && ((mode == BLKmode
              && TREE_CODE (type) == RECORD_TYPE
              && int_size_in_bytes (type) > 0)
             || (type && TREE_CODE (type) == RECORD_TYPE
                 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                               const_tree type, bool named, int depth)
{
  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        rs6000_passes_float = true;
      else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
        rs6000_passes_vector = true;
      else if (SPE_VECTOR_MODE (mode)
               && !cum->stdarg
               && cum->sysv_gregno <= GP_ARG_MAX_REG)
        rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
          || (type && TREE_CODE (type) == VECTOR_TYPE
              && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
        {
          cum->vregno++;
          if (!TARGET_ALTIVEC)
            error ("cannot pass argument in vector register because"
                   " altivec instructions are disabled, use -maltivec"
                   " to enable them");

          /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
             even if it is going to be passed in a vector register.
             Darwin does the same for variable-argument functions.  */
          if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
              || (cum->stdarg && DEFAULT_ABI != ABI_V4))
            stack = true;
        }
      else
        stack = true;

      if (stack)
        {
          int align;

          /* Vector parameters must be 16-byte aligned.  This places
             them at 2 mod 4 in terms of words in 32-bit mode, since
             the parameter save area starts at offset 24 from the
             stack.  In 64-bit mode, they just have to start on an
             even word, since the parameter save area is 16-byte
             aligned.  Space for GPRs is reserved even if the argument
             will be passed in memory.  */
          if (TARGET_32BIT)
            align = (2 - cum->words) & 3;
          else
            align = cum->words & 1;
          cum->words += align + rs6000_arg_size (mode, type);

          if (TARGET_DEBUG_ARG)
            {
              fprintf (stderr, "function_adv: words = %2d, align=%d, ",
                       cum->words, align);
              fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
                       cum->nargs_prototype, cum->prototype,
                       GET_MODE_NAME (mode));
            }
        }
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
           && !cum->stdarg
           && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
         treated as if consisting entirely of ints.
         Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
          && (cum->words % 2) != 0)
        cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
        cum->words += (size + 7) / 8;
      else
        {
          /* It is tempting to say int register count just goes up by
             sizeof(type)/8, but this is wrong in a case such as
             { int; double; int; } [powerpc alignment].  We have to
             grovel through the fields for these too.  */
          cum->intoffset = 0;
          cum->floats_in_gpr = 0;
          rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
          rs6000_darwin64_record_arg_advance_flush (cum,
                                                    size * BITS_PER_UNIT, 1);
        }
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
                   cum->words, TYPE_ALIGN (type), size);
          fprintf (stderr,
                   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
                   cum->nargs_prototype, cum->prototype,
                   GET_MODE_NAME (mode));
        }
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
          && ((TARGET_SINGLE_FLOAT && mode == SFmode)
              || (TARGET_DOUBLE_FLOAT && mode == DFmode)
              || (mode == TFmode && !TARGET_IEEEQUAD)
              || mode == SDmode || mode == DDmode || mode == TDmode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
          else
            {
              cum->fregno = FP_ARG_V4_MAX_REG + 1;
              if (mode == DFmode || mode == TFmode
                  || mode == DDmode || mode == TDmode)
                cum->words += cum->words & 1;
              cum->words += rs6000_arg_size (mode, type);
            }
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
             (r7,r8) or (r9,r10).  As does any other 2 word item such
             as complex int due to a historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            {
              /* Long long and SPE vectors are aligned on the stack.
                 So are other 2 word items such as complex int due to
                 a historical mistake.  */
              if (n_words == 2)
                cum->words += cum->words & 1;
              cum->words += n_words;
            }

          /* Note: continuing to accumulate gregno past when we've started
             spilling to the stack indicates the fact that we've started
             spilling to the stack to expand_builtin_saveregs.  */
          cum->sysv_gregno = gregno + n_words;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
                   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
          fprintf (stderr, "mode = %4s, named = %d\n",
                   GET_MODE_NAME (mode), named);
        }
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
          && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          /* _Decimal128 must be passed in an even/odd float register pair.
             This assumes that the register number is odd when fregno is
             odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;
          cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
                   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
          fprintf (stderr, "named = %d, align = %d, depth = %d\n",
                   named, align_words - start_words, depth);
        }
    }
}
static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
                             const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
                                 0);
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
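
/* Illustrative example (a sketch, not from the original source): a
   DFmode value starting in r5 (gregno == 5) yields
     (parallel:DF [(expr_list (reg:DI 5) (const_int 0))])
   i.e. one DImode chunk at byte offset 0 of the argument.  */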
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                         const_tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     are passed and returned in a pair of GPRs for ABI compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
                             || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
        gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
        return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
        gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
        {
          rtx r1, r2;
          enum machine_mode m = SImode;

          r1 = gen_rtx_REG (m, gregno);
          r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
          r2 = gen_rtx_REG (m, gregno + 1);
          r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
        }
      else
        return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
        return gen_rtx_REG (mode, gregno);
      else
        return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
                                  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
                            MODE_INT, 0);
      if (mode == BLKmode)
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = intoffset & -BITS_PER_WORD;
          mode = word_mode;
        }
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
        gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
                                    HOST_WIDE_INT startbitpos, rtx rvec[],
                                    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        enum machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && host_integerp (bit_position (f), 1))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
        else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
          {
            unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
            switch (mode)
              {
              case SCmode: mode = SFmode; break;
              case DCmode: mode = DFmode; break;
              case TCmode: mode = TFmode; break;
              default: break;
              }

            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
              {
                gcc_assert (cum->fregno == FP_ARG_MAX_REG
                            && (mode == TFmode || mode == TDmode));
                /* Long double or _Decimal128 split over regs and memory.  */
                mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
                cum->use_stack = 1;
              }
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->fregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
            if (mode == TFmode || mode == TDmode)
              cum->fregno++;
          }
        else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
          {
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->vregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
                            bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
        return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
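/* Illustrative sketch, not from the original source: for a value of
   type struct { double d; int i; } passed under darwin64, the PARALLEL
   built above would look roughly like

       (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                      (expr_list (reg:DI r4) (const_int 8))])

   i.e. the FP field in an FP register and the trailing chunk in a GPR,
   each tagged with its byte offset into the block.  The exact registers
   depend on the CUMULATIVE_ARGS state at the call site.  */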
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
                           int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
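/* Worked example (illustrative only): a DFmode argument reaching here
   at align_words == 7 spans the last GPR plus the stack, so the loop
   emits

       (parallel:DF [(expr_list (nil) (const_int 0))        ; memory marker
                     (expr_list (reg:SI r10) (const_int 0))])

   whereas at align_words == 6 it fits entirely in two SImode pieces at
   byte offsets 0 and 4.  Register numbers shown are hypothetical.  */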
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
                     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
          && (cum->call_cookie & CALL_LIBCALL) == 0
          && (cum->stdarg
              || (cum->nargs_prototype < 0
                  && (cum->prototype || TARGET_NO_PROTOTYPE))))
        {
          /* For the SPE, we need to crxor CR6 always.  */
          if (TARGET_SPE_ABI)
            return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
          else if (TARGET_HARD_FLOAT && TARGET_FPRS)
            return GEN_INT (cum->call_cookie
                            | ((cum->fregno == FP_ARG_MIN_REG)
                               ? CALL_V4_SET_FP_ARGS
                               : CALL_V4_CLEAR_FP_ARGS));
        }

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
      if (rslt != NULL_RTX)
        return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
        /* Vector parameters get passed in vector register
           and also in GPRs or memory, in absence of prototype.  */
        int align_words;
        rtx slot;
        align_words = (cum->words + 1) & ~1;

        if (align_words >= GP_ARG_NUM_REG)
          slot = NULL_RTX;
        else
          slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        return gen_rtx_PARALLEL (mode,
                 gen_rtvec (2,
                            gen_rtx_EXPR_LIST (VOIDmode,
                                               slot, const0_rtx),
                            gen_rtx_EXPR_LIST (VOIDmode,
                                               gen_rtx_REG (mode, cum->vregno),
                                               const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
           && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
               || (type && TREE_CODE (type) == VECTOR_TYPE
                   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
        return NULL_RTX;
      else
        {
          /* Vector parameters to varargs functions under AIX or Darwin
             get passed in memory and possibly also in GPRs.  */
          int align, align_words, n_words;
          enum machine_mode part_mode;

          /* Vector parameters must be 16-byte aligned.  This places them at
             2 mod 4 in terms of words in 32-bit mode, since the parameter
             save area starts at offset 24 from the stack.  In 64-bit mode,
             they just have to start on an even word, since the parameter
             save area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = (2 - cum->words) & 3;
          else
            align = cum->words & 1;
          align_words = cum->words + align;

          /* Out of registers?  Memory, then.  */
          if (align_words >= GP_ARG_NUM_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          /* The vector value goes in GPRs.  Only the part of the
             value in GPRs is reported here.  */
          part_mode = mode;
          n_words = rs6000_arg_size (mode, type);
          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Fortunately, there are only two possibilities, the value
               is either wholly in GPRs or half in GPRs and half not.  */
            part_mode = DImode;

          return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
        }
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
           && (SPE_VECTOR_MODE (mode)
               || (TARGET_E500_DOUBLE && (mode == DFmode
                                          || mode == DCmode
                                          || mode == TFmode
                                          || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
          && ((TARGET_SINGLE_FLOAT && mode == SFmode)
              || (TARGET_DOUBLE_FLOAT && mode == DFmode)
              || (mode == TFmode && !TARGET_IEEEQUAD)
              || mode == SDmode || mode == DDmode || mode == TDmode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            return gen_rtx_REG (mode, cum->fregno);
          else
            return NULL_RTX;
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
             (r7,r8) or (r9,r10).  As does any other 2 word item such
             as complex int due to a historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type,
                                              gregno - GP_ARG_MIN_REG);
          return gen_rtx_REG (mode, gregno);
        }
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
         This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
        cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
        {
          rtx rvec[GP_ARG_NUM_REG + 1];
          rtx r;
          int k;
          bool needs_psave;
          enum machine_mode fmode = mode;
          unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

          if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
            {
              /* Currently, we only ever need one reg here because complex
                 doubles are split.  */
              gcc_assert (cum->fregno == FP_ARG_MAX_REG
                          && (fmode == TFmode || fmode == TDmode));

              /* Long double or _Decimal128 split over regs and memory.  */
              fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
            }

          /* Do we also need to pass this arg in the parameter save
             area?  */
          needs_psave = (type
                         && (cum->nargs_prototype <= 0
                             || (DEFAULT_ABI == ABI_AIX
                                 && TARGET_XL_COMPAT
                                 && align_words >= GP_ARG_NUM_REG)));

          if (!needs_psave && mode == fmode)
            return gen_rtx_REG (fmode, cum->fregno);

          k = 0;
          if (needs_psave)
            {
              /* Describe the part that goes in gprs or the stack.
                 This piece must come first, before the fprs.  */
              if (align_words < GP_ARG_NUM_REG)
                {
                  unsigned long n_words = rs6000_arg_size (mode, type);

                  if (align_words + n_words > GP_ARG_NUM_REG
                      || (TARGET_32BIT && TARGET_POWERPC64))
                    {
                      /* If this is partially on the stack, then we only
                         include the portion actually in registers here.  */
                      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
                      rtx off;
                      int i = 0;
                      if (align_words + n_words > GP_ARG_NUM_REG)
                        /* Not all of the arg fits in gprs.  Say that it
                           goes in memory too, using a magic NULL_RTX
                           component.  Also see comment in
                           rs6000_mixed_function_arg for why the normal
                           function_arg_partial_nregs scheme doesn't work
                           in this case.  */
                        rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
                                                       const0_rtx);
                      do
                        {
                          r = gen_rtx_REG (rmode,
                                           GP_ARG_MIN_REG + align_words);
                          off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
                          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
                        }
                      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
                    }
                  else
                    {
                      /* The whole arg fits in gprs.  */
                      r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
                      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
                    }
                }
              else
                /* It's entirely in memory.  */
                rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
            }

          /* Describe where this piece goes in the fprs.  */
          r = gen_rtx_REG (fmode, cum->fregno);
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
        }
      else if (align_words < GP_ARG_NUM_REG)
        {
          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          if (mode == BLKmode)
            mode = Pmode;

          return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        }
      else
        return NULL_RTX;
    }
}
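/* Illustrative example, not from the original source: under the AIX
   ABI, an unprototyped double whose GPR slot is still available comes
   back as a PARALLEL describing both homes, e.g.

       (parallel:DF [(expr_list (reg:DI r5) (const_int 0))
                     (expr_list (reg:DF f3) (const_int 0))])

   so the value lands in the parameter save area via the GPR and also
   in an FPR.  The GPR piece deliberately comes first, as the comment
   above requires; the register choices shown are hypothetical.  */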
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
                          tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
         (gprs or memory) as well as fprs, then this function should
         return the number of partial bytes passed in the parameter
         save area rather than partial bytes passed in fprs.  */
      if (type
          && (cum->nargs_prototype <= 0
              || (DEFAULT_ABI == ABI_AIX
                  && TARGET_XL_COMPAT
                  && align_words >= GP_ARG_NUM_REG)))
        return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
               > FP_ARG_MAX_REG + 1)
        ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
        return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
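/* Illustrative arithmetic (not in the original): with GP_ARG_NUM_REG
   == 8, a two-word argument whose first word lands at align_words == 7
   gives align_words + rs6000_arg_size == 9 > 8, so on a 64-bit target
   ret = (8 - 7) * 8 = 8 bytes passed in registers, with the remainder
   going to memory.  */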
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                          enum machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
        {
          warning (0, "GCC vector passed by reference: "
                   "non-standard ABI extension with no compatibility guarantee");
          warned_for_pass_big_vectors = true;
        }
      return 1;
    }

  return 0;
}
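/* Examples of the rules above (illustrative): under the SVR4 (V.4) ABI
   a struct such as struct S { int a, b; } is passed by reference, as is
   IEEE 128-bit long double; a 32-byte GCC vector declared with
   __attribute__ ((vector_size (32))) exceeds the 16-byte AltiVec limit,
   triggers the one-time "non-standard ABI extension" warning, and then
   also goes by reference.  */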
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
        {
          if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
            tem = NULL_RTX;
          else
            tem = simplify_gen_subreg (reg_mode, x, BLKmode,
                                       i * GET_MODE_SIZE (reg_mode));
        }
      else
        tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any needed actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
                        tree type, int *pretend_size ATTRIBUTE_UNUSED,
                        int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
        {
          int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
          HOST_WIDE_INT offset = 0;

          /* Try to optimize the size of the varargs save area.
             The ABI requires that ap.reg_save_area is doubleword
             aligned, but we don't need to allocate space for all
             the bytes, only those to which we actually will save
             anything.  */
          if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
            gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
          if (TARGET_HARD_FLOAT && TARGET_FPRS
              && next_cum.fregno <= FP_ARG_V4_MAX_REG
              && cfun->va_list_fpr_size)
            {
              if (gpr_reg_num)
                fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
                           * UNITS_PER_FP_WORD;
              if (cfun->va_list_fpr_size
                  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
              else
                fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                            * UNITS_PER_FP_WORD;
            }
          if (gpr_reg_num)
            {
              offset = -((first_reg_offset * reg_size) & ~7);
              if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
                {
                  gpr_reg_num = cfun->va_list_gpr_size;
                  if (reg_size == 4 && (first_reg_offset & 1))
                    gpr_reg_num++;
                }
              gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
            }
          else if (fpr_size)
            offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
                       * UNITS_PER_FP_WORD
                     - (int) (GP_ARG_NUM_REG * reg_size);

          if (gpr_size + fpr_size)
            {
              rtx reg_save_area
                = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
              gcc_assert (GET_CODE (reg_save_area) == MEM);
              reg_save_area = XEXP (reg_save_area, 0);
              if (GET_CODE (reg_save_area) == PLUS)
                {
                  gcc_assert (XEXP (reg_save_area, 0)
                              == virtual_stack_vars_rtx);
                  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
                  offset += INTVAL (XEXP (reg_save_area, 1));
                }
              else
                gcc_assert (reg_save_area == virtual_stack_vars_rtx);
            }

          cfun->machine->varargs_save_offset = offset;
          save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
        }
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
        first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
        {
          /* V4 va_list_gpr_size counts number of registers needed.  */
          if (nregs > cfun->va_list_gpr_size)
            nregs = cfun->va_list_gpr_size;
        }
      else
        {
          /* char * va_list instead counts number of bytes needed.  */
          if (nregs > cfun->va_list_gpr_size / reg_size)
            nregs = cfun->va_list_gpr_size / reg_size;
        }

      mem = gen_rtx_MEM (BLKmode,
                         plus_constant (Pmode, save_area,
                                        first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
                                  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
                                               * UNITS_PER_FP_WORD);

      emit_jump_insn
        (gen_rtx_SET (VOIDmode,
                      pc_rtx,
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            gen_rtx_NE (VOIDmode, cr1,
                                                        const0_rtx),
                                            gen_rtx_LABEL_REF (VOIDmode, lab),
                                            pc_rtx)));

      for (nregs = 0;
           fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
           fregno++, off += UNITS_PER_FP_WORD, nregs++)
        {
          mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                             ? DFmode : SFmode,
                             plus_constant (Pmode, save_area, off));
          MEM_NOTRAP_P (mem) = 1;
          set_mem_alias_set (mem, set);
          set_mem_align (mem, GET_MODE_ALIGNMENT (
                         (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                          ? DFmode : SFmode));
          emit_move_insn (mem, gen_rtx_REG (
                          (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                           ? DFmode : SFmode, fregno));
        }

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
                          get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
                      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
                      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
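/* For reference (reconstructed to match the fields built above, not
   part of the original source): the V.4 va_list is equivalent to

       typedef struct __va_list_tag {
         unsigned char gpr;        // next GPR to use
         unsigned char fpr;        // next FPR to use
         unsigned short reserved;  // padding, named for -Wpadded
         void *overflow_arg_area;  // arguments passed on the stack
         void *reg_save_area;      // the GPR/FPR save block
       } va_list[1];  */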
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
               GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
               FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
             HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
             words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
        rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                        gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.
     We don't need to check for pass-by-reference because of the test above.
     We can return a simplifed answer, since we know there's no offset to add.  */

  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
        boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
        {
          tree t;
          /* This updates arg ptr by the amount that would be necessary
             to align the zero-sized (but not zero-alignment) item.  */
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
          gimplify_and_add (t, pre_p);

          t = fold_convert (sizetype, valist_tmp);
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_convert (TREE_TYPE (valist),
                                    fold_build2 (BIT_AND_EXPR, sizetype, t,
                                                 size_int (-boundary))));
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
          gimplify_and_add (t, pre_p);
        }
      /* Since it is zero-sized there's no increment for the item itself. */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
        {
          tree elem_type = TREE_TYPE (type);
          enum machine_mode elem_mode = TYPE_MODE (elem_type);
          int elem_size = GET_MODE_SIZE (elem_mode);

          if (elem_size < UNITS_PER_WORD)
            {
              tree real_part, imag_part;
              gimple_seq post = NULL;

              real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  &post);
              /* Copy the value into a temporary, lest the formal temporary
                 be reused out from under us.  */
              real_part = get_initialized_tmp_var (real_part, pre_p, &post);
              gimple_seq_add_seq (pre_p, post);

              imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  post_p);

              return build2 (COMPLEX_EXPR, type, real_part, imag_part);
            }
        }

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
          || (TARGET_DOUBLE_FLOAT
              && (TYPE_MODE (type) == DFmode
                  || TYPE_MODE (type) == TFmode
                  || TYPE_MODE (type) == SDmode
                  || TYPE_MODE (type) == DDmode
                  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
        align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
        align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /*  AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
         As are any other 2 gpr item such as complex int due to a
         historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
        {
          regalign = 1;
          u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), n_reg - 1));
          u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
                      unshare_expr (reg), u);
        }
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
         reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
        {
          t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), 1));
          u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
        }

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
        t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
         FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
          && TARGET_HARD_FLOAT && TARGET_FPRS
          && TYPE_MODE (type) == SDmode)
        t = fold_build_pointer_plus_hwi (t, size);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
        {
          /* Ensure that we don't find any more args in regs.
             Alignment has taken care of for special cases.  */
          gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
        }
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -align));
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
          > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
         aligned in memory in the saved registers, so copy via a
         temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
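/* Rough shape of what the routine above gimplifies for a double under
   V.4 with hard float (illustrative pseudo-GIMPLE, not emitted
   verbatim):

       if (fpr >= 8) goto lab_false;
       addr = sav + 8*4 + fpr++ * 8;   // from the FP save area
       goto lab_over;
     lab_false:
       ovf = (ovf + 7) & -8;           // on-stack alignment
       addr = ovf;
       ovf = ovf + 8;
     lab_over:
       result = *(double *) addr;

   Here 8*4 is sav_ofs (the GPR block precedes the FPR block in the
   register save area) and 8 is sav_scale for double-float.  */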
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed", name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
         global memory.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
         function as not reading global memory, but it can have arbitrary side
         effects.  If it is off, then assume the function is a const function.
         This mimics the ATTR_MATHFN_FPROUNDING attribute in
         builtin-attribute.def that is used for the math functions. */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
        {
          DECL_PURE_P (t) = 1;
          DECL_IS_NOVOPS (t) = 1;
          attr_string = ", fp, pure";
        }
      else
        {
          TREE_READONLY (t) = 1;
          attr_string = ", fp, const";
        }
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
             (int)code, name, attr_string);
}
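/* Typical use (illustrative; the actual calls live in the various
   *_init_builtins routines and the exact type name here is assumed):

       def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si,
                    ALTIVEC_BUILTIN_MTVSCR);

   which registers the builtin with the middle end and records its decl
   in rs6000_builtin_decls for later expansion.  */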
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* SPE predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_predicates[] =
{
#include "rs6000-builtin.def"
};

/* SPE evsel predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_evsel[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
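/* How the table trick above works (illustrative): before each table,
   exactly one RS6000_BUILTIN_* macro is defined to expand to an
   initializer, so a line in rs6000-builtin.def written through the
   RS6000_BUILTIN_2 shape, e.g.

       RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM,
                         "__builtin_altivec_vaddubm",
                         RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
                         CODE_FOR_addv16qi3)

   contributes { MASK, ICODE, NAME, ENUM } only to bdesc_2arg and
   vanishes from every other table; one .def file thus populates them
   all.  The argument values shown here are approximate.  */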
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
          || INTVAL (op0) > 15
          || INTVAL (op0) < -16)
        {
          error ("argument 1 must be a 5-bit signed literal");
          return const0_rtx;
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
          || TREE_INT_CST_LOW (arg1) & ~0x1f)
        {
          error ("argument 2 must be a 5-bit unsigned literal");
          return const0_rtx;
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op2);
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
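
/* Illustration (added; not original source text): the literal checks
   above fire at expand time, so vec_sld (a, b, 3) is accepted -- 3 fits
   in the 4-bit vsldoi field -- while vec_sld (a, b, n) with a run-time N
   is rejected with "argument 3 must be a 4-bit unsigned literal".  */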
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
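
/* Worked example (added for illustration): for a V4SI initializer such as
   (vector int) { a, b, c, d }, n_elt is 4; each argument is expanded and
   narrowed to the inner SImode via gen_lowpart, and
   rs6000_expand_vector_init receives (parallel:V4SI [a b c d]) to lower
   into the best available instruction sequence.  */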
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
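
/* For instance (added illustration): vec_extract on a vector of 8
   halfwords has TYPE_VECTOR_SUBPARTS equal to 8, so MAX is 7 and a
   selector of 9 is rejected with "selector must be an integer constant
   in the range 0..7".  */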
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
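
/* Illustrative note (added): unlike vec_set, the result here is scalar;
   for vec_ext on a V4SF vector TMODE is SFmode and MODE0 is V4SFmode,
   and rs6000_expand_vector_extract emits whatever target-specific
   sequence routes element ELT into TARGET.  */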
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
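
/* How the table above is consumed (added note): spe_expand_builtin below
   scans bdesc_2arg_spe and forwards any match straight to
   rs6000_expand_binop_builtin, so e.g. __builtin_spe_evlddx expands
   through CODE_FOR_spe_evlddx with no builtin-specific code here.  */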
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.
  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
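
/* Worked example (added sketch; the builtin spelling is only assumed):
   a call such as

     flag = __builtin_spe_evcmpgts (1, a, b);

   arrives with FORM == 1, the "any" variant, so after the single compare
   CODE is EQ and TARGET receives (eq:SI (reg:CC scratch) (const_int 0)),
   i.e. a read of BIT 2, the (U | L) bit, of the CR field.  */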
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
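
/* Design note (added): the evsel expansion above reuses the compare
   pattern of an SPE predicate to set SCRATCH, then emits a single
   spe_evsel (spe_evsel_fs for the V2SF case) that selects per element
   between op2 and op3, exactly the semantics sketched in the comment
   before the function.  */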
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
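
/* For example (added note): calling a VSX builtin while compiling without
   -mvsx reaches this function from rs6000_expand_builtin and reports
   "Builtin function ... requires the -mvsx option"; the call is then
   expanded as a normal call.  */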
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  unsigned mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
			   ? get_insn_name ((int)icode)
			   : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_EVSEL:	   name3 = "evsel";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int)icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (int) CODE_FOR_altivec_lvsr;
	enum machine_mode tmode = insn_data[icode].operand[0].mode;
	enum machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (VOIDmode, op,
				    gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	/*pat = gen_altivec_lvsr (target, op);*/
	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"	 : "",
	     (TARGET_SPE)	   ? ", spe"	 : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  void_type_internal_node = void_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector long", V2DI_type_node);
  TYPE_NAME (V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
  TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
  TYPE_NAME (bool_V2DI_type_node) = tdecl;

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  unsigned fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
				pushort_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
				signed_char_type_node,
				NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);

  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  /* Load and store builtins.  */
  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);

  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
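
/* Illustrative usage sketch (not part of the GCC sources): <altivec.h> maps
   the generic vec_* interface onto the overloaded __builtin_vec_* entry
   points defined above, which the front end later resolves by argument
   type.  vec_ctf (v, b) converts to float and scales by 2**-b:

     #include <altivec.h>

     vector float
     quarters (vector signed int v)
     {
       return vec_ctf (v, 2);
     }
*/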
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);

  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */
  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
}
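
/* Illustrative usage sketch (not part of the GCC sources): the vec_init,
   vec_set and vec_ext entry points registered above are what generic
   vector-literal, element-assignment and element-read syntax lower to:

     vector int
     build (int a, int b)
     {
       vector int v = (vector int){ a, b, a, b };   lowered via vec_init_v4si
       return vec_insert (99, v, 2);                lowered via the vec_set
                                                    pattern
     }
*/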
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
static unsigned
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */
static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
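
/* A minimal stand-alone sketch of the hash-consing scheme above (added for
   illustration; MODE_LIMIT is a hypothetical stand-in for
   MAX_MACHINE_MODE).  Each of the four slots contributes a radix digit for
   its machine mode followed by one signedness bit, so two builtins with the
   same modes and signedness always collide and share one FUNCTION_TYPE:

     unsigned
     hash4 (const unsigned char mode[4], const unsigned char uns_p[4])
     {
       unsigned ret = 0;
       int i;

       for (i = 0; i < 4; i++)
	 {
	   ret = ret * MODE_LIMIT + mode[i];
	   ret = ret * 2 + uns_p[i];
	 }
       return ret;
     }
*/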
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode for the
   missing arguments.  */
static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
		       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
					  builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin produces unsigned results or takes unsigned arguments,
     and it is returned as a decl for the vectorizer (such as widening
     multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
      h.uns_p[0] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc_builtin_hash_struct ();
      *h2 = h;
      *found = (void *)h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *)(*found))->type;
}
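
/* Example (illustrative, not part of the GCC sources): a request such as

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			    code, name)

   trims num_args from 3 to 2 because of the trailing VOIDmode, maps each
   mode through builtin_mode_to_type, and caches a "V4SI f (V4SI, V4SI)"
   tree in builtin_hash_table; a later call with identical modes and
   signedness returns the very same node.  */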
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  unsigned builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }

	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
rs6000_init_libfuncs (void)
{
  if (!TARGET_IEEEQUAD)
      /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	  {
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	  }

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  bool constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size operation, just call memset.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment.  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  clear_bytes = 8;
	  mode = DImode;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
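
/* Worked example (illustrative): clearing 22 bytes with 32-bit alignment on
   a 64-bit, non-strict-alignment target selects clear_step = 8; since
   22 <= 8 * clear_step the clear is inlined as DImode stores at offsets 0
   and 8, an SImode store at 16 and an HImode store at 20, each emitted as
   emit_move_insn (dest, CONST0_RTX (mode)).  */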
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src	= operands[1];
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment.  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  if (bytes > rs6000_block_move_inline_limit)
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
	rtx (*movmemsi) (rtx, rtx, rtx, rtx);
	rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
	 when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
	{
	  move_bytes = 16;
	  mode = V4SImode;
	  gen_func.mov = gen_movv4si;
	}
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
	{
	  move_bytes = 8;
	  mode = V2SImode;
	  gen_func.mov = gen_movv2si;
	}
      else if (TARGET_STRING
	       && bytes > 24	/* move up to 32 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10]
	       && ! fixed_regs[11]
	       && ! fixed_regs[12])
	{
	  move_bytes = (bytes > 32) ? 32 : bytes;
	  gen_func.movmemsi = gen_movmemsi_8reg;
	}
      else if (TARGET_STRING
	       && bytes > 16	/* move up to 24 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10])
	{
	  move_bytes = (bytes > 24) ? 24 : bytes;
	  gen_func.movmemsi = gen_movmemsi_6reg;
	}
      else if (TARGET_STRING
	       && bytes > 8	/* move up to 16 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8])
	{
	  move_bytes = (bytes > 16) ? 16 : bytes;
	  gen_func.movmemsi = gen_movmemsi_4reg;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  move_bytes = 8;
	  mode = DImode;
	  gen_func.mov = gen_movdi;
	}
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
	{			/* move up to 8 bytes at a time */
	  move_bytes = (bytes > 8) ? 8 : bytes;
	  gen_func.movmemsi = gen_movmemsi_2reg;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  move_bytes = 4;
	  mode = SImode;
	  gen_func.mov = gen_movsi;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  move_bytes = 2;
	  mode = HImode;
	  gen_func.mov = gen_movhi;
	}
      else /* move 1 byte at a time */
	{
	  move_bytes = 1;
	  mode = QImode;
	  gen_func.mov = gen_movqi;
	}

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
	{
	  rtx tmp_reg = gen_reg_rtx (mode);

	  emit_insn ((*gen_func.mov) (tmp_reg, src));
	  stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
	}

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
	{
	  int i;

	  for (i = 0; i < num_reg; i++)
	    emit_insn (stores[i]);
	  num_reg = 0;
	}

      if (mode == BLKmode)
	{
	  /* Move the address into scratch registers.  The movmemsi
	     patterns require zero offset.  */
	  if (!REG_P (XEXP (src, 0)))
	    {
	      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
	      src = replace_equiv_address (src, src_reg);
	    }
	  set_mem_size (src, move_bytes);

	  if (!REG_P (XEXP (dest, 0)))
	    {
	      rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
	      dest = replace_equiv_address (dest, dest_reg);
	    }
	  set_mem_size (dest, move_bytes);

	  emit_insn ((*gen_func.movmemsi) (dest, src,
					   GEN_INT (move_bytes & 31),
					   align_rtx));
	}
    }

  return 1;
}
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "{l|lwz} %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))
      {
	if (i == words-1)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = operands[2];
	    output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
	else if (i == 0)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	    output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
	    return "";
	  }
	else
	  {
	    for (j = 0; j < words; j++)
	      if (j != i)
		{
		  xop[0] = GEN_INT (j * 4);
		  xop[1] = operands[1];
		  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
		  output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
		}
	    xop[0] = GEN_INT (i * 4);
	    xop[1] = operands[1];
	    output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
      }

  return "{lsi|lswi} %2,%1,%N0";
}
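
/* Example (illustrative): for a four-word load whose base register in
   operands[1] is not one of the destinations, the fall-through template
   emits a single "lswi reg,base,16" (POWER mnemonic "lsi"); the special
   cases above rewrite the sequence only when the base register would be
   clobbered partway through the multi-register load.  */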
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */

int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
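
/* Worked example (illustrative): with SHIFTOP = 4, includes_lshift_p
   computes shift_mask = ~0 << 4, so ANDOP = 0xf0 is accepted (no mask bits
   outside 0xfffffff0) while ANDOP = 0xf8 is rejected, because ANDOP would
   keep bit 3, which a true left shift of 4 would have cleared.  */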
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */

int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, high, lsb;
      HOST_WIDE_INT shift_mask_low, shift_mask_high;

      low = CONST_DOUBLE_LOW (andop);
      if (HOST_BITS_PER_WIDE_INT < 64)
	high = CONST_DOUBLE_HIGH (andop);

      if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
	  || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  shift_mask_high = ~0;
	  if (INTVAL (shiftop) > 32)
	    shift_mask_high <<= INTVAL (shiftop) - 32;

	  lsb = high & -high;

	  if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
	    return 0;

	  high = ~high;
	  high &= -lsb;

	  lsb = high & -high;
	  return high == -lsb;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if (-lsb != shift_mask_low)
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64)
	high = ~high;
      low = ~low;
      low &= -lsb;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  lsb = high & -high;
	  return high == -lsb;
	}

      lsb = low & -low;
      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
    }
  else
    return 0;
}
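
/* Worked example (illustrative): with SHIFTOP = 8 the CONST_INT path above
   accepts ANDOP = 0x0000ff00: lsb = 0x100 coincides with the LSB of
   ~0 << 8, and after inverting and stripping the low run of ones the
   remaining bits again form a single contiguous block, so the mask has the
   required shape (eight 0's, then 1's, then 0's) for rldic.  */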
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */

int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, lsb, shift_mask_low;

      low = CONST_DOUBLE_LOW (andop);

      if (HOST_BITS_PER_WIDE_INT < 64)
	{
	  HOST_WIDE_INT high, shift_mask_high;

	  high = CONST_DOUBLE_HIGH (andop);

	  if (low == 0)
	    {
	      shift_mask_high = ~0;
	      if (INTVAL (shiftop) > 32)
		shift_mask_high <<= INTVAL (shiftop) - 32;

	      lsb = high & -high;

	      if ((lsb & shift_mask_high) == 0)
		return 0;

	      return high == -lsb;
	    }
	  if (high != ~0)
	    return 0;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if ((lsb & shift_mask_low) == 0)
	return 0;

      return low == -lsb && lsb != 1;
    }
  else
    return 0;
}
/* Return 1 if operands will generate a valid arguments to rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of instruction do not correspond to semantics of RTL insn.  */

int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
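
/* Example (illustrative): mem1 = (mem:DF (plus (reg 9) (const_int 16))) and
   mem2 = (mem:DF (plus (reg 9) (const_int 24))) pass every test above:
   neither is volatile, both use base register 9, and offset2 == offset1 + 8,
   so the peephole may fuse the pair into a single lfq or stfq.  */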
static rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  rtx ret;

  if (mode != SDmode)
    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
	{
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  cfun->machine->sdmode_stack_slot = mem;
	  eliminated = true;
	}
      ret = mem;
    }

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
	       GET_MODE_NAME (mode));
      if (!ret)
	fprintf (stderr, "\tNULL_RTX\n");
      else
	debug_rtx (ret);
    }

  return ret;
}
static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case SSA_NAME:
    case REAL_CST:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
	return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}

enum reload_reg_type {
  GPR_REGISTER_TYPE,
  VECTOR_REGISTER_TYPE,
  OTHER_REGISTER_TYPE
};

static enum reload_reg_type
rs6000_reload_register_type (enum reg_class rclass)
{
  switch (rclass)
    {
    case GENERAL_REGS:
    case BASE_REGS:
      return GPR_REGISTER_TYPE;

    case FLOAT_REGS:
    case ALTIVEC_REGS:
    case VSX_REGS:
      return VECTOR_REGISTER_TYPE;

    default:
      return OTHER_REGISTER_TYPE;
    }
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 enum machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;

  sri->icode = CODE_FOR_nothing;

  /* Convert vector loads and stores into gprs to use an additional base
     register.  */
  icode = rs6000_vector_reload[mode][in_p != false];
  if (icode != CODE_FOR_nothing)
    {
      ret = NO_REGS;
      sri->icode = CODE_FOR_nothing;
      sri->extra_cost = 0;

      if (GET_CODE (x) == MEM)
	{
	  rtx addr = XEXP (x, 0);

	  /* Loads to and stores from gprs can do reg+offset, and wouldn't
	     need an extra register in that case, but it would need an extra
	     register if the addressing is reg+reg or (reg+reg)&(-16).  */
	  if (rclass == GENERAL_REGS || rclass == BASE_REGS)
	    {
	      if (!legitimate_indirect_address_p (addr, false)
		  && !rs6000_legitimate_offset_address_p (TImode, addr,
							  false, true))
		{
		  sri->icode = icode;
		  /* account for splitting the loads, and converting the
		     address from reg+reg to reg.  */
		  sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
				     + ((GET_CODE (addr) == AND) ? 1 : 0));
		}
	    }
	  /* Loads to and stores from vector registers can only do reg+reg
	     addressing.  Altivec registers can also do (reg+reg)&(-16).  */
	  else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
		   || rclass == FLOAT_REGS || rclass == NO_REGS)
	    {
	      if (!VECTOR_MEM_ALTIVEC_P (mode)
		  && GET_CODE (addr) == AND
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && INTVAL (XEXP (addr, 1)) == -16
		  && (legitimate_indirect_address_p (XEXP (addr, 0), false)
		      || legitimate_indexed_address_p (XEXP (addr, 0), false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
				     ? 2 : 1);
		}
	      else if (!legitimate_indirect_address_p (addr, false)
		       && (rclass == NO_REGS
			   || !legitimate_indexed_address_p (addr, false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = 1;
		}
	      else
		icode = CODE_FOR_nothing;
	    }
	  /* Any other loads, including to pseudo registers which haven't been
	     assigned to a register yet, default to require a scratch
	     register.  */
	  else
	    {
	      sri->icode = icode;
	      sri->extra_cost = 2;
	    }
	}
      else if (REG_P (x))
	{
	  int regno = true_regnum (x);

	  icode = CODE_FOR_nothing;
	  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	    default_p = true;
	  else
	    {
	      enum reg_class xclass = REGNO_REG_CLASS (regno);
	      enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
	      enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);

	      /* If memory is needed, use default_secondary_reload to create
		 the stack slot.  */
	      if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
		default_p = true;
	      else
		ret = NO_REGS;
	    }
	}
      else
	default_p = true;
    }
  else if (TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));
      unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;

      if (off != NULL_RTX
	  && (INTVAL (off) & 3) != 0
	  && (unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000 < 0x10000 - extra)
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_di_load;
	  else
	    sri->icode = CODE_FOR_reload_di_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else if (!TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));
      unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;

      /* We need a secondary reload only when our legitimate_address_p
	 says the address is good (as otherwise the entire address
	 will be reloaded).  So for mode sizes of 8 and 16 this will
	 be when the offset is in the ranges [0x7ffc,0x7fff] and
	 [0x7ff4,0x7ff7] respectively.  Note that the address we see
	 here may have been manipulated by legitimize_reload_address.  */
      if (off != NULL_RTX
	  && ((unsigned HOST_WIDE_INT) INTVAL (off) - (0x8000 - extra)
	      < UNITS_PER_WORD))
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_si_load;
	  else
	    sri->icode = CODE_FOR_reload_si_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (default_p)
	fprintf (stderr, ", default secondary reload");

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d\n",
		 insn_data[sri->icode].name, sri->extra_cost);
      else
	fprintf (stderr, "\n");

      debug_rtx (x);
    }

  return ret;
}
13702 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13703 to SP+reg addressing. */
13706 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
13708 int regno
= true_regnum (reg
);
13709 enum machine_mode mode
= GET_MODE (reg
);
13710 enum reg_class rclass
;
13712 rtx and_op2
= NULL_RTX
;
13715 rtx scratch_or_premodify
= scratch
;
13719 if (TARGET_DEBUG_ADDR
)
13721 fprintf (stderr
, "\nrs6000_secondary_reload_inner, type = %s\n",
13722 store_p
? "store" : "load");
13723 fprintf (stderr
, "reg:\n");
13725 fprintf (stderr
, "mem:\n");
13727 fprintf (stderr
, "scratch:\n");
13728 debug_rtx (scratch
);
13731 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
13732 gcc_assert (GET_CODE (mem
) == MEM
);
13733 rclass
= REGNO_REG_CLASS (regno
);
13734 addr
= XEXP (mem
, 0);
13738 /* GPRs can handle reg + small constant, all other addresses need to use
13739 the scratch register. */
13742 if (GET_CODE (addr
) == AND
)
13744 and_op2
= XEXP (addr
, 1);
13745 addr
= XEXP (addr
, 0);
13748 if (GET_CODE (addr
) == PRE_MODIFY
)
13750 scratch_or_premodify
= XEXP (addr
, 0);
13751 gcc_assert (REG_P (scratch_or_premodify
));
13752 gcc_assert (GET_CODE (XEXP (addr
, 1)) == PLUS
);
	  addr = XEXP (addr, 1);
	}

      if (GET_CODE (addr) == PLUS
	  && (and_op2 != NULL_RTX
	      || !rs6000_legitimate_offset_address_p (TImode, addr,
						      false, true)))
	{
	  addr_op1 = XEXP (addr, 0);
	  addr_op2 = XEXP (addr, 1);
	  gcc_assert (legitimate_indirect_address_p (addr_op1, false));

	  if (!REG_P (addr_op2)
	      && (GET_CODE (addr_op2) != CONST_INT
		  || !satisfies_constraint_I (addr_op2)))
	    {
	      if (TARGET_DEBUG_ADDR)
		{
		  fprintf (stderr,
			   "\nMove plus addr to register %s, mode = %s: ",
			   rs6000_reg_names[REGNO (scratch)],
			   GET_MODE_NAME (mode));
		  debug_rtx (addr_op2);
		}
	      rs6000_emit_move (scratch, addr_op2, Pmode);
	      addr_op2 = scratch;
	    }

	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch_or_premodify,
				  gen_rtx_PLUS (Pmode,
						addr_op1,
						addr_op2)));

	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}
      else if (!legitimate_indirect_address_p (addr, false)
	       && !rs6000_legitimate_offset_address_p (TImode, addr,
						       false, true))
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch_or_premodify)],
		       GET_MODE_NAME (mode));
	      debug_rtx (addr);
	    }
	  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}
      break;

      /* Float/Altivec registers can only handle reg+reg addressing.  Move
	 other addresses into a scratch register.  */
    case FLOAT_REGS:
    case VSX_REGS:
    case ALTIVEC_REGS:

      /* With float regs, we need to handle the AND ourselves, since we can't
	 use the Altivec instruction with an implicit AND -16.  Allow scalar
	 loads to float registers to use reg+offset even if VSX.  */
      if (GET_CODE (addr) == AND
	  && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
	      || GET_CODE (XEXP (addr, 1)) != CONST_INT
	      || INTVAL (XEXP (addr, 1)) != -16
	      || !VECTOR_MEM_ALTIVEC_P (mode)))
	{
	  and_op2 = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}

      /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
	 as the address later.  */
      if (GET_CODE (addr) == PRE_MODIFY
	  && (!VECTOR_MEM_VSX_P (mode)
	      || and_op2 != NULL_RTX
	      || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
	{
	  scratch_or_premodify = XEXP (addr, 0);
	  gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
						     false));
	  gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
	  addr = XEXP (addr, 1);
	}

      if (legitimate_indirect_address_p (addr, false)	/* reg */
	  || legitimate_indexed_address_p (addr, false)	/* reg+reg */
	  || GET_CODE (addr) == PRE_MODIFY		/* VSX pre-modify */
	  || (GET_CODE (addr) == AND			/* Altivec memory */
	      && GET_CODE (XEXP (addr, 1)) == CONST_INT
	      && INTVAL (XEXP (addr, 1)) == -16
	      && VECTOR_MEM_ALTIVEC_P (mode))
	  || (rclass == FLOAT_REGS			/* legacy float mem */
	      && GET_MODE_SIZE (mode) == 8
	      && and_op2 == NULL_RTX
	      && scratch_or_premodify == scratch
	      && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
	;

      else if (GET_CODE (addr) == PLUS)
	{
	  addr_op1 = XEXP (addr, 0);
	  addr_op2 = XEXP (addr, 1);
	  gcc_assert (REG_P (addr_op1));

	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
	      debug_rtx (addr_op2);
	    }
	  rs6000_emit_move (scratch, addr_op2, Pmode);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch_or_premodify,
				  gen_rtx_PLUS (Pmode,
						addr_op1,
						scratch)));
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
	       || GET_CODE (addr) == CONST_INT || REG_P (addr))
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch_or_premodify)],
		       GET_MODE_NAME (mode));
	      debug_rtx (addr);
	    }

	  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else
	gcc_unreachable ();

      break;

    default:
      gcc_unreachable ();
    }

  /* If the original address involved a pre-modify that we couldn't use the
     VSX memory instruction with update, and we haven't taken care of it
     already, store the address in the pre-modify register and use that as the
     address.  */
  if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
      addr = scratch_or_premodify;
    }

  /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
     memory instruction, recreate the AND now, including the clobber which is
     generated by the general ANDSI3/ANDDI3 patterns for the
     andi. instruction.  */
  if (and_op2 != NULL_RTX)
    {
      if (! legitimate_indirect_address_p (addr, false))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
	  addr = scratch;
	}

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
		   rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
	  debug_rtx (and_op2);
	}

      and_rtx = gen_rtx_SET (VOIDmode,
			     scratch,
			     gen_rtx_AND (Pmode,
					  addr,
					  and_op2));

      cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2, and_rtx, cc_clobber)));
      addr = scratch;
    }

  /* Adjust the address if it changed.  */
  if (addr != XEXP (mem, 0))
    {
      mem = change_address (mem, mode, addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));

  return;
}
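
/* Editor's illustrative sketch (not part of the port): the AND -16
   handling above exists because Altivec memory instructions ignore the
   low four address bits, which GCC models as an explicit (and addr -16).
   A minimal stand-alone restatement of that masking, assuming ordinary
   uintptr_t arithmetic:  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t addr = 0x1003;			/* deliberately misaligned */
  uintptr_t aligned = addr & ~(uintptr_t) 15;	/* same effect as AND -16 */

  printf ("%#lx -> %#lx\n", (unsigned long) addr, (unsigned long) aligned);
  return 0;
}
#endif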
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

static void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      scratch_or_premodify = XEXP (addr, 0);
      gcc_assert (REG_P (scratch_or_premodify));
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));

  return;
}
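
/* Editor's illustrative sketch (hypothetical helper, not used anywhere):
   one reason the conversion above is needed is that the 64-bit ld/std
   instructions are DS-form, i.e. their 16-bit displacement must have the
   low two bits clear, so an offset such as 6(r9) is not encodable and the
   address has to be moved to a register first.  */
#if 0
#include <stdbool.h>

static bool
ds_form_offset_ok (long offset)
{
  /* Signed 16-bit displacement with the low two bits zero.  */
  return offset >= -32768 && offset <= 32767 && (offset & 3) == 0;
}
#endif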
/* Allocate a 64-bit stack slot to be used for copying SDmode
   values through if this function has any SDmode references.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
	if (ret)
	  {
	    rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	    cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								  SDmode, 0);
	    return;
	  }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
	continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
	  || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
	{
	  rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	  cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								SDmode, 0);
	  return;
	}
    }
}

static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?  */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum machine_mode mode = GET_MODE (x);

  if (VECTOR_UNIT_VSX_P (mode)
      && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    return rclass;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && easy_vector_constant (x, mode))
    return ALTIVEC_REGS;

  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
    return NO_REGS;

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  /* For VSX, prefer the traditional registers for 64-bit values because we can
     use the non-VSX loads.  Prefer the Altivec registers if Altivec is
     handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
     prefer Altivec loads.  */
  if (rclass == VSX_REGS)
    {
      if (GET_MODE_SIZE (mode) <= 8)
	return FLOAT_REGS;

      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
	return ALTIVEC_REGS;

      return rclass;
    }

  return rclass;
}

/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

bool
rs6000_secondary_memory_needed (enum reg_class class1,
				enum reg_class class2,
				enum machine_mode mode)
{
  if (class1 == class2)
    return false;

  /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
     ALTIVEC_REGS, and FLOAT_REGS).  We don't need to use memory to copy
     between these classes.  But we need memory for other things that can go in
     FLOAT_REGS like SFmode.  */
  if (TARGET_VSX
      && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
      && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
	  || class1 == FLOAT_REGS))
    return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
	    && class2 != FLOAT_REGS);

  if (class1 == VSX_REGS || class2 == VSX_REGS)
    return true;

  if (class1 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class2 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
    return true;

  return false;
}

/* Debug version of rs6000_secondary_memory_needed.  */
bool
rs6000_debug_secondary_memory_needed (enum reg_class class1,
				      enum reg_class class2,
				      enum machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (class1, class2, mode);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
	   "class2 = %s, mode = %s\n",
	   ret ? "true" : "false", reg_class_names[class1],
	   reg_class_names[class2], GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;

  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  */
  if (TARGET_VSX
      && (regno == -1 || VSX_REGNO_P (regno))
      && VSX_REG_CLASS_P (rclass))
    return NO_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}

/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     enum machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
				 enum machine_mode to,
				 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
      return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
	      && reg_classes_intersect_p (xclass, rclass));
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
	  || (((to) == TFmode) + ((from) == TFmode)) == 1
	  || (((to) == DDmode) + ((from) == DDmode)) == 1
	  || (((to) == TDmode) + ((from) == TDmode)) == 1
	  || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    return (from_size != 8 && from_size != 16);

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}

/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
				       enum machine_mode to,
				       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_cannot_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
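
/* Editor's illustrative sketch (stand-alone restatement of the
   arithmetic above): each CR field is four bits wide, so the bit tested
   for CR field N is 4*N plus the offset of LT (0), GT (1), EQ (2) or
   SO/UNORDERED (3).  */
#if 0
#include <stdio.h>

int
main (void)
{
  int cr_field = 6;		/* cr6, the field Altivec compares set */
  int base_bit = 4 * cr_field;

  printf ("cr6: lt=%d gt=%d eq=%d so=%d\n",
	  base_bit, base_bit + 1, base_bit + 2, base_bit + 3);
  return 0;
}
#endif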
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_alloc_cleared_machine_function ();
}
/* These macros test for integers and extract the low-order bits.  */
#define INT_P(X)  \
((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)	\
 && GET_MODE (X) == VOIDmode)

#define INT_LOWPART(X) \
  (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))

int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}

int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
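
/* Editor's illustrative sketch (stand-alone restatement of the two loops
   above, assuming VAL is a valid rlinm-style mask with at least one bit
   set): MB is the leftmost bit of the run of 1's and ME the rightmost,
   numbering bit 0 as the most significant bit the way the
   rotate-and-mask instructions do.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long val = 0x0ff00000;	/* expect MB = 4, ME = 11 */
  int mb = 0, me = 31;

  while (((val << mb) & 0x80000000) == 0)
    mb++;
  while (((val >> (31 - me)) & 1) == 0)
    me--;

  printf ("MB=%d ME=%d\n", mb, me);
  return 0;
}
#endif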
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_ld pattern.  */

static const char *
rs6000_get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
			 rs6000_get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

/* Helper function for rs6000_get_some_local_dynamic_name.  */

static int
rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      const char *str = XSTR (x, 0);
      if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  cfun->machine->some_ld_name = str;
	  return 1;
	}
    }

  return 0;
}

/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
    case '.':
      /* Write out an instruction after the call which may be replaced
	 with glue code by the loader.  This depends on the AIX version.  */
      asm_fprintf (file, RS6000_CALL_GLUE);
      return;

      /* %a is output_address.  */

    case 'A':
      /* If X is a constant integer whose low-order 5 bits are zero,
	 write 'l'.  Otherwise, write 'r'.  This is a kludge to fix a bug
	 in the AIX assembler where "sri" with a zero shift count
	 writes a trash instruction.  */
      if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
	putc ('l', file);
      else
	putc ('r', file);
      return;

    case 'b':
      /* If constant, low-order 16 bits of constant, unsigned.
	 Otherwise, write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
      else
	print_operand (file, x, 0);
      return;

    case 'B':
      /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
	 for 64-bit mask direction.  */
      putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
      return;

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'c':
      /* X is a CR register.  Print the number of the GT bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%c value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
      return;

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (REG_P (x));

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'I':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	}
      print_operand_address (file, x);
      fputs ("@l", file);
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode,
						     UNITS_PER_WORD),
				  0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'm':
      /* MB value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%m value");

      fprintf (file, "%d", extract_MB (x));
      return;

    case 'M':
      /* ME value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%M value");

      fprintf (file, "%d", extract_ME (x));
      return;

      /* %n outputs the negative of its operand.  */

    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || INT_LOWPART (x) < 0
	  || (i = exact_log2 (INT_LOWPART (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'Q':
      if (! TARGET_MFCRF)
	return;
      fputc (',', file);
      /* FALLTHRU */

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
      return;

    case 'S':
      /* PowerPC64 mask position.  All 0's is excluded.
	 CONST_INT 32-bit mask is considered sign-extended so any
	 transition must occur within the CONST_INT, not on the boundary.  */
      if (! mask64_operand (x, DImode))
	output_operand_lossage ("invalid %%S value");

      uval = INT_LOWPART (x);

      if (uval & 1)	/* Clear Left */
	{
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 64;
	}
      else		/* Clear Right */
	{
	  uval = ~uval;
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 63;
	}
      while (uval != 0)
	--i, uval >>= 1;
      gcc_assert (i >= 0);
      fprintf (file, "%d", i);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order 16 bits of constant for use in unsigned operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%u value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);	/* 4 */
	  break;
	case NE:
	  fputs ("ne", file);	/* 24 */
	  break;
	case LT:
	  fputs ("lt", file);	/* 16 */
	  break;
	case LE:
	  fputs ("le", file);	/* 20 */
	  break;
	case GT:
	  fputs ("gt", file);	/* 8 */
	  break;
	case GE:
	  fputs ("ge", file);	/* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);	/* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);	/* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);	/* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);	/* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'W':
      /* MB value for a PowerPC64 rldic operand.  */
      i = clz_hwi (GET_CODE (x) == CONST_INT
		   ? INTVAL (x) : CONST_DOUBLE_HIGH (x));

#if HOST_BITS_PER_WIDE_INT == 32
      if (GET_CODE (x) == CONST_INT && i > 0)
	i += 32;  /* zero-extend high-part was all 0's */
      else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
	i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
#endif

      fprintf (file, "%d", i);
      return;

    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;

    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* Mark the decl as referenced so that cgraph will output the
	 function.  */
      if (SYMBOL_REF_DECL (x))
	mark_decl_referenced (SYMBOL_REF_DECL (x));

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec or SPE memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	/* Ugly hack because %y is overloaded.  */
	if ((TARGET_SPE || TARGET_E500_DOUBLE)
	    && (GET_MODE_SIZE (GET_MODE (x)) == 8
		|| GET_MODE (x) == TFmode
		|| GET_MODE (x) == TImode))
	  {
	    /* Handle [reg].  */
	    if (REG_P (tmp))
	      {
		fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
		break;
	      }
	    /* Handle [reg+UIMM].  */
	    else if (GET_CODE (tmp) == PLUS
		     && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
	      {
		int offset;

		gcc_assert (REG_P (XEXP (tmp, 0)));

		offset = INTVAL (XEXP (tmp, 1));
		fprintf (file, "%d(%s)", offset,
			 reg_names[REGNO (XEXP (tmp, 0))]);
		break;
	      }

	    /* Fall through.  Must be [reg+reg].  */
	  }
	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (XEXP (XEXP (x, 0), 1));
	  else
	    output_address (XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      assemble_name (file, rs6000_get_some_local_dynamic_name ());
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
15264 print_operand_address (FILE *file
, rtx x
)
15267 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
15268 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
15269 || GET_CODE (x
) == LABEL_REF
)
15271 output_addr_const (file
, x
);
15272 if (small_data_operand (x
, GET_MODE (x
)))
15273 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
15274 reg_names
[SMALL_DATA_REG
]);
15276 gcc_assert (!TARGET_TOC
);
15278 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
15279 && REG_P (XEXP (x
, 1)))
15281 if (REGNO (XEXP (x
, 0)) == 0)
15282 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
15283 reg_names
[ REGNO (XEXP (x
, 0)) ]);
15285 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
15286 reg_names
[ REGNO (XEXP (x
, 1)) ]);
15288 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
15289 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
15290 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
15291 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
15293 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
15294 && CONSTANT_P (XEXP (x
, 1)))
15296 fprintf (file
, "lo16(");
15297 output_addr_const (file
, XEXP (x
, 1));
15298 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
15302 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
15303 && CONSTANT_P (XEXP (x
, 1)))
15305 output_addr_const (file
, XEXP (x
, 1));
15306 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
15309 else if (toc_relative_expr_p (x
, false))
15311 /* This hack along with a corresponding hack in
15312 rs6000_output_addr_const_extra arranges to output addends
15313 where the assembler expects to find them. eg.
15315 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15316 without this hack would be output as "x@toc+8@l(9)". We
15317 want "x+8@toc@l(9)". */
15318 output_addr_const (file
, CONST_CAST_RTX (tocrel_base
));
15319 if (GET_CODE (x
) == LO_SUM
)
15320 fprintf (file
, "@l(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
15322 fprintf (file
, "(%s)", reg_names
[REGNO (XVECEXP (tocrel_base
, 0, 1))]);
15325 gcc_unreachable ();
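
/* Editor's illustrative sketch (hypothetical printers, not used by the
   port): the two most common address syntaxes emitted above.  Note the
   r0 special case -- in the base slot of an indexed address r0 reads as
   literal zero, so print_operand_address always moves it to the second
   position.  */
#if 0
#include <stdio.h>

static void
print_reg_offset (FILE *f, int regno, long offset)
{
  fprintf (f, "%ld(r%d)", offset, regno);	/* e.g. "16(r9)" */
}

static void
print_reg_reg (FILE *f, int base, int index)
{
  if (base == 0)
    fprintf (f, "r%d,r%d", index, base);	/* swap r0 out of the base slot */
  else
    fprintf (f, "r%d,r%d", base, index);
}
#endif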
/* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base && tocrel_offset != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the
	 text section.  */
      if (TARGET_RELOCATABLE
	  && in_section != toc_section
	  && !recurse
	  && GET_CODE (x) != CONST_INT
	  && GET_CODE (x) != CONST_DOUBLE
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "internal", "hidden", "protected"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
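
/* Editor's illustrative sketch (a hypothetical miniature; the real
   mapping lives in reverse_condition_maybe_unordered): reversing an IEEE
   compare must send ordered codes to unordered ones, so that a NaN
   operand still takes the opposite branch.  */
#if 0
enum mini_code { LT_C, GT_C, UNGE_C, UNLE_C };

static enum mini_code
mini_reverse_maybe_unordered (enum mini_code c)
{
  switch (c)
    {
    case LT_C:   return UNGE_C;	/* !(a < b) must also catch NaNs */
    case GT_C:   return UNLE_C;
    case UNGE_C: return LT_C;
    case UNLE_C: return GT_C;
    }
  return c;
}
#endif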
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* E500 FP compare instructions on the GPRs.  Yuck!  */
  if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
      && FLOAT_MODE_P (mode))
    {
      rtx cmp, or_result, compare_result2;
      enum machine_mode op_mode = GET_MODE (op0);

      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op1);

      /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
	 This explains the following mess.  */

      switch (code)
	{
	case EQ: case UNEQ: case NE: case LTGT:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result, op0, op1)
		: gen_cmpsfeq_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result, op0, op1)
		: gen_cmpdfeq_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result, op0, op1)
		: gen_cmptfeq_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfgt_gpr (compare_result, op0, op1)
		: gen_cmpsfgt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfgt_gpr (compare_result, op0, op1)
		: gen_cmpdfgt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfgt_gpr (compare_result, op0, op1)
		: gen_cmptfgt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsflt_gpr (compare_result, op0, op1)
		: gen_cmpsflt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdflt_gpr (compare_result, op0, op1)
		: gen_cmpdflt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttflt_gpr (compare_result, op0, op1)
		: gen_cmptflt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Synthesize LE and GE from LT/GT || EQ.  */
      if (code == LE || code == GE || code == LEU || code == GEU)
	{
	  emit_insn (cmp);

	  switch (code)
	    {
	    case LE: code = LT; break;
	    case GE: code = GT; break;
	    case LEU: code = LT; break;
	    case GEU: code = GT; break;
	    default: gcc_unreachable ();
	    }

	  compare_result2 = gen_reg_rtx (CCFPmode);

	  /* Do the EQ.  */
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result2, op0, op1)
		: gen_cmpsfeq_gpr (compare_result2, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result2, op0, op1)
		: gen_cmpdfeq_gpr (compare_result2, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result2, op0, op1)
		: gen_cmptfeq_gpr (compare_result2, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (cmp);

	  /* OR them together.  */
	  or_result = gen_reg_rtx (CCFPmode);
	  cmp = gen_e500_cr_ior_compare (or_result, compare_result,
					 compare_result2);
	  compare_result = or_result;
	  code = EQ;
	}
      else
	{
	  if (code == NE || code == LTGT)
	    code = NE;
	  else
	    code = EQ;
	}

      emit_insn (cmp);
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && GET_MODE (op0) == TFmode
	  && !TARGET_IEEEQUAD
	  && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (VOIDmode,
				  compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT; or2 = EQ; break;
	case GE: or1 = GT; or2 = EQ; break;
	case UNEQ: or1 = UNORDERED; or2 = EQ; break;
	case LTGT: or1 = LT; or2 = GT; break;
	case UNGT: or1 = UNORDERED; or2 = GT; break;
	case UNLT: or1 = UNORDERED; or2 = LT; break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
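
/* Editor's illustrative sketch (bit-level restatement, not compiler
   code): the "LT/GT || EQ" synthesis above works because the condition
   register keeps LT, GT and EQ as separate bits, so LE is literally the
   OR of two of them -- which is what the CCEQmode IOR pattern expresses
   in RTL.  */
#if 0
#include <stdbool.h>

static bool
cr_le (bool lt_bit, bool eq_bit)
{
  return lt_bit | eq_bit;
}
#endif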
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}

void
rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  if (TARGET_ISEL && (mode == SImode || mode == DImode))
    {
      rs6000_emit_sISEL (mode, operands);
      return;
    }

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (FLOAT_MODE_P (mode)
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
	emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}

/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
	 to the GT bit.  */
      switch (code)
	{
	case EQ:
	  /* Opposite of GT.  */
	  code = GT;
	  break;

	case NE:
	  code = UNLE;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.
     The old mnemonics don't have a way to specify this information.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
	 cpus as static prediction overrides processor dynamic
	 prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
  else
    s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
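
/* Editor's illustrative sketch (stand-alone restatement of the length
   logic above): an explicitly reversed branch that is also too long for
   a conditional displacement gets reversed twice, i.e. emitted with its
   original sense, hopping over an unconditional "b" that covers the
   distance.  */
#if 0
static int
really_reversed_p (int reversed, int need_longbranch)
{
  return reversed ^ need_longbranch;	/* double reversal cancels out */
}
#endif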
/* Return the string to flip the GT bit on a CR.  */
char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}

/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  enum machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode,
			      mask,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    enum machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;

	  case GEU:
	    new_code = GTU;
	    break;

	  case LE:
	    new_code = LT;
	    break;

	  case LEU:
	    new_code = LTU;
	    break;

	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	{
	  rtx tmp;
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
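/* Sketch of the recursion above, assuming an AltiVec integer mode:
   a V4SI "LE" has no direct instruction, so it is rewritten as
   (LT OR EQ); LT in turn has no instruction either and is retried
   as GT with the operands swapped:

     LE (a, b)  ==>  IOR (LT (a, b), EQ (a, b))
                ==>  IOR (GT (b, a), EQ (a, b))

   Each sub-compare is emitted via rs6000_emit_vector_compare_inner.  */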
/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum machine_mode dest_mode = GET_MODE (dest);
  enum machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  enum machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  rtx tmp;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

      /* Mark unsigned tests with CCUNSmode.  */
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      cc_mode = CCUNSmode;
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    {
      tmp = op_true;
      op_true = op_false;
      op_false = tmp;
    }

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (VOIDmode,
			  dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
	   && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));

  return 1;
}
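/* Sketch of the fsel lowering above, under the no-trapping-math
   assumptions already checked: "dest = (a >= b) ? t : f" becomes

     temp = a - b;                  (reduce to a compare against zero)
     dest = (temp >= 0.0) ? t : f;  (a single fsel)

   The Inf/NaN tests earlier in the function are needed because a - b
   does not preserve the EQ vs. UNORDERED distinction for infinite or
   NaN inputs.  */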
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  enum machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	rtx t = true_cond;
	true_cond = false_cond;
	false_cond = t;
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
		  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
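/* Example (sketch): isel has no GE form, so for operands[1] == GE the
   code above rewrites it as LT with the move operands swapped, i.e.
   "dest = (a >= b) ? x : y" is emitted as "dest = (a < b) ? y : x",
   which is why %3 and %2 trade places in the first template.  */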
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (VOIDmode,
			      dest,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
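/* Sketch of the fallback path, assuming no direct min/max insn is
   available: smax is emitted as the conditional move
   "dest = (op0 >= op1) ? op0 : op1" (code GE), smin as the same
   compare with the move operands swapped; umax/umin use GEU.  */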
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}
static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
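/* Summary sketch of the fences chosen above around a load-locked/
   store-conditional region:

     model        before        after
     relaxed      (none)        (none)
     acquire      (none)        isync
     release      lwsync        (none)
     acq_rel      lwsync        isync
     seq_cst      hwsync        isync

   The trailing isync orders the reservation loop against later
   accesses; the leading sync orders it against earlier ones.  */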
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
			       shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask),
				       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  enum machine_mode mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask OLDVAL into position with the word.  */
      oldval = convert_modes (SImode, mode, oldval, 1);
      oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Shift and mask NEWVAL into position within the word.  */
      newval = convert_modes (SImode, mode, newval, 1);
      newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  x = gen_rtx_COMPARE (CCmode, x, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
}
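/* Shape of the sequence emitted above for a word-sized strong
   compare-and-swap (illustrative sketch only; the labels stand for
   the RTL labels generated in the code):

       <pre barrier per MOD_S>
     1: lwarx   retval,0,mem
        cmpw    cr0,retval,oldval
        bne-    cr0,2f
        stwcx.  newval,0,mem
        bne-    cr0,1b
     2: <post barrier per MOD_S>

   CR0 then holds EQ on success and NE on failure, which is what the
   final SET of BOOLVAL tests.  */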
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  enum machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = (enum memmodel) INTVAL (model_rtx);
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      switch (code)
	{
	case IOR:
	case XOR:
	  /* We've already zero-extended VAL.  That is sufficient to
	     make certain that it does not affect other bits.  */
	  mask = NULL;
	  break;

	case AND:
	  /* If we make certain that all of the other bits in VAL are
	     set, that will be sufficient to not affect other bits.  */
	  x = gen_rtx_NOT (SImode, mask);
	  x = gen_rtx_IOR (SImode, x, val);
	  emit_insn (gen_rtx_SET (VOIDmode, val, x));
	  mask = NULL;
	  break;

	case NOT:
	case PLUS:
	case MINUS:
	  /* These will all affect bits outside the field and need
	     adjustment via MASK within the loop.  */
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Prepare to adjust the return value.  */
      before = gen_reg_rtx (SImode);
      if (after)
	after = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
				 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
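/* Shape of the loop emitted above for, e.g., a full-word atomic add
   (illustrative sketch only):

       <pre barrier per MODEL>
     1: lwarx   before,0,mem
        add     after,before,val
        stwcx.  after,0,mem
        bne-    cr0,1b
        <post barrier per MODEL>

   For sub-word operands the add happens on the containing SImode word
   and rs6000_mask_atomic_subword re-inserts the field first.  */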
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (VOIDmode,
				simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode, ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
				: gen_movsi_update (breg, breg, delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode,
					      gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (VOIDmode,
				  simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
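/* Example of the overlap handling above (sketch): splitting a TImode
   copy from r3:r4 into r4:r5 on 64-bit code must move r4 before r3,
   since REGNO (src) < REGNO (dst); the backwards loop emits

     r5 = r4;  r4 = r3;

   whereas the forward order would clobber r4 before it is read.  */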
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static inline bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}

/* Return the first fixed-point register that is required to be
   saved. 32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
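/* Example (sketch): ALTIVEC_REG_BIT maps V0 to the most significant
   bit of the 32-bit word, so if only V20 and V31 are live the loop
   above produces mask = (1 << 11) | (1 << 0) = 0x00000801.  */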
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if ( GET_CODE (insn) == CALL_INSN
	     && SIBLING_CALL_P (insn))
	  {
	    info_ptr->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it. */
      info_ptr->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
	info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info_ptr->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }

  return;
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for savings/restoring registers.  */

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};
static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;
  bool lr_save_p;

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
      && info->first_gp_reg_save < 31
      && !global_regs_p (info->first_gp_reg_save, 32))
    strategy |= SAVRES_MULTIPLE;

  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
      || global_regs_p (info->first_fp_reg_save, 64))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_gp_reg_save == 32
      || (!(strategy & SAVRES_MULTIPLE)
	  && global_regs_p (info->first_gp_reg_save, 32)))
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
      || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
      if (info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_FPRS;
	    break;
	  }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
	       || !(strategy & SAVE_INLINE_GPRS)
	       || !(strategy & SAVE_INLINE_FPRS)
	       || !(strategy & SAVE_INLINE_VRS)
	       || !(strategy & REST_INLINE_FPRS)
	       || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; in those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
	  || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE, so
     just punt here.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_GPRS;
	    break;
	  }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
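/* Usage sketch: callers test the returned mask bit-wise, e.g.

     int strategy = rs6000_savres_strategy (info, using_static_chain_p);
     if (!(strategy & SAVE_INLINE_FPRS))
       ...emit a call to the out-of-line FPR save routine...

   A clear SAVE_/REST_ bit therefore means "use the out-of-line
   routine"; the bits are set to force inline saves/restores.  */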
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (P)		| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).


   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (P)		| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| SPE: area for 64-bit GP registers	|
		+---------------------------------------+
		| SPE alignment padding			|
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif

static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
	cfun->machine->insn_chain_scanned_p
	  = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So... since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
				 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! crtl->is_leaf
		       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
		       && info_ptr->spe_64bit_regs_used != 0
		       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size     = reg_size;
  info_ptr->fixed_size   = RS6000_SAVE_AREA;
  info_ptr->vars_size    = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size    = RS6000_ALIGN (crtl->outgoing_args_size,
					 TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
		       + info_ptr->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info_ptr->fixed_size + info_ptr->vars_size
	    + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = info_ptr->vrsave_save_offset & 0xF;
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;
	  gcc_assert (info_ptr->altivec_size == 0
		      || info_ptr->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
	}
      else
	info_ptr->ehrd_offset      = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset   = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset   = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset   = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
	{
	  /* Align stack so SPE GPR save area is aligned on a
	     double-word boundary.  */
	  if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
	    info_ptr->spe_padding_size
	      = 8 - (-info_ptr->cr_save_offset % 8);
	  else
	    info_ptr->spe_padding_size = 0;

	  info_ptr->spe_gp_save_offset
	    = info_ptr->cr_save_offset
	    - info_ptr->spe_padding_size
	    - info_ptr->spe_gp_size;

	  /* Adjust for SPE case.  */
	  info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
	}
      else if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = 16 - (-info_ptr->vrsave_save_offset % 16);
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
	}
      else
	info_ptr->ehrd_offset    = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset      -= ehrd_size;
      info_ptr->lr_save_offset   = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size    = RS6000_ALIGN (info_ptr->fp_size
					 + info_ptr->gp_size
					 + info_ptr->altivec_size
					 + info_ptr->altivec_padding_size
					 + info_ptr->spe_gp_size
					 + info_ptr->spe_padding_size
					 + ehrd_size
					 + info_ptr->cr_size
					 + info_ptr->vrsave_size,
					 save_align);

  non_fixed_size = (info_ptr->vars_size
		    + info_ptr->parm_size
		    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
				       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info_ptr->calls_p
      || (DEFAULT_ABI == ABI_AIX
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || rs6000_ra_ever_killed ())
    info_ptr->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
						      using_static_chain_p);

  if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
      || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_VRS))
    info_ptr->lr_save_p = 1;

  if (info_ptr->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx i;

	  /* FIXME: This should be implemented with attributes...

		 (set_attr "spe64" "true")....then,
		 if (get_spe64(insn)) return true;

	     It's the only reliable way to do the stuff below.  */

	  i = PATTERN (insn);
	  if (GET_CODE (i) == SET)
	    {
	      enum machine_mode mode = GET_MODE (SET_SRC (i));

	      if (SPE_VECTOR_MODE (mode))
		return true;
	      if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
		return true;
	    }
	}
    }

  return false;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
           ((current_function_decl && DECL_NAME (current_function_decl))
            ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
            : "<unknown>"));

  switch (info->abi)
    {
    default:             abi_string = "Unknown";        break;
    case ABI_NONE:       abi_string = "NONE";           break;
    case ABI_AIX:        abi_string = "AIX";            break;
    case ABI_DARWIN:     abi_string = "Darwin";         break;
    case ABI_V4:         abi_string = "V.4";            break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
             info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
             info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
             info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
             info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = " HOST_WIDE_INT_PRINT_DEC "\n",
             info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = " HOST_WIDE_INT_PRINT_DEC "\n",
             info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
             info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
             info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
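/* This dump is normally reached via -mdebug=stack, which sets
   TARGET_DEBUG_STACK so that rs6000_stack_info invokes debug_stack_info
   for every function it lays out; the function is also convenient to
   call by hand from a debugger.  */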
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
           (Pmode,
            plus_constant (Pmode,
                           copy_to_reg
                           (gen_rtx_MEM (Pmode,
                                         memory_address (Pmode, frame))),
                           RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
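/* rs6000_return_addr backs RETURN_ADDR_RTX, i.e. it is what
   __builtin_return_address (COUNT) expands to on this port.  The
   cfun->machine flags set above tell the prologue code that either a
   full frame (ra_needs_full_frame) or a live LR (ra_need_lr) must be
   kept so the value remains recoverable.  */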
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
         prototype, so the argument type info must be available
         here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg--;

      if (nvreg > 0)
        return false;
    }

  /* Under the AIX ABI we can't allow calls to non-local functions,
     because the callee may have a different TOC pointer to the
     caller and there's no way to ensure we restore the TOC when we
     return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || (DEFAULT_ABI == ABI_AIX
          && decl
          && !DECL_EXTERNAL (decl)
          && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
          && (!TARGET_SECURE_PLT
              || !flag_pic
              || (decl
                  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
          || lookup_attribute ("shortcall", attr_list))
        return true;
    }

  return false;
}
/* NULL if INSN insn is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
/* Return true if the link register has been clobbered somewhere other
   than the prologue/epilogue save and restore.  */

static bool
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return false;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

        move LR->R0
        bcl to set PIC register
        move LR->R31
        move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            {
              if (!SIBLING_CALL_P (insn))
                return true;
            }
          else if (find_regno_note (insn, REG_INC, LR_REGNO))
            return true;
          else if (set_of (reg, insn) != NULL_RTX
                   && !prologue_epilogue_contains (insn))
            return true;
        }
    }
  return false;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
        got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
        got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
        {
          tmp1 = gen_reg_rtx (Pmode);
          tmp2 = gen_reg_rtx (Pmode);
        }
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
                   ? gen_rtx_REG (Pmode, 0)
                   : gen_reg_rtx (Pmode));

      if (fromprolog)
        {
          rtx symF, symL;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          emit_insn (gen_load_toc_v4_PIC_1 (symF));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
        }
      else
        {
          rtx tocsym, lab;

          tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
          lab = gen_label_rtx ();
          emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
          emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
          if (TARGET_LINK_STACK)
            emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
          emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
        }
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
        emit_insn (gen_load_toc_aix_si (dest));
      else
        emit_insn (gen_load_toc_aix_di (dest));
    }
}
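/* A rough sketch of what the branches above expand to, assuming 32-bit
   SVR4 (illustrative only; the named patterns in rs6000.md are
   authoritative):

     flag_pic == 1:  load_toc_v4_pic_si, a "bl _GLOBAL_OFFSET_TABLE_@local-4"
                     style call that leaves the GOT address in LR;
     flag_pic == 2:  the LCF/LCL label pair computes the TOC base from
                     the bcl return address plus a link-time constant;
     non-PIC ELF32:  lis/addi of the .LCTOC1 constant-pool label via
                     gen_elf_high/gen_elf_low;
     AIX:            a single TOC-relative load of the TOC anchor.  */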
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
          || cfun->calls_alloca
          || info->total_size > 32767)
        {
          tmp = gen_frame_mem (Pmode, frame_rtx);
          emit_move_insn (operands[1], tmp);
          frame_rtx = operands[1];
        }
      else if (info->push_p)
        sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
                           info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        rtx pat = PATTERN (insn);
        int i;

        if (GET_CODE (pat) == PARALLEL)
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              rtx sub = XVECEXP (pat, 0, i);
              if (GET_CODE (sub) == USE)
                {
                  sub = XEXP (sub, 0);
                  if (GET_CODE (sub) == UNSPEC
                      && XINT (sub, 1) == UNSPEC_TOC)
                    return 1;
                }
            }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
        fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
                 XSTR (symbol, 0));
      else
        {
          fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
                   GET_RTX_NAME (GET_CODE (symbol)));
          debug_rtx (symbol);
        }
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
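/* For CMODEL_SMALL (or while pseudos are still available) the bare
   UNSPEC_TOCREL is returned and the move patterns turn it into a
   single TOC-relative access.  For the medium/large code models after
   reload, the address is split here into a HIGH/LO_SUM pair, with the
   HIGH part optionally cached in LARGETOC_REG, giving the usual
   addis + d-form access sequence.  */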
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
           (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
        || (hard_frame_needed
            && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
          && REGNO (stack_limit_rtx) > 1
          && REGNO (stack_limit_rtx) <= 31)
        {
          emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
               && TARGET_32BIT
               && DEFAULT_ABI == ABI_V4)
        {
          rtx toload = gen_rtx_CONST (VOIDmode,
                                      gen_rtx_PLUS (Pmode,
                                                    stack_limit_rtx,
                                                    GEN_INT (size)));

          emit_insn (gen_elf_high (tmp_reg, toload));
          emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else
        warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
        emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
        emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
        emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
                    ? gen_movsi_update_stack (stack_reg, stack_reg,
                                              todec, stack_reg)
                    : gen_movdi_di_update_stack (stack_reg, stack_reg,
                                                 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                gen_rtx_SET (VOIDmode, stack_reg,
                             gen_rtx_PLUS (Pmode, stack_reg,
                                           GEN_INT (-size))));
}
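/* A rough sketch of the result, for illustration only: on a 32-bit
   target a small frame is allocated with a single

       stwu r1,-SIZE(r1)

   (gen_movsi_update_stack), which decrements r1 and stores the back
   chain atomically; for SIZE > 32767 the negated size is first split
   into r0 and the indexed form of the update is used instead.  */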
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
         it exceeds SIZE.  If only one probe is needed, this will not
         generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                         -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
                                       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, r12,
                              plus_constant (Pmode, stack_pointer_rtx,
                                             -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
        {
          emit_move_insn (r0, GEN_INT (-rounded_size));
          emit_insn (gen_rtx_SET (VOIDmode, r0,
                                  gen_rtx_PLUS (Pmode, r12, r0)));
        }
      else
        emit_insn (gen_rtx_SET (VOIDmode, r0,
                                plus_constant (Pmode, r12, -rounded_size)));


      /* Step 3: the loop

         while (TEST_ADDR != LAST_ADDR)
           {
             TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
             probe at TEST_ADDR
           }

         probes at FIRST + N * PROBE_INTERVAL for values of N from 1
         until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
        emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
      else
        emit_insn (gen_probe_stack_rangesi (r12, r12, r0));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
         that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
        emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("{cmp|cmpd} 0,%0,%1", xops);
  else
    output_asm_insn ("{cmp|cmpw} 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("{cal %0,%1(%0)|addi %0,%0,%1}", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("{st|stw} %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
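/* With the default probe interval of 4096 bytes
   (1 << STACK_CHECK_PROBE_INTERVAL_EXP), the loop printed above looks
   roughly like this on a 32-bit target, with r12 as TEST_ADDR and r0
   as LAST_ADDR (illustrative only):

       .LPSRL0:
               cmpw 0,%r12,%r0
               beq 0,.LPSRE0
               addi %r12,%r12,-4096
               stw %r0,0(%r12)
               b .LPSRL0
       .LPSRE0:
*/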
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
                      rtx reg2, rtx rreg)
{
  rtx real, temp;

  if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      int i;

      gcc_checking_assert (val == 0);
      real = PATTERN (insn);
      if (GET_CODE (real) == PARALLEL)
        for (i = 0; i < XVECLEN (real, 0); i++)
          if (GET_CODE (XVECEXP (real, 0, i)) == SET)
            {
              rtx set = XVECEXP (real, 0, i);

              RTX_FRAME_RELATED_P (set) = 1;
            }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    gcc_checking_assert (val == 0);
  else
    real = replace_rtx (real, reg,
                        gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
                                                          STACK_POINTER_REGNUM),
                                      GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
        SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
        SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
        {
          temp = simplify_rtx (XEXP (SET_DEST (set), 0));
          if (temp)
            XEXP (SET_DEST (set), 0) = temp;
        }
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
        if (GET_CODE (XVECEXP (real, 0, i)) == SET)
          {
            rtx set = XVECEXP (real, 0, i);

            temp = simplify_rtx (SET_SRC (set));
            if (temp)
              SET_SRC (set) = temp;
            temp = simplify_rtx (SET_DEST (set));
            if (temp)
              SET_DEST (set) = temp;
            if (GET_CODE (SET_DEST (set)) == MEM)
              {
                temp = simplify_rtx (XEXP (SET_DEST (set), 0));
                if (temp)
                  XEXP (SET_DEST (set), 0) = temp;
              }
            RTX_FRAME_RELATED_P (set) = 1;
          }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

  return insn;
}
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
                   vrsave,
                   gen_rtx_UNSPEC_VOLATILE (SImode,
                                            gen_rtvec (2, reg, vrsave),
                                            UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

        (set (reg 999) (mem))
        (parallel [ (set (reg vrsave) (unspec blah))
                    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
        if (!epiloguep || call_used_regs[i])
          clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
                                             gen_rtx_REG (V4SImode, i));
        else
          {
            rtx reg = gen_rtx_REG (V4SImode, i);

            clobs[nclobs++]
              = gen_rtx_SET (VOIDmode,
                             reg,
                             gen_rtx_UNSPEC (V4SImode,
                                             gen_rtvec (1, reg), 27));
          }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
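/* For example, gen_frame_store (r31, r1, 8) on a 32-bit target builds

       (set (mem:SI (plus:SI (reg:SI 1) (const_int 8)))
            (reg:SI 31))

   while gen_frame_load swaps source and destination; in both cases
   gen_frame_mem marks the MEM with the frame alias set.  */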
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
                         || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
                         || (TARGET_E500_DOUBLE && mode == DFmode)
                         || (TARGET_SPE_ABI
                             && SPE_VECTOR_MODE (mode)
                             && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
                               NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

enum {
  SAVRES_LR = 0x1,
  SAVRES_SAVE = 0x2,
  SAVRES_REG = 0x0c,
  SAVRES_GPR = 0,
  SAVRES_FPR = 4,
  SAVRES_VR  = 8
};

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - SPE targets use different prefixes for 32/64-bit registers, and
       neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
        prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
        prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
        goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();

      if ((sel & SAVRES_LR))
        suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        prefix = ((sel & SAVRES_SAVE)
                  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
                  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        {
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
          if ((sel & SAVRES_LR))
            prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
          else
#endif
            {
              prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
              suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
            }
        }
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
        abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
         compatible with code generated by the system toolchain.  There is a
         single symbol for the start of save sequence, and the code here
         embeds an offset into that code on the basis of the first register
         to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
        sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
                 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
                 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
        sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
                 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
        sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
                 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
        abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
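/* Sample names produced (illustrative): saving GPRs from r29 up gives
   "_savegpr_29" on 32-bit SVR4, "_savegpr0_29" (the LR-saving flavour)
   or "_savegpr1_29" on 64-bit ELF, and "*saveGPR+64 ; save r29-r31" on
   Darwin, where the leading '*' form encodes a byte offset into the
   system's single save/restore routine.  */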
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save - 32
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
               : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
              && regno <= LAST_SAVRES_REGISTER
              && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
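/* That is: on AIX the out-of-line FPR routines and any LR-saving
   routines expect their pointer in r1, everything else in r12; on
   SVR4 and Darwin the pointer is r11, except that Darwin's FPR
   routines also use r1.  These register choices are part of the
   routines' ABI and must not be changed independently.  */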
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
                        rtx frame_reg_rtx, int save_area_offset, int lr_offset,
                        enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
               ? info->first_gp_reg_save
               : (sel & SAVRES_REG) == SAVRES_FPR
               ? info->first_fp_reg_save
               : (sel & SAVRES_REG) == SAVRES_VR
               ? info->first_altivec_reg_save
               : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
             ? 32
             : (sel & SAVRES_REG) == SAVRES_FPR
             ? 64
             : (sel & SAVRES_REG) == SAVRES_VR
             ? LAST_ALTIVEC_REGNO + 1
             : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
                   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
                   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
        = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
        = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
                       frame_reg_rtx, save_area_offset + reg_size * i,
                       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
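/* The PARALLEL built above thus contains, in order: an optional
   (return) for the "exit"-style restores, a clobber of LR, a (use) of
   the routine's SYMBOL_REF, the pointer register use (plus an extra
   clobber and a use of r0 for the [reg+reg]-addressed vector
   routines), one frame load/store per register, and finally the LR
   store for the LR-saving save variants.  */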
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
           && (!call_used_regs[reg]
               || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                   && !TARGET_SINGLE_PIC_BASE
                   && TARGET_TOC && TARGET_MINIMAL_TOC)))
          || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
              && !TARGET_SINGLE_PIC_BASE
              && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
19387 /* Emit function prologue as insns. */
19390 rs6000_emit_prologue (void)
19392 rs6000_stack_t
*info
= rs6000_stack_info ();
19393 enum machine_mode reg_mode
= Pmode
;
19394 int reg_size
= TARGET_32BIT
? 4 : 8;
19395 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
19396 rtx frame_reg_rtx
= sp_reg_rtx
;
19397 unsigned int cr_save_regno
;
19398 rtx cr_save_rtx
= NULL_RTX
;
19401 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
19402 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
19403 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
19404 /* Offset to top of frame for frame_reg and sp respectively. */
19405 HOST_WIDE_INT frame_off
= 0;
19406 HOST_WIDE_INT sp_off
= 0;
19408 #ifdef ENABLE_CHECKING
19409 /* Track and check usage of r0, r11, r12. */
19410 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
19411 #define START_USE(R) do \
19413 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19414 reg_inuse |= 1 << (R); \
19416 #define END_USE(R) do \
19418 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19419 reg_inuse &= ~(1 << (R)); \
19421 #define NOT_INUSE(R) do \
19423 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19426 #define START_USE(R) do {} while (0)
19427 #define END_USE(R) do {} while (0)
19428 #define NOT_INUSE(R) do {} while (0)
19431 if (flag_stack_usage_info
)
19432 current_function_static_stack_size
= info
->total_size
;
19434 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
&& info
->total_size
)
19435 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
, info
->total_size
);
19437 if (TARGET_FIX_AND_CONTINUE
)
19439 /* gdb on darwin arranges to forward a function from the old
19440 address by modifying the first 5 instructions of the function
19441 to branch to the overriding function. This is necessary to
19442 permit function pointers that point to the old function to
19443 actually forward to the new function. */
19444 emit_insn (gen_nop ());
19445 emit_insn (gen_nop ());
19446 emit_insn (gen_nop ());
19447 emit_insn (gen_nop ());
19448 emit_insn (gen_nop ());
19451 if (TARGET_SPE_ABI
&& info
->spe_64bit_regs_used
!= 0)
19453 reg_mode
= V2SImode
;
19457 /* Handle world saves specially here. */
19458 if (WORLD_SAVE_P (info
))
19465 /* save_world expects lr in r0. */
19466 reg0
= gen_rtx_REG (Pmode
, 0);
19467 if (info
->lr_save_p
)
19469 insn
= emit_move_insn (reg0
,
19470 gen_rtx_REG (Pmode
, LR_REGNO
));
19471 RTX_FRAME_RELATED_P (insn
) = 1;
19474 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19475 assumptions about the offsets of various bits of the stack
19477 gcc_assert (info
->gp_save_offset
== -220
19478 && info
->fp_save_offset
== -144
19479 && info
->lr_save_offset
== 8
19480 && info
->cr_save_offset
== 4
19483 && (!crtl
->calls_eh_return
19484 || info
->ehrd_offset
== -432)
19485 && info
->vrsave_save_offset
== -224
19486 && info
->altivec_save_offset
== -416);
19488 treg
= gen_rtx_REG (SImode
, 11);
19489 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
19491 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19492 in R11. It also clobbers R12, so beware! */
19494 /* Preserve CR2 for save_world prologues */
19496 sz
+= 32 - info
->first_gp_reg_save
;
19497 sz
+= 64 - info
->first_fp_reg_save
;
19498 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
19499 p
= rtvec_alloc (sz
);
19501 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
19502 gen_rtx_REG (SImode
,
19504 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
19505 gen_rtx_SYMBOL_REF (Pmode
,
19507 /* We do floats first so that the instruction pattern matches
19509 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
19511 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
19513 info
->first_fp_reg_save
+ i
),
19515 info
->fp_save_offset
+ frame_off
+ 8 * i
);
19516 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
19518 = gen_frame_store (gen_rtx_REG (V4SImode
,
19519 info
->first_altivec_reg_save
+ i
),
19521 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
19522 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
19524 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
19526 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
19528 /* CR register traditionally saved as CR2. */
19530 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
19531 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
19532 /* Explain about use of R0. */
19533 if (info
->lr_save_p
)
19535 = gen_frame_store (reg0
,
19536 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
19537 /* Explain what happens to the stack pointer. */
19539 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
19540 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, sp_reg_rtx
, newval
);
19543 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
19544 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
19545 treg
, GEN_INT (-info
->total_size
));
19546 sp_off
= frame_off
= info
->total_size
;
19549 strategy
= info
->savres_strategy
;
19551 /* For V.4, update stack before we do any saving and set back pointer. */
19552 if (! WORLD_SAVE_P (info
)
19554 && (DEFAULT_ABI
== ABI_V4
19555 || crtl
->calls_eh_return
))
19557 bool need_r11
= (TARGET_SPE
19558 ? (!(strategy
& SAVE_INLINE_GPRS
)
19559 && info
->spe_64bit_regs_used
== 0)
19560 : (!(strategy
& SAVE_INLINE_FPRS
)
19561 || !(strategy
& SAVE_INLINE_GPRS
)
19562 || !(strategy
& SAVE_INLINE_VRS
)));
19563 int ptr_regno
= -1;
19564 rtx ptr_reg
= NULL_RTX
;
19567 if (info
->total_size
< 32767)
19568 frame_off
= info
->total_size
;
19571 else if (info
->cr_save_p
19573 || info
->first_fp_reg_save
< 64
19574 || info
->first_gp_reg_save
< 32
19575 || info
->altivec_size
!= 0
19576 || info
->vrsave_mask
!= 0
19577 || crtl
->calls_eh_return
)
19581 /* The prologue won't be saving any regs so there is no need
19582 to set up a frame register to access any frame save area.
19583 We also won't be using frame_off anywhere below, but set
19584 the correct value anyway to protect against future
19585 changes to this function. */
19586 frame_off
= info
->total_size
;
19588 if (ptr_regno
!= -1)
19590 /* Set up the frame offset to that needed by the first
19591 out-of-line save function. */
19592 START_USE (ptr_regno
);
19593 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
19594 frame_reg_rtx
= ptr_reg
;
19595 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
19596 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
19597 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
19598 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
19599 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
19600 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
19601 frame_off
= -ptr_off
;
19603 rs6000_emit_allocate_stack (info
->total_size
, ptr_reg
, ptr_off
);
19604 sp_off
= info
->total_size
;
19605 if (frame_reg_rtx
!= sp_reg_rtx
)
19606 rs6000_emit_stack_tie (frame_reg_rtx
, false);
19609 /* If we use the link register, get it into r0. */
19610 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
)
19612 rtx addr
, reg
, mem
;
19614 reg
= gen_rtx_REG (Pmode
, 0);
19616 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
19617 RTX_FRAME_RELATED_P (insn
) = 1;
19619 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
19620 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
19622 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
19623 GEN_INT (info
->lr_save_offset
+ frame_off
));
19624 mem
= gen_rtx_MEM (Pmode
, addr
);
19625 /* This should not be of rs6000_sr_alias_set, because of
19626 __builtin_return_address. */
19628 insn
= emit_move_insn (mem
, reg
);
19629 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
19630 NULL_RTX
, NULL_RTX
);
19635 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
19636 r12 will be needed by out-of-line gpr restore. */
19637 cr_save_regno
= (DEFAULT_ABI
== ABI_AIX
19638 && !(strategy
& (SAVE_INLINE_GPRS
19639 | SAVE_NOINLINE_GPRS_SAVES_LR
))
19641 if (!WORLD_SAVE_P (info
)
19643 && REGNO (frame_reg_rtx
) != cr_save_regno
19644 && !(using_static_chain_p
&& cr_save_regno
== 11))
19648 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
19649 START_USE (cr_save_regno
);
19650 insn
= emit_insn (gen_movesi_from_cr (cr_save_rtx
));
19651 RTX_FRAME_RELATED_P (insn
) = 1;
19652 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19653 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19654 But that's OK. All we have to do is specify that _one_ condition
19655 code register is saved in this stack slot. The thrower's epilogue
19656 will then restore all the call-saved registers.
19657 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19658 set
= gen_rtx_SET (VOIDmode
, cr_save_rtx
,
19659 gen_rtx_REG (SImode
, CR2_REGNO
));
19660 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
19663 /* Do any required saving of fpr's. If only one or two to save, do
19664 it ourselves. Otherwise, call function. */
19665 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
19668 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
19669 if (save_reg_p (info
->first_fp_reg_save
+ i
))
19670 emit_frame_save (frame_reg_rtx
,
19671 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
19672 ? DFmode
: SFmode
),
19673 info
->first_fp_reg_save
+ i
,
19674 info
->fp_save_offset
+ frame_off
+ 8 * i
,
19675 sp_off
- frame_off
);
19677 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
19679 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
19680 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
19681 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
19682 rtx ptr_reg
= frame_reg_rtx
;
19684 if (REGNO (frame_reg_rtx
) == ptr_regno
)
19685 gcc_checking_assert (frame_off
== 0);
19688 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
19689 NOT_INUSE (ptr_regno
);
19690 emit_insn (gen_add3_insn (ptr_reg
,
19691 frame_reg_rtx
, GEN_INT (frame_off
)));
19693 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
19694 info
->fp_save_offset
,
19695 info
->lr_save_offset
,
19697 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
19698 NULL_RTX
, NULL_RTX
);
19703 /* Save GPRs. This is done as a PARALLEL if we are using
19704 the store-multiple instructions. */
19705 if (!WORLD_SAVE_P (info
)
19707 && info
->spe_64bit_regs_used
!= 0
19708 && info
->first_gp_reg_save
!= 32)
19711 rtx spe_save_area_ptr
;
19712 HOST_WIDE_INT save_off
;
19713 int ool_adjust
= 0;
19715 /* Determine whether we can address all of the registers that need
19716 to be saved with an offset from frame_reg_rtx that fits in
19717 the small const field for SPE memory instructions. */
19718 int spe_regs_addressable
19719 = (SPE_CONST_OFFSET_OK (info
->spe_gp_save_offset
+ frame_off
19720 + reg_size
* (32 - info
->first_gp_reg_save
- 1))
19721 && (strategy
& SAVE_INLINE_GPRS
));
19723 if (spe_regs_addressable
)
19725 spe_save_area_ptr
= frame_reg_rtx
;
19726 save_off
= frame_off
;
19730 /* Make r11 point to the start of the SPE save area. We need
19731 to be careful here if r11 is holding the static chain. If
19732 it is, then temporarily save it in r0. */
19733 HOST_WIDE_INT offset
;
19735 if (!(strategy
& SAVE_INLINE_GPRS
))
19736 ool_adjust
= 8 * (info
->first_gp_reg_save
19737 - (FIRST_SAVRES_REGISTER
+ 1));
19738 offset
= info
->spe_gp_save_offset
+ frame_off
- ool_adjust
;
19739 spe_save_area_ptr
= gen_rtx_REG (Pmode
, 11);
19740 save_off
= frame_off
- offset
;
19742 if (using_static_chain_p
)
19744 rtx r0
= gen_rtx_REG (Pmode
, 0);
19747 gcc_assert (info
->first_gp_reg_save
> 11);
19749 emit_move_insn (r0
, spe_save_area_ptr
);
19751 else if (REGNO (frame_reg_rtx
) != 11)
19754 emit_insn (gen_addsi3 (spe_save_area_ptr
,
19755 frame_reg_rtx
, GEN_INT (offset
)));
19756 if (!using_static_chain_p
&& REGNO (frame_reg_rtx
) == 11)
19757 frame_off
= -info
->spe_gp_save_offset
+ ool_adjust
;
19760 if ((strategy
& SAVE_INLINE_GPRS
))
19762 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
19763 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
19764 emit_frame_save (spe_save_area_ptr
, reg_mode
,
19765 info
->first_gp_reg_save
+ i
,
19766 (info
->spe_gp_save_offset
+ save_off
19768 sp_off
- save_off
);
19772 insn
= rs6000_emit_savres_rtx (info
, spe_save_area_ptr
,
19773 info
->spe_gp_save_offset
+ save_off
,
19775 SAVRES_SAVE
| SAVRES_GPR
);
19777 rs6000_frame_related (insn
, spe_save_area_ptr
, sp_off
- save_off
,
19778 NULL_RTX
, NULL_RTX
);
19781 /* Move the static chain pointer back. */
19782 if (!spe_regs_addressable
)
19784 if (using_static_chain_p
)
19786 emit_move_insn (spe_save_area_ptr
, gen_rtx_REG (Pmode
, 0));
19789 else if (REGNO (frame_reg_rtx
) != 11)
19793 else if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
19795 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
19796 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
19797 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
19798 rtx ptr_reg
= frame_reg_rtx
;
19799 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
19800 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
19804 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
19806 /* Need to adjust r11 (r12) if we saved any FPRs. */
19807 if (end_save
+ frame_off
!= 0)
19809 rtx offset
= GEN_INT (end_save
+ frame_off
);
19812 frame_off
= -end_save
;
19814 NOT_INUSE (ptr_regno
);
19815 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
19817 else if (!ptr_set_up
)
19819 NOT_INUSE (ptr_regno
);
19820 emit_move_insn (ptr_reg
, frame_reg_rtx
);
19822 ptr_off
= -end_save
;
19823 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
19824 info
->gp_save_offset
+ ptr_off
,
19825 info
->lr_save_offset
+ ptr_off
,
19827 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
19828 NULL_RTX
, NULL_RTX
);
19832 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVRES_MULTIPLE
))
19836 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
19837 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
19839 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
19841 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
19842 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
19843 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
19844 NULL_RTX
, NULL_RTX
);
19846 else if (!WORLD_SAVE_P (info
))
19849 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
19850 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
19851 emit_frame_save (frame_reg_rtx
, reg_mode
,
19852 info
->first_gp_reg_save
+ i
,
19853 info
->gp_save_offset
+ frame_off
+ reg_size
* i
,
19854 sp_off
- frame_off
);
19857 if (crtl
->calls_eh_return
)
19864 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
19865 if (regno
== INVALID_REGNUM
)
19869 p
= rtvec_alloc (i
);
19873 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
19874 if (regno
== INVALID_REGNUM
)
19878 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
19880 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
19881 RTVEC_ELT (p
, i
) = insn
;
19882 RTX_FRAME_RELATED_P (insn
) = 1;
19885 insn
= emit_insn (gen_blockage ());
19886 RTX_FRAME_RELATED_P (insn
) = 1;
19887 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
19890 /* In AIX ABI we need to make sure r2 is really saved. */
19891 if (TARGET_AIX
&& crtl
->calls_eh_return
)
19893 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
19894 rtx save_insn
, join_insn
, note
;
19895 long toc_restore_insn
;
19897 tmp_reg
= gen_rtx_REG (Pmode
, 11);
19898 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
19899 if (using_static_chain_p
)
19902 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
19906 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
19907 /* Peek at instruction to which this function returns. If it's
19908 restoring r2, then we know we've already saved r2. We can't
19909 unconditionally save r2 because the value we have will already
19910 be updated if we arrived at this function via a plt call or
19911 toc adjusting stub. */
19912 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
19913 toc_restore_insn
= TARGET_32BIT
? 0x80410014 : 0xE8410028;
19914 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
19915 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
19916 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
19917 validate_condition_mode (EQ
, CCUNSmode
);
19918 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
19919 emit_insn (gen_rtx_SET (VOIDmode
, compare_result
,
19920 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
19921 toc_save_done
= gen_label_rtx ();
19922 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
19923 gen_rtx_EQ (VOIDmode
, compare_result
,
19925 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
19927 jump
= emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
, jump
));
19928 JUMP_LABEL (jump
) = toc_save_done
;
19929 LABEL_NUSES (toc_save_done
) += 1;
19931 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
19932 TOC_REGNUM
, frame_off
+ 5 * reg_size
,
19933 sp_off
- frame_off
);
19935 emit_label (toc_save_done
);
19937 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
19938 have a CFG that has different saves along different paths.
19939 Move the note to a dummy blockage insn, which describes that
19940 R2 is unconditionally saved after the label. */
19941 /* ??? An alternate representation might be a special insn pattern
19942 containing both the branch and the store. That might let the
19943 code that minimizes the number of DW_CFA_advance opcodes better
19944 freedom in placing the annotations. */
19945 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
19947 remove_note (save_insn
, note
);
19949 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
19950 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
19951 RTX_FRAME_RELATED_P (save_insn
) = 0;
19953 join_insn
= emit_insn (gen_blockage ());
19954 REG_NOTES (join_insn
) = note
;
19955 RTX_FRAME_RELATED_P (join_insn
) = 1;
19957 if (using_static_chain_p
)
19959 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
19966 /* Save CR if we use any that must be preserved. */
19967 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
19969 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
19970 GEN_INT (info
->cr_save_offset
+ frame_off
));
19971 rtx mem
= gen_frame_mem (SImode
, addr
);
19972 /* See the large comment above about why CR2_REGNO is used. */
19973 rtx magic_eh_cr_reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
19975 /* If we didn't copy cr before, do so now using r0. */
19976 if (cr_save_rtx
== NULL_RTX
)
19981 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
19982 insn
= emit_insn (gen_movesi_from_cr (cr_save_rtx
));
19983 RTX_FRAME_RELATED_P (insn
) = 1;
19984 set
= gen_rtx_SET (VOIDmode
, cr_save_rtx
, magic_eh_cr_reg
);
19985 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
19987 insn
= emit_move_insn (mem
, cr_save_rtx
);
19988 END_USE (REGNO (cr_save_rtx
));
19990 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
19991 NULL_RTX
, NULL_RTX
);
19994 /* Update stack and set back pointer unless this is V.4,
19995 for which it was done previously. */
19996 if (!WORLD_SAVE_P (info
) && info
->push_p
19997 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
19999 rtx ptr_reg
= NULL
;
20002 /* If saving altivec regs we need to be able to address all save
20003 locations using a 16-bit offset. */
20004 if ((strategy
& SAVE_INLINE_VRS
) == 0
20005 || (info
->altivec_size
!= 0
20006 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
20007 + info
->total_size
- frame_off
) > 32767)
20008 || (info
->vrsave_mask
!= 0
20009 && (info
->vrsave_save_offset
20010 + info
->total_size
- frame_off
) > 32767))
20012 int sel
= SAVRES_SAVE
| SAVRES_VR
;
20013 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
20015 if (using_static_chain_p
20016 && ptr_regno
== STATIC_CHAIN_REGNUM
)
20018 if (REGNO (frame_reg_rtx
) != ptr_regno
)
20019 START_USE (ptr_regno
);
20020 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
20021 frame_reg_rtx
= ptr_reg
;
20022 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
20023 frame_off
= -ptr_off
;
20025 else if (REGNO (frame_reg_rtx
) == 1)
20026 frame_off
= info
->total_size
;
20027 rs6000_emit_allocate_stack (info
->total_size
, ptr_reg
, ptr_off
);
20028 sp_off
= info
->total_size
;
20029 if (frame_reg_rtx
!= sp_reg_rtx
)
20030 rs6000_emit_stack_tie (frame_reg_rtx
, false);
20033 /* Set frame pointer, if needed. */
20034 if (frame_pointer_needed
)
20036 insn
= emit_move_insn (gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
),
20038 RTX_FRAME_RELATED_P (insn
) = 1;
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);

      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    NOT_INUSE (0);
	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      save_regno = 12;
      if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
	save_regno = 11;
      else if (REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
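
/* Added note (commentary, not part of the original sources):
   ALTIVEC_REG_BIT gives v0 the most significant of the 32 VRSAVE bits
   and v31 the least, so the iorsi3 above ors every AltiVec register
   this function touches into the saved mask before the mask is moved
   back into VRSAVE for the OS to consult on context switch.  */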
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
#if TARGET_MACHO
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
    }
}
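
/* Added note (commentary, not part of the original sources): the
   5 * reg_size offset above lands on the fixed TOC slot of the linkage
   area -- 20 bytes above the stack pointer on 32-bit and 40 bytes on
   64-bit -- which is where the AIX-style calling convention expects r2
   to be saved.  */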
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
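
/* Added note (commentary): concretely, load_cr_save below tags the
   load insn with a REG_CFA_REGISTER note ("cr currently lives in this
   GPR") and restore_saved_cr tags the final move with REG_CFA_RESTORE
   ("cr is back in place"), so the unwinder stays correct even if the
   scheduler moves the mtcrf past the stack deallocation.  */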
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
					reg));

  if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);
  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
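
/* Added example (commentary): with the 64-bit AIX ABI,
   offset_below_red_zone_p (-100) is false -- the slot sits inside the
   288-byte red zone and survives a signal -- while
   offset_below_red_zone_p (-300) is true.  Under ABI_V4 every negative
   offset is considered clobberable.  */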
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
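
/* Added note (commentary): REG_NOTES is a nil-terminated EXPR_LIST
   chain, so the walk above advances to the final link and splices the
   whole cfa_restores list onto the end, keeping any notes the insn
   already carried.  */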
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      /* 10 fixed elements plus one load per saved register.  */
      p = rtvec_alloc (10
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_REG (Pmode,
						     LR_REGNO));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int i;
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
	{
	  int frame_regno = 11;

	  if ((strategy & REST_INLINE_VRS) == 0)
	    {
	      /* Of r11 and r12, select the one not clobbered by an
		 out-of-line restore function for the frame register.  */
	      frame_regno = 11 + 12 - scratch_regno;
	    }
	  frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (flag_shrink_wrap
		|| (offset_below_red_zone_p
		    (info->altivec_save_offset
		     + 16 * (i - info->first_altivec_reg_save)))))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);
  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + 5 * reg_size));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  /* Note: possible use of r0 here to address SPE regs.  */
	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && restoring_GPRs_inline);

      if (!spe_regs_addressable)
	{
	  int ool_adjust = 0;
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */

	  if (!restoring_GPRs_inline)
	    ool_adjust = 8 * (info->first_gp_reg_save
			      - (FIRST_SAVRES_REGISTER + 1));
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + frame_off
					  - ool_adjust)));
	  /* Keep the invariant that frame_reg_rtx + frame_off points
	     at the top of the stack frame.  */
	  frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if (restoring_GPRs_inline)
	{
	  HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;

	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		emit_move_insn (reg, mem);
	      }
	}
      else
	rs6000_emit_savres_rtx (info, frame_reg_rtx,
				info->spe_gp_save_offset + frame_off,
				info->lr_save_offset + frame_off,
				reg_mode,
				SAVRES_GPR | SAVRES_LR);
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;

      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_insn (gen_frame_load
		     (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
		      frame_reg_rtx,
		      info->gp_save_offset + frame_off + reg_size * i));
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK;  A duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!restoring_GPRs_inline
	    || using_load_multiple
	    || rs6000_reg_live_or_pic_offset_p (i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);

	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }

  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }
  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if (save_reg_p (info->first_fp_reg_save + i))
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);

	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     info->fp_save_offset + frame_off + 8 * i));
	  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall)
    {
      rtvec p;
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;

      if (! restoring_FPRs_inline)
	{
	  p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
	  RTVEC_ELT (p, 0) = ret_rtx;
	}
      else
	{
	  if (cfa_restores)
	    {
	      /* We can't hang the cfa_restores off a simple return,
		 since the shrink-wrap code sometimes uses an existing
		 return.  This means there might be a path from
		 pre-prologue code to this return, and dwarf2cfi code
		 wants the eh_frame unwinder state to be the same on
		 all paths to any point.  So we need to emit the
		 cfa_restores before the return.  For -m64 we really
		 don't need epilogue cfa_restores at all, except for
		 this irritating dwarf2cfi with shrink-wrap
		 requirement;  The stack red-zone means eh_frame info
		 from the prologue telling the unwinder to restore
		 from the stack is perfectly good right to the end of
		 the function.  */
	      emit_insn (gen_blockage ());
	      emit_cfa_restores (cfa_restores);
	      cfa_restores = NULL_RTX;
	    }
	  p = rtvec_alloc (2);
	  RTVEC_ELT (p, 0) = simple_return_rtx;
	}

      RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
			  ? gen_rtx_USE (VOIDmode,
					 gen_rtx_REG (Pmode, LR_REGNO))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, LR_REGNO)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  rtx sym;

	  if (flag_shrink_wrap)
	    cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

	  sym = rs6000_savres_routine_sym (info,
					   SAVRES_FPR | (lr ? SAVRES_LR : 0));
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (Pmode,
						       DEFAULT_ABI == ABI_AIX
						       ? 1 : 11));
	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	      RTVEC_ELT (p, i + 4)
		= gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	      if (flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;

    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }

    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyways, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
      if (! strcmp (language_string, "GNU C")
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      RS6000_OUTPUT_BASENAME (file, fname);
      putc ('-', file);
      rs6000_output_function_entry (file, fname);
      putc ('\n', file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
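
/* Added sketch (commentary, not literal compiler output): for a small
   C function the block above emits assembly shaped roughly like

	.long 0			# all-zero word marking the tbtab start
	.byte 0,0,...		# format type, language code, bit fields
	.short 3		# optional: length of the function name
				# optional: the name bytes themselves

   with the exact byte values coming from the fprintf calls above.  */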
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
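
/* Added sketch (commentary): for a thunk with only a constant
   adjustment, the code emitted below amounts to

	addi r3,r3,<delta>	# adjust the incoming `this' pointer
	b <function>		# tail-jump; FUNCTION returns to our caller

   with an additional load through the vtable when vcall_offset is
   nonzero.  */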
static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* A quick summary of the various types of 'constant-pool tables'
   found in the current implementation:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
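
/* Added example (commentary): with a full TOC on AIX, output_toc below
   emits entries of roughly this shape for the double constant 1.0:

	.tc FD_3ff00000_0[TC],0x3ff00000,0x0

   while minimal-TOC translation units emit the raw data words
   instead.  */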
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i
						     * sizeof (unsigned));
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
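
/* Added note (commentary): for example, hashing (symbol_ref "foo")
   starts from (SYMBOL_REF << 3) ^ Pmode and then folds in the length
   and bytes of "foo" through the 's' case above, so structurally equal
   constants always land in the same bucket.  */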
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     CODE_LABELs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc_toc_hash_struct ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
#if HOST_BITS_PER_WIDE_INT == 32
	  lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
			 POINTER_SIZE, &low, &high, 0);
#else
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
#endif
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
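/* Illustrative sketch, not part of GCC: the escaping scheme above reduced
   to a standalone program -- printable characters go inside a quoted
   .byte string, everything else is emitted numerically, and long strings
   are split early.  demo_output_ascii is a hypothetical name; guarded out
   with #if 0, extract to compile.  */
#if 0
#include <stdio.h>

static void
demo_output_ascii (FILE *file, const char *p, int n)
{
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;
  int i, count_string = 0;

  for (i = 0; i < n; i++)
    {
      char c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);
	  if (c == '"')			/* write two quotes to get one */
	    {
	      putc (c, file);
	      ++count_string;
	    }
	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  if (++count_string >= 512)	/* break long strings early */
	    {
	      fputs (to_close, file);
	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);	/* non-printables as decimal bytes */
	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }
  if (to_close)
    fputs (to_close, file);
}

int
main (void)
{
  demo_output_ascii (stdout, "hi\n\"x\"", 6);	/* mixes text and escapes */
  return 0;
}
#endif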
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
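/* Illustrative sketch, not part of GCC: the xlc-style naming rule above as
   a standalone demo.  "dir/foo.c" with SECTION_DESC "bss_" becomes
   "_foobss_c" -- leading directories stripped, the last period replaced,
   non-alphanumerics dropped.  demo_gen_section_name is hypothetical;
   guarded out with #if 0, extract to compile.  */
#if 0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
demo_gen_section_name (const char *filename, const char *section_desc)
{
  const char *q, *after_last_slash = filename, *last_period = 0;
  char *buf, *p;

  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;	/* strip leading directories */
      else if (*q == '.')
	last_period = q;
    }

  buf = (char *) malloc (strlen (after_last_slash)
			 + strlen (section_desc) + 2);
  p = buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);	/* replace the last period */
	  p += strlen (section_desc);
	}
      else if (isalnum ((unsigned char) *q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);		/* no period: just append */
  else
    *p = '\0';
  return buf;
}

int
main (void)
{
  char *name = demo_gen_section_name ("dir/foo.c", "bss_");
  printf ("%s\n", name);		/* prints "_foobss_c" */
  free (name);
  return 0;
}
#endif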
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode, 0);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, 1, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{cau|addis} %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
	{
	  /* Don't do anything, done in output_profile_hook ().  */
	}
      else
	{
	  gcc_assert (!TARGET_32BIT);

	  asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
	  asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

	  if (cfun->static_chain_decl != NULL)
	    {
	      asm_fprintf (file, "\tstd %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	      fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	      asm_fprintf (file, "\tld %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	    }
	  else
	    fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	}
      break;
    }
}
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue_1 (rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if (rs6000_sched_groups
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_cpu_attr == CPU_PPC603
		 || rs6000_cpu_attr == CPU_PPC604
		 || rs6000_cpu_attr == CPU_PPC604E
		 || rs6000_cpu_attr == CPU_PPC620
		 || rs6000_cpu_attr == CPU_PPC630
		 || rs6000_cpu_attr == CPU_PPC750
		 || rs6000_cpu_attr == CPU_PPC7400
		 || rs6000_cpu_attr == CPU_PPC7450
		 || rs6000_cpu_attr == CPU_PPCE5500
		 || rs6000_cpu_attr == CPU_PPCE6500
		 || rs6000_cpu_attr == CPU_POWER4
		 || rs6000_cpu_attr == CPU_POWER5
		 || rs6000_cpu_attr == CPU_POWER7
		 || rs6000_cpu_attr == CPU_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_COMPARE:
		case TYPE_DELAYED_COMPARE:
		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_STORE_U:
	  case TYPE_STORE_UX:
	  case TYPE_FPSTORE:
	  case TYPE_FPSTORE_U:
	  case TYPE_FPSTORE_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store. */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 4;
		      break;
		    }
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 6;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_IDIV:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 45;
		      break;
		    }
		  case TYPE_LDIV:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	  case TYPE_LOAD_U:
	  case TYPE_LOAD_UX:
	  case TYPE_LOAD_EXT:
	  case TYPE_LOAD_EXT_U:
	  case TYPE_LOAD_EXT_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x) */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 4;
		      break;
		    }
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 6;
		      break;
		    }
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_IDIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 45;
		      break;
		    }
		  case TYPE_LDIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	      if (get_attr_type (dep_insn) == TYPE_FP)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

  if (ret != cost)
    {
      const char *dep;

      switch (REG_NOTE_KIND (link))
	{
	default:	     dep = "unknown dependency"; break;
	case REG_DEP_TRUE:   dep = "data dependency";	 break;
	case REG_DEP_OUTPUT: dep = "output dependency";	 break;
	case REG_DEP_ANTI:   dep = "anti dependency";	 break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* Return true if INSN is microcoded.  Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
	  || type == TYPE_LOAD_EXT_UX
	  || type == TYPE_LOAD_UX
	  || type == TYPE_STORE_UX
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* Return true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
	  || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
	  || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
	  || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
	  || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
	  || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
	  || type == TYPE_IDIV || type == TYPE_LDIV
	  || type == TYPE_INSERT_WORD)
	return true;
    }

  return false;
}
/* Return true if INSN can be issued only from the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* Return true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set. */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false. */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if MEM_SIZE_KNOWN_P (mem)
    *size = MEM_SIZE (mem);
  else
    return false;

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)
    {
      *base = addr_rtx;
      *offset = 0;
    }
  else if (GET_CODE (addr_rtx) == PLUS
	   && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
  else
    return false;

  return true;
}
/* Return true if the target storage location of
   MEM1 is adjacent to the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
/* Return true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
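/* Illustrative sketch, not part of GCC: the adjacency and overlap tests
   above are plain interval arithmetic on (base reg, offset, size) triples.
   demo_adjacent/demo_overlap are hypothetical names; guarded out with
   #if 0, extract to compile.  */
#if 0
#include <stdio.h>

/* Two accesses [off1, off1+size1) and [off2, off2+size2) from the same
   base register.  */
static int
demo_adjacent (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static int
demo_overlap (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}

int
main (void)
{
  printf ("%d\n", demo_adjacent (0, 4, 4, 4));	/* 1: back-to-back words */
  printf ("%d\n", demo_overlap (0, 8, 4, 4));	/* 1: second word inside */
  printf ("%d\n", demo_overlap (0, 4, 4, 4));	/* 0: adjacent, no overlap */
  return 0;
}
#endif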
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN). Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_IMUL:
      case TYPE_IDIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {

      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn. This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount. This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store. Power6 wants loads and stores scheduled alternately
       when possible */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell. */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_RS64A:
  case CPU_PPC601: /* ? */
  case CPU_PPC7450:
    return 3;
  case CPU_PPC440:
  case CPU_PPC603:
  case CPU_PPC750:
  case CPU_PPC7400:
  case CPU_PPC8540:
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:
  case CPU_PPCE5500:
  case CPU_PPCE6500:
  case CPU_TITAN:
    return 2;
  case CPU_PPC476:
  case CPU_PPC604:
  case CPU_PPC604E:
  case CPU_PPC620:
  case CPU_PPC630:
    return 4;
  case CPU_POWER4:
  case CPU_POWER5:
  case CPU_POWER6:
  case CPU_POWER7:
    return 5;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_PPC8540:
      return 4;

    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char * fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}

/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap(str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int)rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL_RTX;

      if (CALL_P (insn)
	  || JUMP_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle. */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx *ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
	  && (recog_memoized (ready[n_ready - 2]) > 0))
	/* Simply swap first two insns.  */
	{
	  rtx tmp = ready[n_ready - 1];
	  ready[n_ready - 1] = ready[n_ready - 2];
	  ready[n_ready - 2] = tmp;
	}
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
		       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
	 issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
	 cycle and we attempt to locate another load in the ready list to
	 issue with it.

       - If the pendulum is -2, then two stores have already been
	 issued in this cycle, so we increase the priority of the first load
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
	 cycle and we attempt to locate another store in the ready list to
	 issue with it, preferring a store to an adjacent memory location to
	 facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
	 issued in this cycle, so we increase the priority of the first store
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
	   load/store instructions which make use of the LSU and which
	   would need to be accounted for to strictly model the behavior
	   of the machine.  Those instructions are currently unaccounted
	   for to help minimize compile time overhead of this code.  */
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp, load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
	/* Issuing a store, swing the load_store_pendulum to the left */
	load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
	/* Issuing a load, swing the load_store_pendulum to the right */
	load_store_pendulum++;
      else
	return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
	 the ready list, then all is well, so return. */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
	return cached_can_issue_more;

      if (load_store_pendulum == 1)
	{
	  /* A load has been issued in this cycle.  Scan the ready list
	     for another load to issue with it */

	  pos = *pn_ready-1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem))
		{
		  /* Found a load.  Move it to the head of the ready list,
		     and adjust its priority so that it is more likely to
		     stay there */
		  tmp = ready[pos];
		  for (i=pos; i<*pn_ready-1; i++)
		    ready[i] = ready[i + 1];
		  ready[*pn_ready-1] = tmp;

		  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		    INSN_PRIORITY (tmp)++;
		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -2)
	{
	  /* Two stores have been issued in this cycle.  Increase the
	     priority of the first load in the ready list to favor it for
	     issuing in the next cycle. */

	  pos = *pn_ready-1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads */
		  load_store_pendulum--;

		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -1)
	{
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location */
	  int first_store_pos = -1;

	  pos = *pn_ready-1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem))
		{
		  rtx str_mem2;
		  /* Maintain the index of the first store found on the
		     list */
		  if (first_store_pos == -1)
		    first_store_pos = pos;

		  if (is_store_insn (last_scheduled_insn, &str_mem2)
		      && adjacent_mem_locations (str_mem, str_mem2))
		    {
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there */
		      tmp = ready[pos];
		      for (i=pos; i<*pn_ready-1; i++)
			ready[i] = ready[i + 1];
		      ready[*pn_ready-1] = tmp;

		      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
			INSN_PRIORITY (tmp)++;

		      first_store_pos = -1;

		      break;
		    }
		}
	      pos--;
	    }

	  if (first_store_pos >= 0)
	    {
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there. */
	      tmp = ready[first_store_pos];
	      for (i=first_store_pos; i<*pn_ready-1; i++)
		ready[i] = ready[i + 1];
	      ready[*pn_ready-1] = tmp;
	      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		INSN_PRIORITY (tmp)++;
	    }
	}
      else if (load_store_pendulum == 2)
	{
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle. */

	  pos = *pn_ready-1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a store
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple stores */
		  load_store_pendulum++;

		  break;
		}
	      pos--;
	    }
	}
    }

  return cached_can_issue_more;
}
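/* Illustrative sketch, not part of GCC: the load/store pendulum above as a
   standalone state machine.  Negative values mean stores are ahead,
   positive values mean loads are ahead; +/-1 asks for a partner of the
   same kind, +/-2 boosts the opposite kind.  This simplification ignores
   the per-cycle reset done in rs6000_sched_reorder; guarded out with
   #if 0, extract to compile.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *issued = "SSLLS";	/* S = store, L = load, per issued insn */
  int pendulum = 0;
  const char *p;

  for (p = issued; *p; p++)
    {
      pendulum += (*p == 'L') ? 1 : -1;
      if (pendulum == 1 || pendulum == -1)
	printf ("after %c: look for another %s to pair\n",
		*p, pendulum == 1 ? "load" : "store");
      else if (pendulum == 2 || pendulum == -2)
	printf ("after %c: boost priority of first %s on ready list\n",
		*p, pendulum == 2 ? "store" : "load");
      else
	printf ("after %c: balanced or saturated, do nothing\n", *p);
    }
  return 0;
}
#endif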
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs). This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
	return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
	return true;

      if (!rs6000_sched_groups)
	return false;

      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_MFCR:
	case TYPE_MFCRF:
	case TYPE_MTCR:
	case TYPE_DELAYED_CR:
	case TYPE_CR_LOGICAL:
	case TYPE_MTJMPR:
	case TYPE_MFJMPR:
	case TYPE_IDIV:
	case TYPE_LDIV:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_INSERT_DWORD:
	case TYPE_EXTS:
	case TYPE_CNTLZ:
	case TYPE_SHIFT:
	case TYPE_VAR_SHIFT_ROTATE:
	case TYPE_TRAP:
	case TYPE_IMUL:
	case TYPE_IMUL2:
	case TYPE_IMUL3:
	case TYPE_LMUL:
	case TYPE_IDIV:
	case TYPE_INSERT_WORD:
	case TYPE_DELAYED_COMPARE:
	case TYPE_IMUL_COMPARE:
	case TYPE_LMUL_COMPARE:
	case TYPE_FPCOMPARE:
	case TYPE_MFCR:
	case TYPE_MTCR:
	case TYPE_MFJMPR:
	case TYPE_MTJMPR:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_LOAD_U:
	case TYPE_LOAD_UX:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_U:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	case TYPE_MFCR:
	case TYPE_MFCRF:
	case TYPE_MTCR:
	case TYPE_IDIV:
	case TYPE_LDIV:
	case TYPE_COMPARE:
	case TYPE_DELAYED_COMPARE:
	case TYPE_VAR_DELAYED_COMPARE:
	case TYPE_ISYNC:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_LOAD_U:
	case TYPE_LOAD_UX:
	case TYPE_LOAD_EXT:
	case TYPE_LOAD_EXT_U:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_U:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	case TYPE_MFJMPR:
	case TYPE_MTJMPR:
	  return true;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_SHIFT:
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_TRAP:
      case TYPE_IMUL:
      case TYPE_IMUL2:
      case TYPE_IMUL3:
      case TYPE_LMUL:
      case TYPE_IDIV:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
	return true;
      default:
	break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
	return true;
      default:
	break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
	{
	  rtx next = DEP_CON (dep);

	  if (next == next_insn
	      && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
	    return true;
	}
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in the
   last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
		 rtx next_insn, bool *group_end, int can_issue_more,
		 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
	     *group_count ,can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
	can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
	 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
	 a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
	 in this case the last nop will start a new group and the branch
	 will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
	can_issue_more--;

      /* Power6 and Power7 have special group ending nop. */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
	{
	  nop = gen_group_ending_nop ();
	  emit_insn_before (nop, next_insn);
	  can_issue_more = 0;
	}
      else
	while (can_issue_more > 0)
	  {
	    nop = gen_nop ();
	    emit_insn_before (nop, next_insn);
	    can_issue_more--;
	  }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
	 issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  (*group_count)++;
	  end = true;
	  for (i = 0; i < issue_rate; i++)
	    {
	      group_insns[i] = 0;
	    }
	}

      while (n_nops > 0)
	{
	  nop = gen_nop ();
	  emit_insn_before (nop, next_insn);
	  if (can_issue_more == issue_rate - 1) /* new group begins */
	    end = false;
	  can_issue_more--;
	  if (can_issue_more == 0)
	    {
	      can_issue_more = issue_rate - 1;
	      (*group_count)++;
	      end = true;
	      for (i = 0; i < issue_rate; i++)
		{
		  group_insns[i] = 0;
		}
	    }

	  n_nops--;
	}

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
	= (end
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
	(*group_count)--;

      if (sched_verbose > 6)
	fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
		 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
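/* Illustrative sketch, not part of GCC: the exact-regroup arm above in
   isolation -- given the number of vacant slots and whether the next insn
   is a branch, how many nops force it into a fresh group.
   demo_nops_needed is a hypothetical name; guarded out with #if 0,
   extract to compile.  */
#if 0
#include <stdio.h>

static int
demo_nops_needed (int can_issue_more, int next_is_branch)
{
  /* Only a branch may occupy the last (branch) slot, so a non-branch
     needs one nop fewer; a branch needs every remaining slot filled so
     the final nop opens the new group.  */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}

int
main (void)
{
  printf ("%d\n", demo_nops_needed (3, 0));	/* 2 nops for a non-branch */
  printf ("%d\n", demo_nops_needed (3, 1));	/* 3 nops for a branch */
  return 0;
}
#endif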
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the processor
   dispatcher.  It marks these group boundaries to reflect the estimated
   processor grouping, overriding the grouping that the scheduler had marked.
   Depending on the value of the flag '-minsert-sched-nops' this function can
   force certain insns into separate groups or force a certain distance between
   them by inserting nops, for example, if there exists a "costly dependence"
   between the insns.

   The function estimates the group boundaries that the processor will form as
   follows:  It keeps track of how many vacant issue slots are available after
   each insn.  A subsequent insn will start a new group if one of the following
   4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the next
     insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are vacant,
     which means that a cracked insn (which occupies two issue slots) can't be
     issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs to
     start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
	can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
	return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
	= (can_issue_more == 0
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
					next_insn, &group_end, can_issue_more,
					&group_count);

      if (group_end)
	{
	  group_count++;
	  can_issue_more = 0;
	  for (i = 0; i < issue_rate; i++)
	    {
	      group_insns[i] = 0;
	    }
	}

      if (GET_MODE (next_insn) == TImode && can_issue_more)
	PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
	PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
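/* Illustrative sketch, not part of GCC: the four group-boundary rules
   above as a standalone predicate.  demo_group_end and its flags are
   hypothetical stand-ins for the insn queries used in redefine_groups;
   guarded out with #if 0, extract to compile.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
demo_group_end (int can_issue_more, int issue_rate,
		bool next_is_branch, bool next_is_cracked,
		bool next_must_be_first)
{
  return can_issue_more == 0				/* no slots left */
	 || (can_issue_more == 1 && !next_is_branch)	/* only branch slot */
	 || (can_issue_more <= 2 && next_is_cracked)	/* needs two slots */
	 || (can_issue_more < issue_rate && next_must_be_first);
}

int
main (void)
{
  /* One vacant slot, next insn is not a branch: a new group starts.  */
  printf ("%d\n", (int) demo_group_end (1, 5, false, false, false));
  return 0;
}
#endif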
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
	break;

      if (group_end)
	{
	  /* If the scheduler had marked group termination at this location
	     (between insn and next_insn), and neither insn nor next_insn will
	     force group termination, pad the group with nops to force group
	     termination.  */
	  if (can_issue_more
	      && (rs6000_sched_insert_nops == sched_finish_pad_groups)
	      && !insn_terminates_group_p (insn, current_group)
	      && !insn_terminates_group_p (next_insn, previous_group))
	    {
	      if (!is_branch_slot_insn (next_insn))
		can_issue_more--;

	      while (can_issue_more)
		{
		  nop = gen_nop ();
		  emit_insn_before (nop, next_insn);
		  can_issue_more--;
		}
	    }

	  can_issue_more = issue_rate;
	  group_count++;
	}

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
	return;

      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
24343 FNADDR is an RTX for the address of the function's pure code.
24344 CXT is an RTX for the static chain value for the function. */
24347 rs6000_trampoline_init (rtx m_tramp
, tree fndecl
, rtx cxt
)
24349 int regsize
= (TARGET_32BIT
) ? 4 : 8;
24350 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
24351 rtx ctx_reg
= force_reg (Pmode
, cxt
);
24352 rtx addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
24354 switch (DEFAULT_ABI
)
24357 gcc_unreachable ();
24359 /* Under AIX, just build the 3 word function descriptor */
24362 rtx fnmem
, fn_reg
, toc_reg
;
24364 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS
)
24365 error ("You cannot take the address of a nested function if you use "
24366 "the -mno-pointers-to-nested-functions option.");
24368 fnmem
= gen_const_mem (Pmode
, force_reg (Pmode
, fnaddr
));
24369 fn_reg
= gen_reg_rtx (Pmode
);
24370 toc_reg
= gen_reg_rtx (Pmode
);
24372 /* Macro to shorten the code expansions below. */
24373 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24375 m_tramp
= replace_equiv_address (m_tramp
, addr
);
24377 emit_move_insn (fn_reg
, MEM_PLUS (fnmem
, 0));
24378 emit_move_insn (toc_reg
, MEM_PLUS (fnmem
, regsize
));
24379 emit_move_insn (MEM_PLUS (m_tramp
, 0), fn_reg
);
24380 emit_move_insn (MEM_PLUS (m_tramp
, regsize
), toc_reg
);
24381 emit_move_insn (MEM_PLUS (m_tramp
, 2*regsize
), ctx_reg
);
24387 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24390 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__trampoline_setup"),
24391 LCT_NORMAL
, VOIDmode
, 4,
24393 GEN_INT (rs6000_trampoline_size ()), SImode
,
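/* Illustrative sketch, not part of GCC: the 3-word AIX function descriptor
   the ABI_AIX arm above fills in.  The struct is hypothetical; each word
   follows the register size (4 or 8 bytes), matching the 12/24 bytes
   reported by rs6000_trampoline_size for AIX.  Guarded out with #if 0,
   extract to compile.  */
#if 0
#include <stdio.h>

struct aix_func_desc
{
  void *code_addr;	/* entry point, copied from the target descriptor */
  void *toc;		/* TOC pointer of the target function */
  void *static_chain;	/* set to CXT for the nested function */
};

int
main (void)
{
  /* 24 bytes on a 64-bit host, analogous to the 64-bit AIX case.  */
  printf ("descriptor size: %zu bytes\n", sizeof (struct aix_func_desc));
  return 0;
}
#endif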
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

	__attribute__((altivec(vector__)))
	__attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
	__attribute__((altivec(bool__)))	(always followed by 'unsigned')

  and may appear more than once (e.g., 'vector bool char') in a
  given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
				 tree name ATTRIBUTE_UNUSED,
				 tree args,
				 int flags ATTRIBUTE_UNUSED,
				 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
	&& TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
	{
	  if (TARGET_64BIT)
	    error ("use of %<long%> in AltiVec types is invalid for "
		   "64-bit code without -mvsx");
	  else if (rs6000_warn_altivec_long)
	    warning (0, "use of %<long%> in AltiVec types is deprecated; "
		     "use %<int%>");
	}
      else if (type == long_long_unsigned_type_node
	       || type == long_long_integer_type_node)
	error ("use of %<long long%> in AltiVec types is invalid without "
	       "-mvsx");
      else if (type == double_type_node)
	error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
	{
	case DImode:
	  result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
	  break;
	case SImode:
	  result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	  break;
	case HImode:
	  result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	  break;
	case QImode:
	  result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	  break;
	case SFmode: result = V4SF_type_node; break;
	case DFmode: result = V2DF_type_node; break;
	  /* If the user says 'vector int bool', we may be handed the 'bool'
	     attribute _before_ the 'vector' attribute, and so select the
	     proper type in the 'b' case below.  */
	case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
	case V2DImode: case V2DFmode:
	  result = type;
	default: break;
	}
      break;
    case 'b':
      switch (mode)
	{
	case DImode: case V2DImode: result = bool_V2DI_type_node; break;
	case SImode: case V4SImode: result = bool_V4SI_type_node; break;
	case HImode: case V8HImode: result = bool_V8HI_type_node; break;
	case QImode: case V16QImode: result = bool_V16QI_type_node;
	default: break;
	}
      break;
    case 'p':
      switch (mode)
	{
	case V8HImode: result = pixel_V8HI_type_node;
	default: break;
	}
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
	  !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
	{
	  toc_initialized = 1;
	  fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
	  (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
	  fprintf (asm_out_file, "\t.tc ");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, "\n");

	  fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	}
      else
	fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
	{
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	  toc_initialized = 1;
	}
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
			   SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
			       unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}
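
/* Illustrative sketch, not compiled into GCC: the matching rule above
   accepts a section name and any dot-separated subsection of it.  */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool
matches_section (const char *section, const char *templ)
{
  size_t len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}

int
main (void)
{
  assert (matches_section (".sdata", ".sdata"));	/* exact name */
  assert (matches_section (".sdata.foo", ".sdata"));	/* subsection */
  assert (!matches_section (".sdata2", ".sdata"));	/* distinct section */
  return 0;
}
#endif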
static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

DEF_VEC_O(branch_island);
DEF_VEC_ALLOC_O(branch_island,gc);

static VEC(branch_island,gc) *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
			    int line_number)
{
  branch_island *bi = VEC_safe_push (branch_island, gc, branch_islands, NULL);

  bi->function_name = function_name;
  bi->label_name = label_name;
  bi->line_number = line_number;
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!VEC_empty (branch_island, branch_islands))
    {
      branch_island *bi = VEC_last (branch_island, branch_islands);
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
	strcpy (name_buf, name+1);
      else
	{
	  name_buf[0] = '_';
	  strcpy (name_buf+1, name);
	}
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
	      strcat (tmp_buf, name);
	      strcat (tmp_buf, "\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }
	  else
	    {
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }

	  strcat (tmp_buf, "\taddis r11,r11,ha16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtlr r0\n");

	  strcat (tmp_buf, "\taddi r12,r11,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
	}
      else
	{
	  strcat (tmp_buf, ":\nlis r12,hi16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	}
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      VEC_pop (branch_island, branch_islands);
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
	     int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
	{
	  rtx label_rtx = gen_label_rtx ();
	  char *label_buf, temp_buf[256];
	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
				       CODE_LABEL_NUMBER (label_rtx));
	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
	  labelname = get_identifier (label_buf);
	  add_compiler_branch_island (labelname, funname, insn_line (insn));
	}
      else
	labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
	 instruction will reach 'foo', otherwise link as 'bl L42'".
	 "L42" should be a 'branch island', that will do a far jump to
	 'foo'.  Branch islands are generated in
	 macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
	       dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
	{
	  char name[32];
	  get_ppc476_thunk_name (name);
	  fprintf (file, "\tbl %s\n", name);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      else
	{
	  fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
	   (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)

rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
					rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
	 it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
						     Pmode, reg_temp);
      offset =
	rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
						Pmode, reg_temp);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (Pmode, base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    {
	      rtx mem = force_const_mem (Pmode, orig);
	      return machopic_legitimize_pic_address (mem, Pmode, reg);
	    }
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
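
/* Illustrative sketch, not compiled into GCC: SMALL_INT above tests
   whether a constant fits the signed 16-bit displacement of a D-form
   address.  The same check on a plain integer:  */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
fits_signed_16 (int64_t v)
{
  /* Adding 0x8000 maps [-0x8000, 0x7fff] onto [0, 0xffff].  */
  return ((uint64_t) v + 0x8000) < 0x10000;
}
#endif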
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
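
/* Illustrative sketch, not compiled into GCC, assuming a
   MAX_INIT_PRIORITY of 65535: priority 100 yields the section name
   ".ctors.65435", so higher-priority constructors sort later, which the
   right-to-left execution order of .ctors then undoes.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned max_init_priority = 65535;	/* assumed value */
  unsigned priority = 100;
  char buf[16];
  sprintf (buf, ".ctors.%.5u", max_init_priority - priority);
  puts (buf);	/* prints ".ctors.65435" */
  return 0;
}
#endif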
static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
	{
	  fputs ("\t.size\t", file);
	  assemble_name (file, name);
	  fputs (",24\n\t.type\t.", file);
	  assemble_name (file, name);
	  fputs (",@function\n", file);
	  if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
	    {
	      fputs ("\t.globl\t.", file);
	      assemble_name (file, name);
	      putc ('\n', file);
	    }
	}
      else
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
	desc_name++;

      if (TREE_PUBLIC (decl))
	fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
	fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
	fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
		 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
		  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
		  : 2));
      if (rs6000_passes_vector)
	fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
		 (TARGET_ALTIVEC_ABI ? 2
		  : TARGET_SPE_ABI ? 3
		  : 1));
      if (rs6000_returns_struct)
	fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
		 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
#endif
}

#endif /* TARGET_ELF */
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}
static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[3] = { "PR", "RO", "RW" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
	return data_section;
      else
	return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;

  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
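
/* Illustrative sketch, not compiled into GCC: the same stripping on
   heap storage, e.g. "foo[DS]" -> "foo", with a leading '*' dropped
   first (strndup is POSIX).  */
#if 0
#include <stdlib.h>
#include <string.h>

static char *
strip_xcoff_mapping_class (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (len >= 4 && name[len - 1] == ']')
    len -= 4;			/* drop the "[XX]" suffix */
  return strndup (name, len);
}
#endif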
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE || !decl)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))
		  || mask_operand (x, mode)
		  || (mode == DImode
		      && mask64_operand (x, DImode))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
	{
	  if ((outer_code == IOR || outer_code == XOR)
	      && CONST_DOUBLE_HIGH (x) == 0
	      && (CONST_DOUBLE_LOW (x)
		  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
	    {
	      *total = 0;
	      return true;
	    }
	  else if ((outer_code == AND && and64_2_operand (x, DImode))
		   || ((outer_code == SET
			|| outer_code == IOR
			|| outer_code == XOR)
		       && CONST_DOUBLE_HIGH (x) == 0))
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = 0;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode)
	       && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      if (TARGET_ISEL && !TARGET_MFCRF)
		*total = COSTS_N_INSNS (8);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (mode == Pmode)
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  if (TARGET_ISEL && !TARGET_MFCRF)
	    *total = COSTS_N_INSNS (8);
	  else
	    *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
			bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_RTX_NAME (code),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
		|| rs6000_cpu == PROCESSOR_POWER7)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_UNIT_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
			   bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
	return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
/* Newton-Raphson approximation of floating point divide with just 2 passes
   (either single precision floating point, or newer machines with higher
   accuracy estimates).  Support both scalar and vector divide.  Assumes no
   trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, y1, u0, v0;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
  rtx one = rs6000_load_constant_and_splat (mode, dconst1);

  gcc_assert (code != CODE_FOR_nothing);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - (d * x0) */

  e1 = gen_reg_rtx (mode);
  rs6000_emit_madd (e1, e0, e0, e0);		/* e1 = (e0 * e0) + e0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e1, x0, x0);		/* y1 = (e1 * x0) + x0 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y1));		/* u0 = n * y1 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - (d * u0) */

  rs6000_emit_madd (dst, v0, y1, u0);		/* dst = (v0 * y1) + u0 */
}
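
/* Illustrative sketch, not compiled into GCC: the same two-pass
   refinement in scalar C, with fma() standing in for the fused
   multiply-add patterns emitted above and an exact reciprocal standing
   in for the hardware fre/fres estimate.  */
#if 0
#include <math.h>

static double
nr_divide (double n, double d)
{
  double x0 = 1.0 / d;			/* hardware supplies only an estimate */
  double e0 = fma (-d, x0, 1.0);	/* e0 = 1 - d*x0 */
  double e1 = fma (e0, e0, e0);		/* e1 = e0*e0 + e0 */
  double y1 = fma (e1, x0, x0);		/* y1 = e1*x0 + x0, refined 1/d */
  double u0 = n * y1;			/* u0 ~= n/d */
  double v0 = fma (-d, u0, n);		/* v0 = n - d*u0, the residual */
  return fma (v0, y1, u0);		/* correct u0 by v0*y1 */
}
#endif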
/* Newton-Raphson approximation of floating point divide that has a low
   precision estimate.  Assumes no trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - d * x0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e0, x0, x0);		/* y1 = x0 + e0 * x0 */

  e1 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e1, e0, e0));		/* e1 = e0 * e0 */

  y2 = gen_reg_rtx (mode);
  rs6000_emit_madd (y2, e1, y1, y1);		/* y2 = y1 + e1 * y1 */

  e2 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e2, e1, e1));		/* e2 = e1 * e1 */

  y3 = gen_reg_rtx (mode);
  rs6000_emit_madd (y3, e2, y2, y2);		/* y3 = y2 + e2 * y2 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y3));		/* u0 = n * y3 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - d * u0 */

  rs6000_emit_madd (dst, v0, y3, u0);		/* dst = u0 + v0 * y3 */
}
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);

  if (RS6000_RECIP_HIGH_PRECISION_P (mode))
    rs6000_emit_swdiv_high_precision (dst, n, d);
  else
    rs6000_emit_swdiv_low_precision (dst, n, d);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);
  int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					  UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
}
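
/* Illustrative sketch, not compiled into GCC: each pass above computes
   x1 = x0 * (1.5 - y * x0 * x0) with y = 0.5 * src; the same loop in
   scalar C, using an exact rsqrt in place of the frsqrte estimate.  */
#if 0
#include <math.h>

static double
nr_rsqrt (double src, int passes)
{
  double x0 = 1.0 / sqrt (src);	/* stand-in for the hardware estimate */
  double y = src * 0.5;
  int i;

  for (i = 0; i < passes; i++)
    {
      double u = x0 * x0;
      double v = fma (-y, u, 1.5);	/* v = 1.5 - y*x0*x0 */
      x0 = x0 * v;
    }
  return x0;
}
#endif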
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
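
/* Illustrative sketch, not compiled into GCC: multiplying per-byte
   population counts by 0x01010101 accumulates all the byte sums into
   the top byte, which the final shift extracts.  The first three
   statements stand in for what popcntb computes in hardware.  */
#if 0
#include <stdint.h>

static unsigned
popcount32 (uint32_t x)
{
  uint32_t b = (x & 0x55555555u) + ((x >> 1) & 0x55555555u);
  b = (b & 0x33333333u) + ((b >> 2) & 0x33333333u);
  b = (b & 0x0f0f0f0fu) + ((b >> 4) & 0x0f0f0f0fu);	/* per-byte counts */
  return (b * 0x01010101u) >> 24;	/* byte sums land in the top byte */
}
#endif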
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
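
/* Illustrative sketch, not compiled into GCC: xor-folding halves of a
   word preserves parity, so the sequences above fold the popcntb
   result at 16- and 8-bit granularity and mask with 1.  Folding a raw
   value all the way down gives the same answer:  */
#if 0
#include <stdint.h>

static unsigned
parity32 (uint32_t x)
{
  x ^= x >> 16;
  x ^= x >> 8;
  x ^= x >> 4;
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;
}
#endif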
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { CODE_FOR_altivec_vpkuhum,
      {  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { CODE_FOR_altivec_vpkuwum,
      {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { CODE_FOR_altivec_vmrghb,
      {  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23 } },
    { CODE_FOR_altivec_vmrghh,
      {  0,  1, 16, 17,  2,  3, 18, 19,  4,  5, 20, 21,  6,  7, 22, 23 } },
    { CODE_FOR_altivec_vmrghw,
      {  0,  1,  2,  3, 16, 17, 18, 19,  4,  5,  6,  7, 20, 21, 22, 23 } },
    { CODE_FOR_altivec_vmrglb,
      {  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { CODE_FOR_altivec_vmrglh,
      {  8,  9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { CODE_FOR_altivec_vmrglw,
      {  8,  9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
        break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
        perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
        if (perm[i] != elt)
          break;
      if (i == 16)
        {
          emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
          return true;
        }

      if (elt % 2 == 0)
        {
          for (i = 0; i < 16; i += 2)
            if (perm[i] != elt || perm[i + 1] != elt + 1)
              break;
          if (i == 16)
            {
              x = gen_reg_rtx (V8HImode);
              emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
                                             GEN_INT (elt / 2)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }

      if (elt % 4 == 0)
        {
          for (i = 0; i < 16; i += 4)
            if (perm[i] != elt
                || perm[i + 1] != elt + 1
                || perm[i + 2] != elt + 2
                || perm[i + 3] != elt + 3)
              break;
          if (i == 16)
            {
              x = gen_reg_rtx (V4SImode);
              emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
                                             GEN_INT (elt / 4)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
        swapped = false;
      else if (perm[0] == elt + 16)
        swapped = true;
      else
        continue;
      for (i = 1; i < 16; ++i)
        {
          elt = patterns[j].perm[i];
          if (swapped)
            elt = (elt >= 16 ? elt - 16 : elt + 16);
          else if (one_vec && elt >= 16)
            elt -= 16;
          if (perm[i] != elt)
            break;
        }
      if (i == 16)
        {
          enum insn_code icode = patterns[j].impl;
          enum machine_mode omode = insn_data[icode].operand[0].mode;
          enum machine_mode imode = insn_data[icode].operand[1].mode;

          if (swapped)
            x = op0, op0 = op1, op1 = x;
          if (imode != V16QImode)
            {
              op0 = gen_lowpart (imode, op0);
              op1 = gen_lowpart (imode, op1);
            }
          if (omode == V16QImode)
            x = target;
          else
            x = gen_reg_rtx (omode);
          emit_insn (GEN_FCN (icode) (x, op0, op1));
          if (omode != V16QImode)
            emit_move_insn (target, gen_lowpart (V16QImode, x));
          return true;
        }
    }

  return false;
}
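
/* Note on the matcher above: selector elements 0-15 name bytes of the first
   input vector and 16-31 bytes of the second, mirroring how vperm reads its
   two source registers; "which" records whether one or both inputs are
   actually referenced so the selector can be canonicalized before the splat
   and merge/pack tables are consulted.  */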
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success! */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}
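
/* Note: PERM0 and PERM1 use a two-bit encoding per output element: values
   0 and 1 select an element of OP0, values 2 and 3 the corresponding
   element of OP1, so bit 1 tells which operand an element comes from.  The
   folding above canonicalizes the pair into the (op0-element, op1-element)
   form that the VEC_SELECT expects.  */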
bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}
/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
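
/* Note: for a V4SI target the loop above builds the selector {0, 2, 4, 6},
   i.e. the even elements of the double-width concatenation of OP0 and OP1,
   which is exactly what an extract-even operation returns.  */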
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
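
/* Note: for a V4SI target the loop above produces the selector {0, 4, 1, 5}
   for a high interleave and {2, 6, 3, 7} for a low one (indices 4-7 name
   elements of OP1).  The BYTES_BIG_ENDIAN test flips the meaning of HIGHP on
   little-endian, where the vector "high" and "low" halves are swapped
   relative to memory order.  */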
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
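
/* Note: as a worked example, a complex float (SCmode) returned in
   floating-point registers has 4-byte parts, so it hits neither
   single-register early return above; it comes back as a PARALLEL of two
   SFmode registers, FP_ARG_RETURN at byte offset 0 and FP_ARG_RETURN + 1
   at byte offset 4.  */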
/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
        gen_rtvec (4,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 2),
                                      GEN_INT (8)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 3),
                                      GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
            ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
            : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
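
/* Note: per the usual INITIAL_ELIMINATION_OFFSET convention, the value
   computed above is the initial difference between the FROM and TO
   registers, i.e. the constant added to a FROM-relative address so it
   becomes TO-relative at the start of the function.  */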
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
        }
      else
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
        }
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}
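
/* Note: register numbers 1200 and above do not name hardware registers;
   they are synthetic numbers this file uses for the upper halves of the
   64-bit SPE GPRs, so the unwinder can track each 32-bit half separately
   (see the 1200..1231 range check in rs6000_dbx_register_number).  */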
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}
/* Target hook for invalid_arg_for_unprototyped_fn. */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}
void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = locator_location (INSN_LOCATOR (insn));

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
        return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  int mask;			/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",		MASK_ALTIVEC,		false, true  },
  { "cmpb",		MASK_CMPB,		false, true  },
  { "dlmzb",		MASK_DLMZB,		false, true  },
  { "fprnd",		MASK_FPRND,		false, true  },
  { "hard-dfp",		MASK_DFP,		false, true  },
  { "isel",		MASK_ISEL,		false, true  },
  { "mfcrf",		MASK_MFCRF,		false, true  },
  { "mfpgpr",		MASK_MFPGPR,		false, true  },
  { "mulhw",		MASK_MULHW,		false, true  },
  { "multiple",		MASK_MULTIPLE,		false, true  },
  { "update",		MASK_NO_UPDATE,		true , true  },
  { "popcntb",		MASK_POPCNTB,		false, true  },
  { "popcntd",		MASK_POPCNTD,		false, true  },
  { "powerpc-gfxopt",	MASK_PPC_GFXOPT,	false, true  },
  { "powerpc-gpopt",	MASK_PPC_GPOPT,		false, true  },
  { "recip-precision",	MASK_RECIP_PRECISION,	false, true  },
  { "string",		MASK_STRING,		false, true  },
  { "vsx",		MASK_VSX,		false, true  },
#ifdef MASK_64BIT
#if TARGET_AIX
  { "aix64",		MASK_64BIT,		false, false },
  { "aix32",		MASK_64BIT,		true,  false },
#else
  { "64",		MASK_64BIT,		false, false },
  { "32",		MASK_64BIT,		true,  false },
#endif
#endif
#ifdef MASK_EABI
  { "eabi",		MASK_EABI,		false, false },
#endif
#ifdef MASK_LITTLE_ENDIAN
  { "little",		MASK_LITTLE_ENDIAN,	false, false },
  { "big",		MASK_LITTLE_ENDIAN,	true,  false },
#endif
#ifdef MASK_RELOCATABLE
  { "relocatable",	MASK_RELOCATABLE,	false, false },
#endif
#ifdef MASK_STRICT_ALIGN
  { "strict-align",	MASK_STRICT_ALIGN,	false, false },
#endif
  { "soft-float",	MASK_SOFT_FLOAT,	false, false },
  { "string",		MASK_STRING,		false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",		RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		RS6000_BTM_VSX,		false, false },
  { "spe",		RS6000_BTM_SPE,		false, false },
  { "paired",		RS6000_BTM_PAIRED,	false, false },
  { "fre",		RS6000_BTM_FRE,		false, false },
  { "fres",		RS6000_BTM_FRES,	false, false },
  { "frsqrte",		RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		RS6000_BTM_POPCNTD,	false, false },
  { "cell",		RS6000_BTM_CELL,	false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    int mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        target_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec.  */
                        if (mask == MASK_VSX && !invert)
                          mask |= MASK_ALTIVEC;

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          target_flags &= ~mask;
                        else
                          target_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}
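
/* Note: the parser above accepts the same comma-separated keyword list in
   both spellings of the interface, for example:

     #pragma GCC target ("altivec,no-vsx")
     int foo (void) __attribute__((__target__("cpu=power7")));

   Each keyword is either "cpu="/"tune=" plus a processor name, an entry
   from rs6000_opt_masks (optionally prefixed with "no-"), or an entry from
   rs6000_opt_vars.  */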
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  old_optimize = build_optimization_node ();
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node ();
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node ();

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node ();
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  unsigned prev_bumask, cur_bumask, diff_bumask;
  int prev_flags, cur_flags, diff_flags;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node ()) == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_target_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_target_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}
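
/* Note: XORing the previous and current flag words isolates exactly the
   option bits that changed; macros are deleted for changed bits that were
   set before (prev & diff) and defined for changed bits that are set now
   (cur & diff), leaving unchanged options untouched.  */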
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */

static void
rs6000_set_current_function (tree fndecl)
{
  tree old_tree = (rs6000_previous_fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
                   : NULL_TREE);

  tree new_tree = (fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
                   : NULL_TREE);

  if (TARGET_DEBUG_TARGET)
    {
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
          print_final = true;
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
          print_final = true;
        }

      if (print_final)
        fprintf (stderr, "--------------------\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
    {
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)
        ;

      else if (new_tree)
        {
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          target_reinit ();
        }

      else if (old_tree)
        {
          struct cl_target_option *def
            = TREE_TARGET_OPTION (target_option_current_node);

          cl_target_option_restore (&global_options, def);
          target_reinit ();
        }
    }
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr)
{
  ptr->rs6000_target_flags_explicit = target_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct cl_target_option *ptr)
{
  target_flags_explicit = ptr->rs6000_target_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  size_t i;
  int flags = ptr->x_target_flags;
  unsigned bu_mask = ptr->x_rs6000_builtin_mask;

  /* Print the various mask options.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
    if ((flags & rs6000_opt_masks[i].mask) != 0)
      {
        flags &= ~ rs6000_opt_masks[i].mask;
        fprintf (file, "%*s-m%s%s\n", indent, "",
                 rs6000_opt_masks[i].invert ? "no-" : "",
                 rs6000_opt_masks[i].name);
      }

  /* Print the various options that are variables.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
    {
      size_t j = rs6000_opt_vars[i].target_offset;
      if (((signed char *) ptr)[j])
        fprintf (file, "%*s-m%s\n", indent, "",
                 rs6000_opt_vars[i].name);
    }

  /* Print the various builtin flags.  */
  fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
  for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
    if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
      fprintf (file, "%*s%s builtins supported\n", indent, "",
               rs6000_builtin_mask_names[i].name);
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
         function can inline an altivec function but a non-vsx function
         can't inline a vsx function.  */
      if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
          == callee_opts->x_target_flags)
        ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
             (DECL_NAME (caller)
              ? IDENTIFIER_POINTER (DECL_NAME (caller))
              : "<unknown>"),
             (DECL_NAME (callee)
              ? IDENTIFIER_POINTER (DECL_NAME (callee))
              : "<unknown>"),
             (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
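
/* Note: assuming the conventional AIX function-descriptor layout, the entry
   address sits at offset 0 and the AIX_FUNC_DESC_TOC_* and AIX_FUNC_DESC_SC_*
   constants used above name the TOC and static-chain words that follow it
   (word size 4 in 32-bit mode, 8 in 64-bit mode).  */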
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"