/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "tree-flow.h"
#include "tm-constrs.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack
{
  int reload_completed;          /* stack info won't change from here on */
  int first_gp_reg_save;         /* first callee saved GP register used */
  int first_fp_reg_save;         /* first callee saved FP register used */
  int first_altivec_reg_save;    /* first callee saved AltiVec register used */
  int lr_save_p;                 /* true if the link reg needs to be saved */
  int cr_save_p;                 /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;      /* mask of vec registers to save */
  int push_p;                    /* true if we need to allocate stack space */
  int calls_p;                   /* true if the function makes any calls */
  int world_save_p;              /* true if we're saving *everything*:
                                    r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;           /* which ABI to use */
  int gp_save_offset;            /* offset to save GP regs from initial SP */
  int fp_save_offset;            /* offset to save FP regs from initial SP */
  int altivec_save_offset;       /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;            /* offset to save LR from initial SP */
  int cr_save_offset;            /* offset to save CR from initial SP */
  int vrsave_save_offset;        /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;        /* offset to save spe 64-bit gprs */
  int varargs_save_offset;       /* offset to save the varargs registers */
  int ehrd_offset;               /* offset to EH return data */
  int reg_size;                  /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;       /* variable save area size */
  int parm_size;                 /* outgoing parameter size */
  int save_size;                 /* save area size */
  int fixed_size;                /* fixed size of stack frame */
  int gp_size;                   /* size of saved GP registers */
  int fp_size;                   /* size of saved FP registers */
  int altivec_size;              /* size of saved AltiVec registers */
  int cr_size;                   /* size to hold CR if not in save_size */
  int vrsave_size;               /* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;      /* size of altivec alignment padding if
                                    not in save_size */
  int spe_gp_size;               /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;      /* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;
/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];
/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
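
/* For illustration: -mrecip=rsqrtf selects the "rsqrtf" entry above, which
   sets RECIP_SF_RSQRT | RECIP_V4SF_RSQRT (0x010 | 0x040 == 0x050) in the
   reciprocal control mask, so only single-precision scalar and vector
   reciprocal square root estimates are generated automatically.  */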
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);
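
/* A gen_2arg_fn_t holds a pointer to one of the gen_* functions produced
   from the machine description for a two-operand pattern; for example
   (illustrative only) &gen_adddf3 can be stored in such a pointer and
   later invoked as (*fn) (dest, src1, src2) to emit the insn.  */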
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction size costs on 64bit processors.  */
static struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction costs on RS64A processors.  */
static struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on MPCCORE processors.  */
static struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC403 processors.  */
static struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC405 processors.  */
static struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC440 processors.  */
static struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC476 processors.  */
static struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
};
/* Instruction costs on PPC601 processors.  */
static struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC603 processors.  */
static struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604 processors.  */
static struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604e processors.  */
static struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC620 processors.  */
static struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC630 processors.  */
static struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC7450 processors.  */
static struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC8540 processors.  */
static struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC processors.  */
static struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC64 processors.  */
static struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE5500 processors.  */
static struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE6500 processors.  */
static struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  64,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Instruction costs on POWER7 processors.  */
static struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};
/* Instruction costs on POWER A2 processors.  */
static struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),    /* muldi */
  COSTS_N_INSNS (22),    /* divsi */
  COSTS_N_INSNS (28),    /* divdi */
  COSTS_N_INSNS (3),     /* fp */
  COSTS_N_INSNS (3),     /* dmul */
  COSTS_N_INSNS (59),    /* sdiv */
  COSTS_N_INSNS (72),    /* ddiv */
  64,			 /* cache line size */
  16,			 /* l1 cache */
  2048,			 /* l2 cache */
  16,			 /* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
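
/* The table above is built with the classic X-macro technique: each
   RS6000_BUILTIN_* macro is redefined to expand to one initializer row,
   rs6000-builtin.def is included so every entry emits a row, and the
   macros are undefined again afterwards.  For instance, a (hypothetical)
   def-file line

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   would expand to the row { "foo", CODE_FOR_foo, MASK, ATTR },.  */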
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;
static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;			/* builtin function type.  */
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
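
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) is 0x20000000 (%v2):
   higher-numbered vector registers occupy less significant bits.  */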
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
  POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
};
/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
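
/* Worked example: assuming a 32-bit target where UNITS_PER_WORD is 4 and
   UNITS_PER_FP_WORD is 8, DFmode (8 bytes) in a GPR needs
   (8 + 4 - 1) / 4 == 2 consecutive registers, while the same mode in an
   FPR needs (8 + 8 - 1) / 8 == 1 register.  */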
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  */
  if (VECTOR_MEM_VSX_P (mode))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	return ALTIVEC_REGNO_P (last_regno);
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  This excludes the 32-bit decimal float mode for
     now.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be able
     to fit within the register set.  In the future, allow TImode in the
     Altivec or VSX registers.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
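
/* For example, a V2DFmode value under VSX that starts in an FP register
   must end in an FP register as well: even though VSX numbers the FP and
   AltiVec files contiguously, a 16-byte vector is never allowed to
   straddle the boundary between them.  */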
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
#define DEBUG_FMT_D "%-32s= %d\n"
#define DEBUG_FMT_X "%-32s= 0x%x\n"
#define DEBUG_FMT_S "%-32s= %s\n"
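
/* These formats left-justify the key in a 32-column field, so e.g.
   fprintf (stderr, DEBUG_FMT_S, "abi", "aix") prints the padded key
   followed by "= aix", keeping the -mdebug=reg output aligned.  */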
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  char costly_num[20];
  char nop_num[20];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;

  /* Map enum rs6000_vector to string.  */
  static const char *rs6000_debug_vector_unit[] = {
    "none",
    "altivec",
    "vsx",
    "paired",
    "spe",
    "other"
  };
  fprintf (stderr, "Register information: (last virtual reg = %d)\n",
	   LAST_VIRTUAL_REGISTER);
  rs6000_debug_reg_print (0, 31, "gr");
  rs6000_debug_reg_print (32, 63, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1762 "d reg_class = %s\n"
1763 "f reg_class = %s\n"
1764 "v reg_class = %s\n"
1765 "wa reg_class = %s\n"
1766 "wd reg_class = %s\n"
1767 "wf reg_class = %s\n"
1768 "ws reg_class = %s\n\n",
1769 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_d
]],
1770 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_f
]],
1771 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_v
]],
1772 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wa
]],
1773 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wd
]],
1774 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wf
]],
1775 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ws
]]);
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
      {
	nl = "\n";
	fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
		 GET_MODE_NAME (m),
		 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
		 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
      }

  if (nl)
    fputs (nl, stderr);
1790 if (rs6000_recip_control
)
1792 fprintf (stderr
, "\nReciprocal mask = 0x%x\n", rs6000_recip_control
);
1794 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
1795 if (rs6000_recip_bits
[m
])
1798 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1800 (RS6000_RECIP_AUTO_RE_P (m
)
1802 : (RS6000_RECIP_HAVE_RE_P (m
) ? "have" : "none")),
1803 (RS6000_RECIP_AUTO_RSQRTE_P (m
)
1805 : (RS6000_RECIP_HAVE_RSQRTE_P (m
) ? "have" : "none")));
1808 fputs ("\n", stderr
);
1811 if (rs6000_cpu_index
>= 0)
1812 fprintf (stderr
, DEBUG_FMT_S
, "cpu",
1813 processor_target_table
[rs6000_cpu_index
].name
);
1815 if (rs6000_tune_index
>= 0)
1816 fprintf (stderr
, DEBUG_FMT_S
, "tune",
1817 processor_target_table
[rs6000_tune_index
].name
);
1819 switch (rs6000_sched_costly_dep
)
1821 case max_dep_latency
:
1822 costly_str
= "max_dep_latency";
1826 costly_str
= "no_dep_costly";
1829 case all_deps_costly
:
1830 costly_str
= "all_deps_costly";
1833 case true_store_to_load_dep_costly
:
1834 costly_str
= "true_store_to_load_dep_costly";
1837 case store_to_load_dep_costly
:
1838 costly_str
= "store_to_load_dep_costly";
1842 costly_str
= costly_num
;
1843 sprintf (costly_num
, "%d", (int)rs6000_sched_costly_dep
);
1847 fprintf (stderr
, DEBUG_FMT_S
, "sched_costly_dep", costly_str
);
1849 switch (rs6000_sched_insert_nops
)
1851 case sched_finish_regroup_exact
:
1852 nop_str
= "sched_finish_regroup_exact";
1855 case sched_finish_pad_groups
:
1856 nop_str
= "sched_finish_pad_groups";
1859 case sched_finish_none
:
1860 nop_str
= "sched_finish_none";
1865 sprintf (nop_num
, "%d", (int)rs6000_sched_insert_nops
);
1869 fprintf (stderr
, DEBUG_FMT_S
, "sched_insert_nops", nop_str
);
1871 switch (rs6000_sdata
)
1878 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "data");
1882 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "sysv");
1886 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "eabi");
1891 switch (rs6000_traceback
)
1893 case traceback_default
: trace_str
= "default"; break;
1894 case traceback_none
: trace_str
= "none"; break;
1895 case traceback_part
: trace_str
= "part"; break;
1896 case traceback_full
: trace_str
= "full"; break;
1897 default: trace_str
= "unknown"; break;
1900 fprintf (stderr
, DEBUG_FMT_S
, "traceback", trace_str
);
1902 switch (rs6000_current_cmodel
)
1904 case CMODEL_SMALL
: cmodel_str
= "small"; break;
1905 case CMODEL_MEDIUM
: cmodel_str
= "medium"; break;
1906 case CMODEL_LARGE
: cmodel_str
= "large"; break;
1907 default: cmodel_str
= "unknown"; break;
1910 fprintf (stderr
, DEBUG_FMT_S
, "cmodel", cmodel_str
);
1912 switch (rs6000_current_abi
)
1914 case ABI_NONE
: abi_str
= "none"; break;
1915 case ABI_AIX
: abi_str
= "aix"; break;
1916 case ABI_V4
: abi_str
= "V4"; break;
1917 case ABI_DARWIN
: abi_str
= "darwin"; break;
1918 default: abi_str
= "unknown"; break;
1921 fprintf (stderr
, DEBUG_FMT_S
, "abi", abi_str
);
1923 if (rs6000_altivec_abi
)
1924 fprintf (stderr
, DEBUG_FMT_S
, "altivec_abi", "true");
1927 fprintf (stderr
, DEBUG_FMT_S
, "spe_abi", "true");
1929 if (rs6000_darwin64_abi
)
1930 fprintf (stderr
, DEBUG_FMT_S
, "darwin64_abi", "true");
1932 if (rs6000_float_gprs
)
1933 fprintf (stderr
, DEBUG_FMT_S
, "float_gprs", "true");
1935 fprintf (stderr
, DEBUG_FMT_S
, "always_hint", tf
[!!rs6000_always_hint
]);
1936 fprintf (stderr
, DEBUG_FMT_S
, "align_branch",
1937 tf
[!!rs6000_align_branch_targets
]);
1938 fprintf (stderr
, DEBUG_FMT_D
, "tls_size", rs6000_tls_size
);
1939 fprintf (stderr
, DEBUG_FMT_D
, "long_double_size",
1940 rs6000_long_double_type_size
);
1941 fprintf (stderr
, DEBUG_FMT_D
, "sched_restricted_insns_priority",
1942 (int)rs6000_sched_restricted_insns_priority
);
1943 fprintf (stderr
, DEBUG_FMT_D
, "Number of standard builtins",
1945 fprintf (stderr
, DEBUG_FMT_D
, "Number of rs6000 builtins",
1946 (int)RS6000_BUILTIN_COUNT
);
1947 fprintf (stderr
, DEBUG_FMT_X
, "Builtin mask", rs6000_builtin_mask
);
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  int r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[MQ_REGNO] = MQ_REGS;
  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = CA_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate vector information; this must be set up before the
     rs6000_hard_regno_nregs_internal calls below.  */
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
      rs6000_vector_reload[m][0] = CODE_FOR_nothing;
      rs6000_vector_reload[m][1] = CODE_FOR_nothing;
    }

  for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
    rs6000_constraints[c] = NO_REGS;

  /* The VSX hardware allows native alignment for vectors, but we control
     whether the compiler believes it can use native alignment or still uses
     128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
        {
          rs6000_vector_mem[V4SImode] = VECTOR_VSX;
          rs6000_vector_mem[V8HImode] = VECTOR_VSX;
          rs6000_vector_mem[V16QImode] = VECTOR_VSX;
        }
      else
        {
          rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
          rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
          rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
        }
    }

  /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
     Altivec doesn't have 64-bit support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode] = VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
        = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }

  /* TODO add SPE and paired floating point vector support.  */

  /* Register class constraints for the constraints that depend on compile
     switches.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;

  if (TARGET_VSX)
    {
      /* At present, we just use VSX_REGS, but we have different constraints
         based on the use, in case we want to fine tune the default register
         class used.  wa = any VSX register, wf = register class to use for
         V4SF, wd = register class to use for V2DF, and ws = register class to
         use for DF scalars.  */
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
                                                  ? VSX_REGS
                                                  : FLOAT_REGS);
    }

  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  /* Set up the reload helper functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
        {
          rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
          rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
          rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_di_store;
          rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_di_load;
          rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_di_store;
          rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_di_load;
          rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_di_store;
          rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_di_load;
          rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_di_store;
          rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_di_load;
          rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_di_store;
          rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_di_load;
          if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
            {
              rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
              rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
            }
        }
      else
        {
          rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
          rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
          rs6000_vector_reload[V8HImode][0]  = CODE_FOR_reload_v8hi_si_store;
          rs6000_vector_reload[V8HImode][1]  = CODE_FOR_reload_v8hi_si_load;
          rs6000_vector_reload[V4SImode][0]  = CODE_FOR_reload_v4si_si_store;
          rs6000_vector_reload[V4SImode][1]  = CODE_FOR_reload_v4si_si_load;
          rs6000_vector_reload[V2DImode][0]  = CODE_FOR_reload_v2di_si_store;
          rs6000_vector_reload[V2DImode][1]  = CODE_FOR_reload_v2di_si_load;
          rs6000_vector_reload[V4SFmode][0]  = CODE_FOR_reload_v4sf_si_store;
          rs6000_vector_reload[V4SFmode][1]  = CODE_FOR_reload_v4sf_si_load;
          rs6000_vector_reload[V2DFmode][0]  = CODE_FOR_reload_v2df_si_store;
          rs6000_vector_reload[V2DFmode][1]  = CODE_FOR_reload_v2df_si_load;
          if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
            {
              rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
              rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
            }
        }
    }

  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
        = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
        rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
        reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
        reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
        reg_size = UNITS_PER_FP_WORD;

      else
        reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
        rs6000_class_max_nregs[m][c]
          = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
    }

  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;

  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double-precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
        warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
        warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
        warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
        {
          if (RS6000_RECIP_HAVE_RE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_DIV) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_DIV) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
        }
    }

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
        rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
        fprintf (stderr,
                 "SImode variable mult cost = %d\n"
                 "SImode constant mult cost = %d\n"
                 "SImode short constant mult cost = %d\n"
                 "DImode multiplication cost = %d\n"
                 "SImode division cost = %d\n"
                 "DImode division cost = %d\n"
                 "Simple fp operation cost = %d\n"
                 "DFmode multiplication cost = %d\n"
                 "SFmode division cost = %d\n"
                 "DFmode division cost = %d\n"
                 "cache line size = %d\n"
                 "l1 cache size = %d\n"
                 "l2 cache size = %d\n"
                 "simultaneous prefetches = %d\n"
                 "\n",
                 rs6000_cost->mulsi,
                 rs6000_cost->mulsi_const,
                 rs6000_cost->mulsi_const9,
                 rs6000_cost->muldi,
                 rs6000_cost->divsi,
                 rs6000_cost->divdi,
                 rs6000_cost->fp,
                 rs6000_cost->dmul,
                 rs6000_cost->sdiv,
                 rs6000_cost->ddiv,
                 rs6000_cost->cache_line_size,
                 rs6000_cost->l1_cache_size,
                 rs6000_cost->l2_cache_size,
                 rs6000_cost->simultaneous_prefetches);
    }
}
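/* A worked example of the CLASS_MAX_NREGS computation above, which is a
   ceiling division: a 16-byte V2DFmode value needs (16 + 8 - 1) / 8 = 2
   registers in FLOAT_REGS (8-byte UNITS_PER_FP_WORD), but
   (16 + 4 - 1) / 4 = 4 registers in 32-bit GENERAL_REGS.  */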
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }
  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    {
      target_flags |= MASK_ALTIVEC;
    }
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
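/* With the 64-bit default above, 'long double' has the same representation
   as 'double'; subtargets that want a 128-bit long double define
   RS6000_DEFAULT_LONG_DOUBLE_SIZE themselves before this point, which is
   why the definition is guarded by #ifndef.  */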
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */

unsigned
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)                 ? RS6000_BTM_ALTIVEC  : 0)
          | ((TARGET_VSX)                   ? RS6000_BTM_VSX      : 0)
          | ((TARGET_SPE)                   ? RS6000_BTM_SPE      : 0)
          | ((TARGET_PAIRED_FLOAT)          ? RS6000_BTM_PAIRED   : 0)
          | ((TARGET_FRE)                   ? RS6000_BTM_FRE      : 0)
          | ((TARGET_FRES)                  ? RS6000_BTM_FRES     : 0)
          | ((TARGET_FRSQRTE)               ? RS6000_BTM_FRSQRTE  : 0)
          | ((TARGET_FRSQRTES)              ? RS6000_BTM_FRSQRTES : 0)
          | ((TARGET_POPCNTD)               ? RS6000_BTM_POPCNTD  : 0)
          | ((TARGET_POWERPC)               ? RS6000_BTM_POWERPC  : 0)
          | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL     : 0));
}
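/* As a rough illustration: a configuration with AltiVec and VSX enabled
   (and hence, normally, fre/fres/frsqrte/frsqrtes and popcntd as well)
   yields a mask of roughly RS6000_BTM_ALTIVEC | RS6000_BTM_VSX
   | RS6000_BTM_FRE | RS6000_BTM_FRES | RS6000_BTM_FRSQRTE
   | RS6000_BTM_FRSQRTES | RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC.  */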
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */

static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  int set_masks;
  int cpu_index;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
             " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  set_masks = POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;

  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else
    {
      const char *default_cpu =
        (implicit_cpu ? implicit_cpu
         : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = implicit_cpu != 0;
    }

  gcc_assert (cpu_index >= 0);

  target_flags &= ~set_masks;
  target_flags |= (processor_target_table[cpu_index].target_enable
                   & set_masks);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
        = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
        if (processor_target_table[i].processor == tune_proc)
          {
            rs6000_tune_index = tune_index = i;
            break;
          }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
        (rs6000_cpu == PROCESSOR_PPC8540 ? 1
         : rs6000_cpu == PROCESSOR_PPC8548 ? 2
         : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
          || (main_target_opt->x_rs6000_spe != rs6000_spe)
          || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
        error ("AltiVec not supported in this target");
      if (TARGET_SPE)
        error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
        error ("SPE not supported in this target");
    }

  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
                                  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
        {
          target_flags &= ~MASK_MULTIPLE;
          if ((target_flags_explicit & MASK_MULTIPLE) != 0)
            warning (0, "-mmultiple is not supported on little endian systems");
        }

      if (TARGET_STRING)
        {
          target_flags &= ~MASK_STRING;
          if ((target_flags_explicit & MASK_STRING) != 0)
            warning (0, "-mstring is not supported on little endian systems");
        }
    }
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
          || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
        {
          if (target_flags_explicit & MASK_VSX)
            msg = N_("-mvsx requires hardware floating point");
          else
            target_flags &= ~ MASK_VSX;
        }
      else if (TARGET_PAIRED_FLOAT)
        msg = N_("-mvsx and -mpaired are incompatible");
      /* The hardware will allow VSX and little endian, but until we make sure
         things like vector select, etc. work don't allow VSX on little endian
         systems at this point.  */
      else if (!BYTES_BIG_ENDIAN)
        msg = N_("-mvsx used with little endian code");
      else if (TARGET_AVOID_XFORM > 0)
        msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
        {
          if (target_flags_explicit & MASK_VSX)
            msg = N_("-mvsx and -mno-altivec are incompatible");
          else
            msg = N_("-mno-altivec disables vsx");
        }

      if (msg)
        {
          warning (0, msg);
          target_flags &= ~ MASK_VSX;
          target_flags_explicit |= MASK_VSX;
        }
    }

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_VSX)
    target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_POPCNTD)
    target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_DFP)
    target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
  else if (TARGET_CMPB)
    target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
  else if (TARGET_FPRND)
    target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
  else if (TARGET_POPCNTB)
    target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
  else if (TARGET_ALTIVEC)
    target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
          || rs6000_cpu == PROCESSOR_PPCE500MC64
          || rs6000_cpu == PROCESSOR_PPCE5500
          || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  if (global_init_p)
    {
      /* If the appropriate debug option is enabled, replace the target hooks
         with debug versions that call the real version and then print
         debugging information.  */
      if (TARGET_DEBUG_COST)
        {
          targetm.rtx_costs = rs6000_debug_rtx_costs;
          targetm.address_cost = rs6000_debug_address_cost;
          targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
        }

      if (TARGET_DEBUG_ADDR)
        {
          targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
          targetm.legitimize_address = rs6000_debug_legitimize_address;
          rs6000_secondary_reload_class_ptr
            = rs6000_debug_secondary_reload_class;
          rs6000_secondary_memory_needed_ptr
            = rs6000_debug_secondary_memory_needed;
          rs6000_cannot_change_mode_class_ptr
            = rs6000_debug_cannot_change_mode_class;
          rs6000_preferred_reload_class_ptr
            = rs6000_debug_preferred_reload_class;
          rs6000_legitimize_reload_address_ptr
            = rs6000_debug_legitimize_reload_address;
          rs6000_mode_dependent_address_ptr
            = rs6000_debug_mode_dependent_address;
        }

      if (rs6000_veclibabi_name)
        {
          if (strcmp (rs6000_veclibabi_name, "mass") == 0)
            rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
          else
            {
              error ("unknown vectorization library ABI type (%s) for "
                     "-mveclibabi= switch", rs6000_veclibabi_name);
              ret = false;
            }
        }
    }

  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
          && (main_target_opt->x_rs6000_long_double_type_size
              != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
        error ("target attribute or pragma changes long double size");
      else
        rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
          && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
        {
          if (main_target_opt != NULL
              && !main_target_opt->x_rs6000_altivec_abi)
            error ("target attribute or pragma changes AltiVec ABI");
          else
            rs6000_altivec_abi = 1;
        }
    }

  /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden.  */
  if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
    TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
        error ("target attribute or pragma changes darwin64 ABI");
      else
        {
          rs6000_darwin64_abi = 1;
          /* Default to natural alignment, for better performance.  */
          rs6000_alignment_flags = MASK_ALIGN_NATURAL;
        }
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:

      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;

      target_flags &= ~MASK_STRING;

      break;

    default:

      if (have_cpu && !(target_flags_explicit & MASK_ISEL))
        target_flags &= ~MASK_ISEL;

      break;
    }

  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
        error ("target attribute or pragma changes single precision floating "
               "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
        error ("target attribute or pragma changes double precision floating "
               "point");
    }

  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
                        && rs6000_cpu != PROCESSOR_POWER5
                        && rs6000_cpu != PROCESSOR_POWER6
                        && rs6000_cpu != PROCESSOR_POWER7
                        && rs6000_cpu != PROCESSOR_PPCA2
                        && rs6000_cpu != PROCESSOR_CELL
                        && rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
                         || rs6000_cpu == PROCESSOR_POWER5
                         || rs6000_cpu == PROCESSOR_POWER7);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
                                 || rs6000_cpu == PROCESSOR_POWER5
                                 || rs6000_cpu == PROCESSOR_POWER6
                                 || rs6000_cpu == PROCESSOR_POWER7
                                 || rs6000_cpu == PROCESSOR_PPCE500MC
                                 || rs6000_cpu == PROCESSOR_PPCE500MC64
                                 || rs6000_cpu == PROCESSOR_PPCE5500
                                 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);

  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }

  if (global_init_p)
    {
#ifdef TARGET_REGNAMES
      /* If the user desires alternate register names, copy in the
         alternate names now.  */
      if (TARGET_REGNAMES)
        memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

      /* Set aix_struct_return last, after the ABI is determined.
         If -maix-struct-return or -msvr4-struct-return was explicitly
         used, don't override with the ABI default.  */
      if (!global_options_set.x_aix_struct_return)
        aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

#if 0
      /* IBM XL compiler defaults to unsigned bitfields.  */
      if (TARGET_XL_COMPAT)
        flag_signed_bitfields = 0;
#endif

      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
        REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

      if (TARGET_TOC)
        ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

      /* We can only guarantee the availability of DI pseudo-ops when
         assembling for 64-bit targets.  */
      if (!TARGET_64BIT)
        {
          targetm.asm_out.aligned_op.di = NULL;
          targetm.asm_out.unaligned_op.di = NULL;
        }

      /* Set branch target alignment, if not optimizing for size.  */
      if (!optimize_size)
        {
          /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
             aligned 8byte to avoid misprediction by the branch predictor.  */
          if (rs6000_cpu == PROCESSOR_TITAN
              || rs6000_cpu == PROCESSOR_CELL)
            {
              if (align_functions <= 0)
                align_functions = 8;
              if (align_jumps <= 0)
                align_jumps = 8;
              if (align_loops <= 0)
                align_loops = 8;
            }
          if (rs6000_align_branch_targets)
            {
              if (align_functions <= 0)
                align_functions = 16;
              if (align_jumps <= 0)
                align_jumps = 16;
              if (align_loops <= 0)
                {
                  can_override_loop_align = 1;
                  align_loops = 16;
                }
            }
          if (align_jumps_max_skip <= 0)
            align_jumps_max_skip = 15;
          if (align_loops_max_skip <= 0)
            align_loops_max_skip = 15;
        }

      /* Arrange to save and restore machine status around nested functions.  */
      init_machine_status = rs6000_init_machine_status;

      /* We should always be splitting complex arguments, but we can't break
         Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
      if (DEFAULT_ABI != ABI_AIX)
        targetm.calls.split_complex_arg = NULL;
    }
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }

  if (global_init_p)
    {
      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                             rs6000_cost->simultaneous_prefetches,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                             rs6000_cost->cache_line_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);

      /* If using typedef char *va_list, signal that
         __builtin_va_start (&ap, 0) can be optimized to
         ap = __builtin_next_arg (0).  */
      if (DEFAULT_ABI != ABI_V4)
        targetm.expand_builtin_va_start = NULL;
    }

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single nor double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);

  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = ((TARGET_RECIP_PRECISION)
                    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for -mrecip=%s", q);
                  invert = false;
                  mask = 0;
                  ret = false;
                }
            }

          if (invert)
            rs6000_recip_control &= ~mask;
          else
            rs6000_recip_control |= mask;
        }
    }

  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
             (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
             (rs6000_builtin_mask & RS6000_BTM_VSX)     ? ", vsx"     : "",
             (rs6000_builtin_mask & RS6000_BTM_PAIRED)  ? ", paired"  : "",
             (rs6000_builtin_mask & RS6000_BTM_SPE)     ? ", spe"     : "");

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node ();

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}
/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return
     the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
          || rs6000_cpu == PROCESSOR_POWER5
          || rs6000_cpu == PROCESSOR_POWER6
          || rs6000_cpu == PROCESSOR_POWER7))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
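/* For example, when rs6000_loop_align returns 5 (a 1 << 5 == 32 byte
   boundary), the hook above permits up to 31 bytes of padding before the
   loop; a returned alignment of 4 would permit at most 15.  */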
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N iterations.  This routine does not determine how many
   iterations are required to reach the desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
        return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
        return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
        return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
                                            const_tree type,
                                            int misalignment,
                                            bool is_packed)
{
  if (TARGET_VSX)
    {
      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
        return false;

      if (misalignment == -1)
        {
          /* Misalignment factor is unknown at compile time but we know
             it's word aligned.  */
          if (rs6000_vector_alignment_reachable (type, is_packed))
            {
              int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

              if (element_size == 64 || element_size == 32)
                return true;
            }

          return false;
        }

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
        return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
      case scalar_stmt:
      case scalar_load:
      case scalar_store:
      case vector_stmt:
      case vector_load:
      case vector_store:
      case vec_to_scalar:
      case scalar_to_vec:
      case cond_branch_not_taken:
        return 1;

      case vec_perm:
        if (TARGET_VSX)
          return 3;
        else
          return 1;

      case vec_promote_demote:
        if (TARGET_VSX)
          return 4;
        else
          return 1;

      case cond_branch_taken:
        return 3;

      case unaligned_load:
        if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
          {
            elements = TYPE_VECTOR_SUBPARTS (vectype);
            if (elements == 2)
              /* Double word aligned.  */
              return 2;

            if (elements == 4)
              {
                switch (misalign)
                  {
                    case 8:
                      /* Double word aligned.  */
                      return 2;

                    case -1:
                      /* Unknown misalignment.  */
                    case 4:
                    case 12:
                      /* Word aligned.  */
                      return 22;

                    default:
                      gcc_unreachable ();
                  }
              }
          }

        if (TARGET_ALTIVEC)
          /* Misaligned loads are not supported.  */
          gcc_unreachable ();

        return 2;

      case unaligned_store:
        if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
          {
            elements = TYPE_VECTOR_SUBPARTS (vectype);
            if (elements == 2)
              /* Double word aligned.  */
              return 2;

            if (elements == 4)
              {
                switch (misalign)
                  {
                    case 8:
                      /* Double word aligned.  */
                      return 2;

                    case -1:
                      /* Unknown misalignment.  */
                    case 4:
                    case 12:
                      /* Word aligned.  */
                      return 23;

                    default:
                      gcc_unreachable ();
                  }
              }
          }

        if (TARGET_ALTIVEC)
          /* Misaligned stores are not supported.  */
          gcc_unreachable ();

        return 2;

      case vec_construct:
        elements = TYPE_VECTOR_SUBPARTS (vectype);
        elem_type = TREE_TYPE (vectype);
        /* 32-bit vectors loaded into registers are stored as double
           precision, so we need n/2 converts in addition to the usual
           n/2 merges to construct a vector of short floats from them.  */
        if (SCALAR_FLOAT_TYPE_P (elem_type)
            && TYPE_PRECISION (elem_type) == 32)
          return elements + 1;
        else
          return elements / 2 + 1;

      default:
        gcc_unreachable ();
    }
}
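/* As an example of the vec_construct case above: building a V4SF from four
   32-bit floats costs elements + 1 = 5 because of the extra converts, while
   a V2DF built from two doubles costs elements / 2 + 1 = 2.  */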
/* Implement targetm.vectorize.preferred_simd_mode.  */
static enum machine_mode
rs6000_preferred_simd_mode (enum machine_mode mode)
{
  if (TARGET_VSX)
    switch (mode)
      {
      case DFmode:
        return V2DFmode;
      default:;
      }
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case SFmode:
        return V4SFmode;
      case DImode:
        return V2DImode;
      case SImode:
        return V4SImode;
      case HImode:
        return V8HImode;
      case QImode:
        return V16QImode;
      default:;
      }
  if (TARGET_SPE)
    switch (mode)
      {
      case SFmode:
        return V2SFmode;
      case SImode:
        return V2SImode;
      default:;
      }
  if (TARGET_PAIRED_FLOAT
      && mode == SFmode)
    return V2SFmode;
  return word_mode;
}
/* Implement targetm.vectorize.init_cost.  */
static void *
rs6000_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *cost = XNEWVEC (unsigned, 3);
  cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
  return cost;
}

/* Implement targetm.vectorize.add_stmt_cost.  */
static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  unsigned *cost = (unsigned *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost[where] += retval;
    }

  return retval;
}

/* Implement targetm.vectorize.finish_cost.  */
static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  unsigned *cost = (unsigned *) data;
  *prologue_cost = cost[vect_prologue];
  *body_cost = cost[vect_body];
  *epilogue_cost = cost[vect_epilogue];
}

/* Implement targetm.vectorize.destroy_cost_data.  */
static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  enum machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case BUILT_IN_ATAN2:
        case BUILT_IN_HYPOT:
        case BUILT_IN_POW:
          n_args = 2;
          /* fall through */

        case BUILT_IN_ACOS:
        case BUILT_IN_ACOSH:
        case BUILT_IN_ASIN:
        case BUILT_IN_ASINH:
        case BUILT_IN_ATAN:
        case BUILT_IN_ATANH:
        case BUILT_IN_CBRT:
        case BUILT_IN_COS:
        case BUILT_IN_COSH:
        case BUILT_IN_ERF:
        case BUILT_IN_ERFC:
        case BUILT_IN_EXP2:
        case BUILT_IN_EXP:
        case BUILT_IN_EXPM1:
        case BUILT_IN_LGAMMA:
        case BUILT_IN_LOG10:
        case BUILT_IN_LOG1P:
        case BUILT_IN_LOG2:
        case BUILT_IN_LOG:
        case BUILT_IN_SIN:
        case BUILT_IN_SINH:
        case BUILT_IN_SQRT:
        case BUILT_IN_TAN:
        case BUILT_IN_TANH:
          bdecl = builtin_decl_implicit (fn);
          suffix = "d2";                        /* pow -> powd2 */
          if (el_mode != DFmode
              || n != 2
              || !bdecl)
            return NULL_TREE;
          break;

        case BUILT_IN_ATAN2F:
        case BUILT_IN_HYPOTF:
        case BUILT_IN_POWF:
          n_args = 2;
          /* fall through */

        case BUILT_IN_ACOSF:
        case BUILT_IN_ACOSHF:
        case BUILT_IN_ASINF:
        case BUILT_IN_ASINHF:
        case BUILT_IN_ATANF:
        case BUILT_IN_ATANHF:
        case BUILT_IN_CBRTF:
        case BUILT_IN_COSF:
        case BUILT_IN_COSHF:
        case BUILT_IN_ERFF:
        case BUILT_IN_ERFCF:
        case BUILT_IN_EXP2F:
        case BUILT_IN_EXPF:
        case BUILT_IN_EXPM1F:
        case BUILT_IN_LGAMMAF:
        case BUILT_IN_LOG10F:
        case BUILT_IN_LOG1PF:
        case BUILT_IN_LOG2F:
        case BUILT_IN_LOGF:
        case BUILT_IN_SINF:
        case BUILT_IN_SINHF:
        case BUILT_IN_SQRTF:
        case BUILT_IN_TANF:
        case BUILT_IN_TANHF:
          bdecl = builtin_decl_implicit (fn);
          suffix = "4";                         /* powf -> powf4 */
          if (el_mode != SFmode
              || n != 4
              || !bdecl)
            return NULL_TREE;
          break;

        default:
          return NULL_TREE;
        }
    }
  else
    return NULL_TREE;

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
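/* For example, a V2DF use of __builtin_pow reaches the BUILT_IN_POW case
   above with suffix "d2": the "__builtin_" prefix is stripped and the
   suffix appended, so the declaration built refers to the MASS routine
   "powd2".  */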
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
                                    tree type_in)
{
  enum machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
             IDENTIFIER_POINTER (DECL_NAME (fndecl)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || !TARGET_VECTORIZE_BUILTINS)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case BUILT_IN_COPYSIGN:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
          break;
        case BUILT_IN_COPYSIGNF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
          break;
        case BUILT_IN_SQRT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
          break;
        case BUILT_IN_SQRTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
          break;
        case BUILT_IN_CEIL:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
          break;
        case BUILT_IN_CEILF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
          break;
        case BUILT_IN_FLOOR:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
          break;
        case BUILT_IN_FLOORF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
          break;
        case BUILT_IN_FMA:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
          break;
        case BUILT_IN_FMAF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
          else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
                   && out_mode == SFmode && out_n == 4
                   && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
          break;
        case BUILT_IN_TRUNC:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
          break;
        case BUILT_IN_TRUNCF:
          if (out_mode != SFmode || out_n != 4
              || in_mode != SFmode || in_n != 4)
            break;
          if (VECTOR_UNIT_VSX_P (V4SFmode))
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
          if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
          break;
        case BUILT_IN_NEARBYINT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && flag_unsafe_math_optimizations
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
          break;
        case BUILT_IN_NEARBYINTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && flag_unsafe_math_optimizations
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
          break;
        case BUILT_IN_RINT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && !flag_trapping_math
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
          break;
        case BUILT_IN_RINTF:
          if (VECTOR_UNIT_VSX_P (V4SFmode)
              && !flag_trapping_math
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
          break;
        default:
          break;
        }
    }
  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn
        = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
      switch (fn)
        {
        case RS6000_BUILTIN_RSQRTF:
          if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
          break;
        case RS6000_BUILTIN_RSQRT:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
          break;
        case RS6000_BUILTIN_RECIPF:
          if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
              && out_mode == SFmode && out_n == 4
              && in_mode == SFmode && in_n == 4)
            return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
          break;
        case RS6000_BUILTIN_RECIP:
          if (VECTOR_UNIT_VSX_P (V2DFmode)
              && out_mode == DFmode && out_n == 2
              && in_mode == DFmode && in_n == 2)
            return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
          break;
        default:
          break;
        }
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (fndecl, type_out, type_in);

  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */
static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
        {
          fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
          start = "";
        }

      if (global_options_set.x_rs6000_cpu_index)
        {
          fprintf (file, "%s -mcpu=%s", start,
                   processor_target_table[rs6000_cpu_index].name);
          start = "";
        }

      if (global_options_set.x_rs6000_tune_index)
        {
          fprintf (file, "%s -mtune=%s", start,
                   processor_target_table[rs6000_tune_index].name);
          start = "";
        }

      if (PPC405_ERRATUM77)
        {
          fprintf (file, "%s PPC405CR_ERRATUM77", start);
          start = "";
        }

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
        {
        case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
        case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
        case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
        case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
        }

      if (rs6000_sdata && g_switch_value)
        {
          fprintf (file, "%s -G %d", start,
                   g_switch_value);
          start = "";
        }
#endif

      if (*start == '\0')
        putc ('\n', file);
    }

  if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
    {
      switch_to_section (toc_section);
      switch_to_section (text_section);
    }
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with {cal|addi} */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with {cau|addis} */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif

  else
    return 2;
}
int
num_insns_constant (rtx op, enum machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && mask64_operand (op, mode))
	return 2;
      else
#endif
	return num_insns_constant_wide (INTVAL (op));

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (rv, l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      if (mode == VOIDmode || mode == DImode)
	{
	  high = CONST_DOUBLE_HIGH (op);
	  low  = CONST_DOUBLE_LOW (op);
	}
      else
	{
	  long l[2];
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
	  high = l[WORDS_BIG_ENDIAN == 0];
	  low  = l[WORDS_BIG_ENDIAN != 0];
	}

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (mask64_operand (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, nunits - 1);
  splat_val = val;
  msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit, which can be
     done by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  if (mode == V2DImode)
    {
      /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
	 easy.  */
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      if (zero_constant (vec, mode))
	return "xxlxor %x0,%x0,%x0";

      if (mode == V2DImode
	  && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
	return "vspltisw %0,-1";
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     patterns of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
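/* For instance, output_vec_const_move turns an all-zero vector into a
   single "xxlxor %x0,%x0,%x0" under VSX or "vxor %0,%0,%0" under plain
   AltiVec, and a V4SI of all 5s into "vspltisw %0,5".  */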
/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}
void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_INT_P (x)
	    || GET_CODE (x) == CONST_DOUBLE
	    || GET_CODE (x) == CONST_FIXED))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target,
				  gen_rtx_XOR (mode, target, target)));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx op1 = XVECEXP (vals, 0, 1);
      if (all_same)
	{
	  if (!MEM_P (op0) && !REG_P (op0))
	    op0 = force_reg (inner_mode, op0);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op0));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op0));
	}
      else
	{
	  op0 = force_reg (inner_mode, op0);
	  op1 = force_reg (inner_mode, op1);
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op0, op1));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op0, op1));
	}
      return;
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
    {
      if (all_same)
	{
	  rtx freg = gen_reg_rtx (V4SFmode);
	  rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));

	  emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
	  emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd  = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd  = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	  emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	  emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	  emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	  rs6000_expand_extract_even (target, flt_even, flt_odd);
	}
      return;
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (VOIDmode,
							   target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx (*set_func) (rtx, rtx, rtx, rtx)
	= ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
      emit_insn (set_func (target, target, val, GEN_INT (elt)));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
  x = gen_rtx_UNSPEC (mode,
		      gen_rtvec (3, target, reg,
				 force_reg (V16QImode, x)),
		      UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode))
    {
      switch (mode)
	{
	default:
	  break;
	case V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
	  return;
	case V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
	  return;
	case V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
	  return;
	}
    }

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS    ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void)in;
  (void)out;
  gcc_unreachable ();
#endif
}
/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
	 subreg:TI and reg:TF.  Decimal float modes are like integer
	 modes (only low part of each register used) for this
	 purpose.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == SImode || mode == DImode || mode == TImode
	      || mode == DDmode || mode == TDmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DFmode
	      || GET_MODE (SUBREG_REG (op)) == TFmode))
	return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
	 reg:TI.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == DFmode || mode == TFmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DImode
	      || GET_MODE (SUBREG_REG (op)) == TImode
	      || GET_MODE (SUBREG_REG (op)) == DDmode
	      || GET_MODE (SUBREG_REG (op)) == TDmode))
	return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  */

bool
mem_operand_gpr (rtx op, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;

  op = address_offset (XEXP (op, 0));
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;
  else if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  return offset + 0x8000 < 0x10000u - extra;
}
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return false;
      break;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      break;

    default:
      break;
    }

  return true;
}
static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if memory accesses to OP are known to never straddle
   a 32k boundary.  */

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     enum machine_mode mode)
{
  tree decl, type;
  unsigned HOST_WIDE_INT dsize, dalign;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  decl = SYMBOL_REF_DECL (op);
  if (!decl)
    {
      if (GET_MODE_SIZE (mode) == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = BITS_PER_UNIT;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);
	  HOST_WIDE_INT lsb, mask;

	  /* Given the alignment of the block..  */
	  dalign = block->alignment;
	  mask = dalign / BITS_PER_UNIT - 1;

	  /* ..and the combined offset of the anchor and any offset
	     to this block object..  */
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	  lsb = offset & -offset;

	  /* ..find how many bits of the alignment we know for the
	     object.  */
	  mask &= lsb - 1;
	  dalign = mask + 1;
	}
      return dalign >= GET_MODE_SIZE (mode);
    }

  if (DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL)
	return true;

      if (!DECL_SIZE_UNIT (decl))
	return false;

      if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
	return false;

      dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
      if (dsize > 32768)
	return false;

      dalign = DECL_ALIGN_UNIT (decl);
      return dalign >= dsize;
    }

  type = TREE_TYPE (decl);

  if (TREE_CODE (decl) == STRING_CST)
    dsize = TREE_STRING_LENGTH (decl);
  else if (TYPE_SIZE_UNIT (type)
	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
    dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  else
    return false;
  if (dsize > 32768)
    return false;

  dalign = TYPE_ALIGN (type);
  if (CONSTANT_CLASS_P (decl))
    dalign = CONSTANT_ALIGNMENT (decl, dalign);
  else
    dalign = DATA_ALIGNMENT (decl, dalign);
  dalign /= BITS_PER_UNIT;
  return dalign >= dsize;
}
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
static const_rtx tocrel_base, tocrel_offset;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match high part or
   non-split -mcmodel=large/medium toc pointer relative addresses.  */

bool
toc_relative_expr_p (const_rtx op, bool strict)
{
  if (!op)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* Only match the low part.  */
      if (GET_CODE (op) == LO_SUM
	  && REG_P (XEXP (op, 0))
	  && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
	op = XEXP (op, 1);
      else if (strict)
	return false;
    }

  tocrel_base = op;
  tocrel_offset = const0_rtx;
  if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
				    bool strict)
{
  return (toc_relative_expr_p (x, strict)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || mode == QImode
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}
static bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
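/* That is, the offset must be one of 0, 8, 16, ..., 248: a 5-bit count
   of doublewords scaled by 8, matching the SPE evldd/evstdd encoding.  */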
bool
rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

    case DFmode:
    case DDmode:
    case DImode:
      /* On e500v2, we may have:

	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

	 Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (mode == DFmode && VECTOR_MEM_VSX_P (DFmode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case TFmode:
    case TDmode:
    case TImode:
      if (TARGET_E500_DOUBLE)
	return (SPE_CONST_OFFSET_OK (offset)
		&& SPE_CONST_OFFSET_OK (offset + 8));

      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
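/* So, for example, (plus (reg) (const_int 16)) is accepted for SImode,
   while in the 32-bit worst case a TFmode access (extra == 12) must keep
   its offset below 0x8000 - 12 so that every word stays reachable.  */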
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
  if (!strict
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)
      && REG_P (op1))
    return true;

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (enum machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}
bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
	return false;
      if (TARGET_TOC)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
	       && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x);
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   enum machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS)
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case TFmode:
    case TDmode:
    case TImode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  */
      extra = 8;
      break;
    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra)
      && !(SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if (SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
    {
      if (mode == DImode)
	return x;
      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);

	  if (GET_CODE (op2) != REG
	      && (GET_CODE (op2) != CONST_INT
		  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
		  || (GET_MODE_SIZE (mode) > 8
		      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
	    op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  rtx ret;
  rtx insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
#ifdef ENABLE_CHECKING
      if (REG_P (XVECEXP (y, 0, 1))
	  && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
	{
	  /* All good.  */
	}
      else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
	{
	  /* Weirdness alert.  df_note_compute can replace r2 with a
	     debug_expr when this unspec is in a debug_insn.
	     Seen in gcc.dg/pr51957-1.c  */
	}
      else
	gcc_unreachable ();
#endif
      y = XVECEXP (y, 0, 0);
      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      enum machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}
/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab, last;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
				   1, const0_rtx, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (Pmode, dest,
				  gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);
	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
/* Return 1 if X contains a thread-local symbol.  */

static bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  return rs6000_tls_referenced_p (x);
}
/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && !CONSTANT_POOL_ADDRESS_P (sym)
	      && SYMBOL_REF_LOCAL_P (sym)));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addi/addis and the mem insn.
   This cuts number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
static rtx
rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
				  int opnum, int type,
				  int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
     DFmode/DImode MEM.  */
  if (reg_offset_p
      && opnum == 1
      && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
	  || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
    reg_offset_p = false;

  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Likewise for (lo_sum (high ...) ...) output we have generated.  */
  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
    {
      /* Result of previous invocation of this function on Darwin
	 floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
#endif

  if (TARGET_CMODEL != CMODEL_SMALL
      && reg_offset_p
      && small_toc_ref (x, VOIDmode))
    {
      rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
      x = gen_rtx_LO_SUM (Pmode, hi, x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Force ld/std non-word aligned offset into base register by wrapping
     in offset 0.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < 32
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && reg_offset_p
      && (INTVAL (XEXP (x, 1)) & 3) != 0
      && VECTOR_MEM_NONE_P (mode)
      && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
      && TARGET_POWERPC64)
    {
      x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && reg_offset_p
      && !SPE_VECTOR_MODE (mode)
      && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
				  || mode == DDmode || mode == TDmode
				  || mode == DImode))
      && VECTOR_MEM_NONE_P (mode))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	{
	  *win = 0;
	  return x;
	}

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */

      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && VECTOR_MEM_NONE_P (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
	 The same goes for DImode without 64-bit gprs and DFmode and DDmode
	 without fprs.
	 ??? Assume floating point reg based on mode?  This assumption is
	 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
	 where reload ends up doing a DFmode load of a constant from
	 mem using two gprs.  Unfortunately, at this point reload
	 hasn't yet selected regs so poking around in reload data
	 won't help and even if we could figure out the regs reliably,
	 we'd still want to allow this transformation when the mem is
	 naturally aligned.  Since we say the address is good here, we
	 can't disable offsets from LO_SUMs in mem_operand_gpr.
	 FIXME: Allow offset from lo_sum for other modes too, when
	 mem is sufficiently aligned.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = machopic_gen_offset (x);
	  x = gen_rtx_LO_SUM (GET_MODE (x),
			      gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					    gen_rtx_HIGH (Pmode, offset)), offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
			    gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
	push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		     BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		     opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
  *win = 0;
  return x;
}
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
					int opnum, int type,
					int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
					      ind_levels, win);

  fprintf (stderr,
	   "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
	   "type = %d, ind_levels = %d, win = %d, original addr:\n",
	   GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (virtual_stack_registers_memory_p (x))
    return 1;
  if (reg_offset_p && legitimate_small_data_p (mode, x))
    return 1;
  if (reg_offset_p
      && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode)
	  || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
					      reg_ok_strict, false)
	  || (!avoiding_indexed_address_p (mode)
	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
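
/* Illustrative sketch (editor addition): typical RTL shapes accepted
   above, assuming a base register r9:

     (reg:SI 9)				      register indirect
     (plus:SI (reg:SI 9) (const_int 16))      reg + 16-bit displacement
     (plus:SI (reg:SI 9) (reg:SI 10))	      reg + reg (indexed)
     (pre_inc:SI (reg:SI 9))		      auto-increment (TARGET_UPDATE)
     (lo_sum:SI (reg:SI 9) (symbol_ref "x"))  low part of a symbol  */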
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
				   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
	   "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
	   "strict = %d, code = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (mode),
	   reg_ok_strict,
	   GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them), and of pre-increment and
   pre-decrement addresses.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
	 is considered a legitimate address before reload, so there
	 are no offset restrictions in that case.  Note that this
	 condition is safe in strict mode because any address involving
	 virtual_stack_vars_rtx or arg_pointer_rtx would already have
	 been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
	  && XEXP (addr, 0) != arg_pointer_rtx
	  && GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
	}
      break;

    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
	 all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}
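
/* Illustrative sketch (editor addition): the PLUS case above treats an
   address as mode-dependent when the displacement plus the largest
   sub-word offset added during output (8 bytes for -m64, 12 for -m32)
   could leave the signed 16-bit range.  E.g. val = 32760 (0x7ff8):
   32760 + 0x8000 == 0x10000 - 8, so the test fires under -m64.  */
#if 0
static bool
example_offset_is_mode_dependent (unsigned HOST_WIDE_INT val, bool is64)
{
  /* Same expression as the PLUS case of rs6000_mode_dependent_address.  */
  return val + 0x8000 >= 0x10000 - (is64 ? 8 : 12);
}
#endif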
/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
	   ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
		|| GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
					     true, worst_case);
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */
  fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
	 registers in prologues and epilogues.  We no longer use r14
	 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
	 pool for link-compatibility with older versions of GCC.  Once
	 "old" code has died out, we can return r14 to the allocation
	 pool.  */
      fixed_regs[14]
	= call_used_regs[14]
	= call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
		       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
	dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
			      GEN_INT (INTVAL (source)
				       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			      gen_rtx_IOR (SImode, copy_rtx (result),
					   GEN_INT (INTVAL (source)
						    & 0xffff))));
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
	{
	case CONST_INT:
	  c0 = INTVAL (source);
	  c1 = -(c0 < 0);
	  break;

	case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = -(c0 < 0);
#else
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = CONST_DOUBLE_HIGH (source);
#endif
	  break;

	default:
	  gcc_unreachable ();
	}

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
					DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
					DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
	  || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
	{
	  if (ud1 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
	  else
	    emit_move_insn (dest, GEN_INT (ud1));
	}

      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	       || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
	{
	  if (ud2 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud2 << 16));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else if (ud3 == 0 && ud4 == 0)
	{
	  gcc_assert (ud2 & 0x8000);
	  emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					 - 0x80000000));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ZERO_EXTEND (DImode,
					       gen_lowpart (SImode,
							    copy_rtx (dest))));
	}
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
	       || (ud4 == 0 && ! (ud3 & 0x8000)))
	{
	  if (ud3 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud3 << 16));

	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else
	{
	  if (ud4 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud4 << 16));

	  if (ud3 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud3)));

	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (32)));
	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2 << 16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
    }
  return dest;
}
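
/* Illustrative sketch (editor addition): the constant is handled as
   four 16-bit doublets, ud1 being the least significant.  For
   c = 0x123456789abcdef0 the final branch above emits the classic
   five-instruction sequence:

	lis   rD, 0x1234	(set ud4 << 16)
	ori   rD, rD, 0x5678	(| ud3)
	sldi  rD, rD, 32	(shift to the high doubleword)
	oris  rD, rD, 0x9abc	(| ud2 << 16)
	ori   rD, rD, 0xdef0	(| ud1)  */
#if 0
static void
example_split_doublets (unsigned HOST_WIDE_INT c)
{
  unsigned HOST_WIDE_INT ud1 = c & 0xffff;
  unsigned HOST_WIDE_INT ud2 = (c >> 16) & 0xffff;
  unsigned HOST_WIDE_INT ud3 = (c >> 32) & 0xffff;
  unsigned HOST_WIDE_INT ud4 = (c >> 48) & 0xffff;

  /* For c = 0x123456789abcdef0: ud4 = 0x1234, ud3 = 0x5678,
     ud2 = 0x9abc, ud1 = 0xdef0.  */
  fprintf (stderr, "%04lx %04lx %04lx %04lx\n",
	   (unsigned long) ud4, (unsigned long) ud3,
	   (unsigned long) ud2, (unsigned long) ud1);
}
#endif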
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (reload_in_progress)
    return;

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
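
/* Illustrative sketch (editor addition): the effect of the helper above
   on an indexed TImode reference is

     (mem:TI (plus:SI (reg:SI 3) (reg:SI 4)))
       -->  rT := r3 + r4;  (mem:TI (reg:SI rT))

   where rT is a fresh pseudo created by copy_addr_to_reg.  */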
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
	       "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
	       GET_MODE_NAME (mode),
	       reload_in_progress,
	       reload_completed,
	       can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
	 to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
	      || FLOAT_MODE_P (mode)
	      || ((CONST_DOUBLE_HIGH (operands[1]) != 0
		   || CONST_DOUBLE_LOW (operands[1]) < 0)
		  && (CONST_DOUBLE_HIGH (operands[1]) != -1
		      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
	  || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
					    ? 32 : MEM_ALIGN (operands[0])))
	    || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
					       ? 32
					       : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  if (mode == SFmode && ! TARGET_POWERPC
      && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
      && GET_CODE (operands[0]) == MEM)
    {
      int regnum;

      if (reload_in_progress || reload_completed)
	regnum = true_regnum (operands[1]);
      else if (GET_CODE (operands[1]) == REG)
	regnum = REGNO (operands[1]);
      else
	regnum = -1;

      /* If operands[1] is a register, on POWER it may have
	 double-precision data in it, so truncate it to single
	 precision.  */
      if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
	{
	  rtx newreg;
	  newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
		    : gen_reg_rtx (mode));
	  emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
	  operands[1] = newreg;
	}
    }

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
	  || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
			simplify_gen_subreg (DFmode, operands[1], mode, 0),
			DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
					     GET_MODE_SIZE (DFmode)),
			simplify_gen_subreg (DFmode, operands[1], mode,
					     GET_MODE_SIZE (DFmode)),
			DFmode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);

  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_store (mem, operands[1]));
	}
      else if (INT_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (mem, operands[1]));
	}
      else
	gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_load (operands[0], mem));
	}
      else if (INT_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (operands[0], mem));
	}
      else
	gcc_unreachable ();
      return;
    }

  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
    case V2DFmode:
    case V2DImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (VOIDmode,
					    operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && use_toc_relative_ref (operands[1]))
	operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && GET_CODE (operands[1]) != HIGH
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && (num_insns_constant (operands[1], mode)
			   > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && !toc_relative_expr_p (operands[1], false)
	       && (TARGET_CMODEL == CMODEL_SMALL
		   || can_create_pseudo_p ()
		   || (REG_P (operands[0])
		       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
	{

#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	      return;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  Don't do
	     this if reload is in progress.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      emit_insn (gen_add3_insn (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && constant_pool_expr_p (XEXP (operands[1], 0))
	      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
			get_pool_constant (XEXP (operands[1], 0)),
			get_pool_mode (XEXP (operands[1], 0))))
	    {
	      rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
						 operands[0]);
	      operands[1] = gen_const_mem (mode, tocref);
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
	rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.
   
   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (0, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (cgraph_state == CGRAPH_STATE_EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_get_node (fndecl);
      c_node = cgraph_function_or_thunk_node (c_node, NULL);
      return !cgraph_only_called_directly_p (c_node);
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args,
		      tree fndecl ATTRIBUTE_UNUSED,
		      enum machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   tree_code_name[ (int)TREE_CODE (ret_type) ]);
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4)
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
	{
	  tree return_type;

	  if (fntype)
	    {
	      return_type = TREE_TYPE (fntype);
	      return_mode = TYPE_MODE (return_type);
	    }
	  else
	    return_type = lang_hooks.types.type_for_mode (return_mode, 0);

	  if (return_type != NULL)
	    {
	      if (TREE_CODE (return_type) == RECORD_TYPE
		  && TYPE_TRANSPARENT_AGGR (return_type))
		{
		  return_type = TREE_TYPE (first_field (return_type));
		  return_mode = TYPE_MODE (return_type);
		}
	      if (AGGREGATE_TYPE_P (return_type)
		  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
		      <= 8))
		rs6000_returns_struct = true;
	    }
	  if (SCALAR_FLOAT_MODE_P (return_mode))
	    rs6000_passes_float = true;
	  else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
		   || SPE_VECTOR_MODE (return_mode))
	    rs6000_passes_vector = true;
	}
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use -maltivec"
	     " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
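
/* Illustrative sketch (editor addition): on a big-endian target the
   rule above pads a 2-byte struct downward (it sits in the low-order,
   rightmost bytes of its word, like a short), while a 3-byte struct
   pads upward (left-justified, as memory images of aggregates
   normally are).  */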
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && TARGET_FPRS
	      && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;
  else if (TARGET_MACHO
	   && rs6000_darwin64_abi
	   && mode == BLKmode
	   && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
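
/* Illustrative sketch (editor addition): under the V.4 ABI a double or
   long long (GET_MODE_SIZE == 8) answers 64 here; an Altivec V4SImode
   vector answers 128; a plain int falls through to PARM_BOUNDARY
   (32 bits for -m32).  */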
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
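
/* Illustrative sketch (editor addition): under V.4 the parameter save
   area begins 2 words into the frame, so parm_offset = 2.  For a
   quadword-aligned argument in 32-bit mode, align = 128/32 - 1 = 3;
   with nwords = 1 already used, -(2 + 1) & 3 = 1 word of padding, so
   the argument starts at word 2, matching the "2 mod 4" placement
   described for vector parameters below.  */
#if 0
static unsigned int
example_parm_start_v4_vector (unsigned int nwords)
{
  unsigned int align = 128 / 32 - 1;	/* quadword boundary, -m32 */
  unsigned int parm_offset = 2;		/* V.4 save area offset in words */
  return nwords + (-(parm_offset + nwords) & align);
}
#endif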
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
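
/* Illustrative sketch (editor addition): a 10-byte BLKmode argument
   occupies (10 + 3) >> 2 = 3 words under -m32 but (10 + 7) >> 3 = 2
   words under -m64.  */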
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
	  || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	}
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->fregno += n_fpregs;
	    /* Single-precision floats present a special problem for
	       us, because they are smaller than an 8-byte GPR, and so
	       the structure-packing rules combined with the standard
	       varargs behavior mean that we want to pack float/float
	       and float/int combinations into a single register's
	       space.  This is complicated by the arg advance flushing,
	       which works on arbitrarily large groups of int-type
	       fields.  */
	    if (mode == SFmode)
	      {
		if (cum->floats_in_gpr == 1)
		  {
		    /* Two floats in a word; count the word and reset
		       the float count.  */
		    cum->words++;
		    cum->floats_in_gpr = 0;
		  }
		else if (bitpos % 64 == 0)
		  {
		    /* A float at the beginning of an 8-byte word;
		       count it and put off adjusting cum->words until
		       we see if an arg advance flush is going to do it
		       for us.  */
		    cum->floats_in_gpr++;
		  }
		else
		  {
		    /* The float is at the end of a word, preceded
		       by integer fields, so the arg advance flush
		       just above has already set cum->words and
		       everything is taken care of.  */
		  }
	      }
	    else
	      cum->words += n_fpregs;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	  || (type && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			       const_tree type, bool named, int depth)
{
  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (DEFAULT_ABI == ABI_V4
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	rs6000_passes_float = true;
      else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	rs6000_passes_vector = true;
      else if (SPE_VECTOR_MODE (mode)
	       && !cum->stdarg
	       && cum->sysv_gregno <= GP_ARG_MAX_REG)
	rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
	{
	  cum->vregno++;
	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use -maltivec"
		   " to enable them");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  This places
	     them at 2 mod 4 in terms of words in 32-bit mode, since
	     the parameter save area starts at offset 24 from the
	     stack.  In 64-bit mode, they just have to start on an
	     even word, since the parameter save area is 16-byte
	     aligned.  Space for GPRs is reserved even if the argument
	     will be passed in memory.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
	   && !cum->stdarg
	   && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  cum->floats_in_gpr = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT, 1);
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
		   cum->words, TYPE_ALIGN (type), size);
	  fprintf (stderr,
		   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
		   cum->nargs_prototype, cum->prototype,
		   GET_MODE_NAME (mode));
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || mode == TFmode
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long and SPE vectors are aligned on the stack.
		 So are other 2 word items such as complex int due to
		 a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past when we've started
	     spilling to the stack indicates the fact that we've started
	     spilling to the stack to expand_builtin_saveregs.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
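
/* Illustrative sketch (editor addition): the V.4 pairing rule used
   above, gregno += (1 - gregno) & 1, bumps an even register number up
   to the next odd one so that 2-word items land in (r3,r4), (r5,r6),
   (r7,r8) or (r9,r10): gregno 3 -> 3, 4 -> 5, 5 -> 5, 6 -> 7.  */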
static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
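
/* Illustrative sketch (editor addition): for a DCmode argument
   starting at GPR rN, the function above builds roughly

     (parallel:DC [(expr_list (reg:DI rN)   (const_int 0))
		   (expr_list (reg:DI rN+2) (const_int 8))])

   i.e. two 64-bit halves at byte offsets 0 and 8 of the value.  */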
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 const_tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     doubles are passed and returned in a pair of GPRs for ABI
     compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
			     || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
	gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
	gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
	{
	  rtx r1, r2;
	  enum machine_mode m = SImode;

	  r1 = gen_rtx_REG (m, gregno);
	  r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
	  r2 = gen_rtx_REG (m, gregno + 1);
	  r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
	}
      else
	return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
	return gen_rtx_REG (mode, gregno);
      else
	return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
	    switch (mode)
	      {
	      case SCmode: mode = SFmode; break;
	      case DCmode: mode = DFmode; break;
	      case TCmode: mode = TFmode; break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	      {
		gcc_assert (cum->fregno == FP_ARG_MAX_REG
			    && (mode == TFmode || mode == TDmode));
		/* Long double or _Decimal128 split over regs and memory.  */
		mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
		cum->use_stack = 1;
	      }
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (mode == TFmode || mode == TDmode)
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
	/* Vector parameters get passed in vector register
	   and also in GPRs or memory, in absence of prototype.  */
	int align_words;
	rtx slot;
	align_words = (cum->words + 1) & ~1;

	if (align_words >= GP_ARG_NUM_REG)
	  slot = NULL_RTX;
	else
	  slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	return gen_rtx_PARALLEL (mode,
		 gen_rtvec (2,
			    gen_rtx_EXPR_LIST (VOIDmode,
					       slot, const0_rtx),
			    gen_rtx_EXPR_LIST (VOIDmode,
					       gen_rtx_REG (mode, cum->vregno),
					       const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  This places them at
	     2 mod 4 in terms of words in 32-bit mode, since the parameter
	     save area starts at offset 24 from the stack.  In 64-bit mode,
	     they just have to start on an even word, since the parameter
	     save area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
	{
	  rtx rvec[GP_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  bool needs_psave;
	  enum machine_mode fmode = mode;
	  unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	  if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	    {
	      /* Currently, we only ever need one reg here because complex
		 doubles are split.  */
	      gcc_assert (cum->fregno == FP_ARG_MAX_REG
			  && (fmode == TFmode || fmode == TDmode));

	      /* Long double or _Decimal128 split over regs and memory.  */
	      fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
	    }

	  /* Do we also need to pass this arg in the parameter save
	     area?  */
	  needs_psave = (type
			 && (cum->nargs_prototype <= 0
			     || (DEFAULT_ABI == ABI_AIX
				 && TARGET_XL_COMPAT
				 && align_words >= GP_ARG_NUM_REG)));

	  if (!needs_psave && mode == fmode)
	    return gen_rtx_REG (fmode, cum->fregno);

	  if (needs_psave)
	    {
	      /* Describe the part that goes in gprs or the stack.
		 This piece must come first, before the fprs.  */
	      if (align_words < GP_ARG_NUM_REG)
		{
		  unsigned long n_words = rs6000_arg_size (mode, type);

		  if (align_words + n_words > GP_ARG_NUM_REG
		      || (TARGET_32BIT && TARGET_POWERPC64))
		    {
		      /* If this is partially on the stack, then we only
			 include the portion actually in registers here.  */
		      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
		      int i = 0;

		      if (align_words + n_words > GP_ARG_NUM_REG)
			/* Not all of the arg fits in gprs.  Say that it
			   goes in memory too, using a magic NULL_RTX
			   component.  Also see comment in
			   rs6000_mixed_function_arg for why the normal
			   function_arg_partial_nregs scheme doesn't work
			   in this case.  */
			rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
						       const0_rtx);

		      do
			{
			  r = gen_rtx_REG (rmode,
					   GP_ARG_MIN_REG + align_words);
			  off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
			  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
			}
		      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
		    }
		  else
		    {
		      /* The whole arg fits in gprs.  */
		      r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
		      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
		    }
		}
	      else
		/* It's entirely in memory.  */
		rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  /* Describe where this piece goes in the fprs.  */
	  r = gen_rtx_REG (fmode, cum->fregno);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  if (mode == BLKmode)
	    mode = Pmode;

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as fprs, then this function should
	 return the number of partial bytes passed in the parameter
	 save area rather than partial bytes passed in fprs.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || (DEFAULT_ABI == ABI_AIX
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
	       > FP_ARG_MAX_REG + 1)
	ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
	return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	{
	  /* V4 va_list_gpr_size counts number of registers needed.  */
	  if (nregs > cfun->va_list_gpr_size)
	    nregs = cfun->va_list_gpr_size;
	}
      else
	{
	  /* char * va_list instead counts number of bytes needed.  */
	  if (nregs > cfun->va_list_gpr_size / reg_size)
	    nregs = cfun->va_list_gpr_size / reg_size;
	}

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			 ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			  ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
	rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  tree t;
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
	  || (TARGET_DOUBLE_FLOAT
	      && (TYPE_MODE (type) == DFmode
		  || TYPE_MODE (type) == TFmode
		  || TYPE_MODE (type) == SDmode
		  || TYPE_MODE (type) == DDmode
		  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for the special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed", name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};
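/* To make the X-macro scheme above concrete (an addition for
   exposition, not part of the original source): a hypothetical line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
		       MASK, ATTR, CODE_FOR_fmav4sf4)

   expands here to the table entry

     { MASK, CODE_FOR_fmav4sf4, "__builtin_altivec_vmaddfp",
       ALTIVEC_BUILTIN_VMADDFP },

   while the other nine RS6000_BUILTIN_* macros expand to nothing, so
   each bdesc_* table below picks out exactly one class of entries from
   the same .def file.  */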
/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};
/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};
/* SPE predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_predicates[] =
{
#include "rs6000-builtin.def"
};
/* SPE evsel predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_evsel[] =
{
#include "rs6000-builtin.def"
};
/* PAIRED predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};
/* ABS* operations.  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op2);
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op2);
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
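
/* As an illustration of the literal checks above (user-level sketch,
   assuming the altivec.h mapping of vec_sld onto the vsldoi patterns):

     vector signed int a, b, r;
     r = vec_sld (a, b, 3);      accepted: 3 is a 4-bit unsigned literal
     r = vec_sld (a, b, n);      rejected with "argument 3 must be a
                                 4-bit unsigned literal" when n is not
                                 a compile-time constant

   The shift count is encoded directly in the instruction, so it can
   never come from a register; hence the INTEGER_CST requirement.  */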
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
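
/* User-level sketch (assuming the altivec.h vec_dst family): the data
   stream touch builtins carry a 2-bit stream tag as their last argument,

     vec_dst (p, ctl, 0);      touch stream 0
     vec_dstt (p, ctl, 3);     transient variant, stream 3

   so any tag outside 0..3, or a non-constant tag, is diagnosed by the
   INTEGER_CST check above before any rtl is generated.  */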
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
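
/* Sketch of the expansion above: for a hypothetical call initializing a
   V4SI vector from four scalars,

     v = __builtin_vec_init_v4si (a, b, c, d);

   N_ELT is 4, each argument is expanded and narrowed with gen_lowpart,
   and the resulting PARALLEL {a, b, c, d} is handed to
   rs6000_expand_vector_init to emit the actual element insertions.  */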
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
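
/* For example, with a V4SI vector TYPE_VECTOR_SUBPARTS is 4, so MAX is
   3: selectors 0..3 are returned unchanged, while a selector of 4 (or a
   non-constant selector) produces
     "selector must be an integer constant in the range 0..3".  */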
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
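
/* User-level sketch (assuming the __builtin_vec_ext_* entry points):

     vector float v;
     float f = __builtin_vec_ext_v4sf (v, 2);

   validates the selector through get_element_number above, then lets
   rs6000_expand_vector_extract emit the actual element move.  */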
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
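
/* Sketch of how this table is consumed: spe_expand_builtin below walks
   it and, on a match against d->code, defers to the generic binary
   expander.  For example a call

     __ev64_opaque__ v = __builtin_spe_evlddx (p, i);

   hits the SPE_BUILTIN_EVLDDX row and becomes
   rs6000_expand_binop_builtin (CODE_FOR_spe_evlddx, exp, target), so
   none of these indexed loads needs a hand-written expander.  */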
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
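
/* Worked example of the CR mapping above (user-level sketch; the
   predicate builtins take the variant selector as their first argument):

     int any_gt = __builtin_spe_evcmpgts (1, a, b);

   One compare sets all four CR bits; FORM_INT == 1 selects the EQ bit,
   which holds (U | L), so ANY_GT is nonzero when either element of A
   compares greater than the corresponding element of B.  */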
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
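
/* Worked example (sketch, assuming the __builtin_spe_evsel_gts variant):
   with V2SI operands

     a = {1, 5}, b = {2, 3}, c = {10, 20}, d = {30, 40}

   the compare yields {false, true} elementwise (1 > 2 fails, 5 > 3
   holds), so the evsel selects e = {30, 20}: D's upper element and C's
   lower element.  */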
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
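
/* For example (sketch): calling a VSX builtin such as
   __builtin_vsx_xvadddp in a unit compiled without -mvsx fails the mask
   check in rs6000_expand_builtin, lands here, reports

     Builtin function __builtin_vsx_xvadddp requires the -mvsx option

   and the call is then expanded as an ordinary external call.  */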
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  unsigned mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
			   ? get_insn_name ((int)icode)
			   : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_EVSEL:	   name3 = "evsel";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int)icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (int) CODE_FOR_altivec_lvsr;
	enum machine_mode tmode = insn_data[icode].operand[0].mode;
	enum machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (VOIDmode, op,
				    gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	/*pat = gen_altivec_lvsr (target, op);*/
	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"	 : "",
	     (TARGET_SPE)	   ? ", spe"	 : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  void_type_internal_node = void_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector long", V2DI_type_node);
  TYPE_NAME (V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
  TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
  TYPE_NAME (bool_V2DI_type_node) = tdecl;

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

#ifdef TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  unsigned fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
spe_init_builtins (void)
{
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  const struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree v2sf_ftype_4_v2sf
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree int_ftype_int_v2si_v2si
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				NULL_TREE);

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_puint_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				puint_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_int
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree void_ftype_v2si_pv2si_char
    = build_function_type_list (void_type_node,
				opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				char_type_node,
				NULL_TREE);

  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree int_ftype_void
    = build_function_type_list (integer_type_node, NULL_TREE);

  tree v2si_ftype_pv2si_int
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_p_V2SI_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_puint_int
    = build_function_type_list (opaque_V2SI_type_node,
				puint_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_pushort_int
    = build_function_type_list (opaque_V2SI_type_node,
				pushort_type_node,
				integer_type_node,
				NULL_TREE);

  tree v2si_ftype_signed_char
    = build_function_type_list (opaque_V2SI_type_node,
				signed_char_type_node,
				NULL_TREE);

  add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);

  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);

  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);

  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }

  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
}
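
/* As an illustration of the signatures above (an editorial example, not
   part of the builtin machinery): "__builtin_altivec_lvx" is registered
   with v4si_ftype_long_pcvoid, so user code can write, e.g.,

       vector signed int v = __builtin_altivec_lvx (0, ptr);

   where PTR is a const void * and 0 is the long offset operand.  */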
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */

static hashval_t
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned) MAX_MACHINE_MODE) + ((unsigned) bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */

static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
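
/* For example, two lookups that both describe a (V4SI, V4SI, V4SI, VOIDmode)
   signature with identical uns_p flags hash to the same slot and compare
   equal above, so they share one cached function type; flipping any single
   mode or unsigned flag yields a distinct hash entry.  */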
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */

static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
		       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
					  builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
      h.uns_p[0] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc_builtin_hash_struct ();
      *h2 = h;
      *found = (void *) h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *) (*found))->type;
}
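
/* An editorial example of the mapping above: the unsigned multiply-even
   builtin ALTIVEC_BUILTIN_VMULEUB_UNS (return mode V8HImode, argument modes
   V16QImode) has uns_p[0..2] set, so the cached type works out to roughly
   "vector unsigned short (vector unsigned char, vector unsigned char)".  */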
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  unsigned builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_v2si_qi))
		type = v2si_ftype_v2si_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
		   && mode2 == QImode)
	    {
	      if (! (type = v2si_ftype_int_qi))
		type = v2si_ftype_int_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      integer_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      unsigned mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0 || icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  if (mode0 == V2SImode && mode1 == QImode)
	    {
	      if (! (type = v2si_ftype_qi))
		type = v2si_ftype_qi
		  = build_function_type_list (opaque_V2SI_type_node,
					      char_type_node,
					      NULL_TREE);
	    }
	  else
	    type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					  d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
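
/* For example, a descriptor whose mask requires a feature bit (say, the
   AltiVec bit) that is absent from builtin_mask fails the
   (mask & builtin_mask) != mask test above and is skipped, so the builtin
   is simply never defined for that target configuration.  */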
static void
rs6000_init_libfuncs (void)
{
  if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF && !TARGET_POWERPC)
    {
      /* AIX library routines for float->int conversion.  */
      set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
      set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
    }

  if (!TARGET_IEEEQUAD)
    {
      /* AIX/Darwin/64-bit Linux quad floating point routines.  */
      if (!TARGET_XL_COMPAT)
	{
	  set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	  set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	  set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	  set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	  if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	    {
	      set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	      set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	      set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	      set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	      set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	      set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	      set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	      set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	      set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	      set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	      set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	      set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	      set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	      set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	      set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	    }

	  if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	    set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
	}
      else
	{
	  set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	  set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	  set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	  set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
	}
    }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
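
/* The practical effect of the mappings above: with IBM extended-precision
   long double and no -mxl-compat, a 128-bit addition compiles to a call to
   __gcc_qadd; with -mxl-compat it calls _xlqadd; and on 32-bit SVR4 with
   IEEE quad it calls _q_add instead.  */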
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size clear, just call memset.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment.  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear?  */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  clear_bytes = 8;
	  mode = DImode;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
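
/* For example, clearing a 32-byte, 16-byte-aligned block with AltiVec
   enabled takes the first branch above on both iterations: two V4SImode
   moves of CONST0_RTX, i.e. two 16-byte vector stores of zero, rather than
   a call to memset.  */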
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src = operands[1];
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  int constp = (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment.  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move?  */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  if (bytes > rs6000_block_move_inline_limit)
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
	rtx (*movmemsi) (rtx, rtx, rtx, rtx);
	rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
	 when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
	{
	  move_bytes = 16;
	  mode = V4SImode;
	  gen_func.mov = gen_movv4si;
	}
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
	{
	  move_bytes = 8;
	  mode = V2SImode;
	  gen_func.mov = gen_movv2si;
	}
      else if (TARGET_STRING
	       && bytes > 24	/* move up to 32 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10]
	       && ! fixed_regs[11]
	       && ! fixed_regs[12])
	{
	  move_bytes = (bytes > 32) ? 32 : bytes;
	  gen_func.movmemsi = gen_movmemsi_8reg;
	}
      else if (TARGET_STRING
	       && bytes > 16	/* move up to 24 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10])
	{
	  move_bytes = (bytes > 24) ? 24 : bytes;
	  gen_func.movmemsi = gen_movmemsi_6reg;
	}
      else if (TARGET_STRING
	       && bytes > 8	/* move up to 16 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8])
	{
	  move_bytes = (bytes > 16) ? 16 : bytes;
	  gen_func.movmemsi = gen_movmemsi_4reg;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  move_bytes = 8;
	  mode = DImode;
	  gen_func.mov = gen_movdi;
	}
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
	{			/* move up to 8 bytes at a time */
	  move_bytes = (bytes > 8) ? 8 : bytes;
	  gen_func.movmemsi = gen_movmemsi_2reg;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  move_bytes = 4;
	  mode = SImode;
	  gen_func.mov = gen_movsi;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  move_bytes = 2;
	  mode = HImode;
	  gen_func.mov = gen_movhi;
	}
      else if (TARGET_STRING && bytes > 1)
	{			/* move up to 4 bytes at a time */
	  move_bytes = (bytes > 4) ? 4 : bytes;
	  gen_func.movmemsi = gen_movmemsi_1reg;
	}
      else /* move 1 byte at a time */
	{
	  move_bytes = 1;
	  mode = QImode;
	  gen_func.mov = gen_movqi;
	}

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
	{
	  rtx tmp_reg = gen_reg_rtx (mode);

	  emit_insn ((*gen_func.mov) (tmp_reg, src));
	  stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
	}

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
	{
	  int i;
	  for (i = 0; i < num_reg; i++)
	    emit_insn (stores[i]);
	  num_reg = 0;
	}

      if (mode == BLKmode)
	{
	  /* Move the address into scratch registers.  The movmemsi
	     patterns require zero offset.  */
	  if (!REG_P (XEXP (src, 0)))
	    {
	      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
	      src = replace_equiv_address (src, src_reg);
	    }
	  set_mem_size (src, move_bytes);

	  if (!REG_P (XEXP (dest, 0)))
	    {
	      rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
	      dest = replace_equiv_address (dest, dest_reg);
	    }
	  set_mem_size (dest, move_bytes);

	  emit_insn ((*gen_func.movmemsi) (dest, src,
					   GEN_INT (move_bytes & 31),
					   align_rtx));
	}
    }

  return 1;
}
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "{l|lwz} %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))
      {
	if (i == words - 1)
	  {
	    xop[0] = GEN_INT (4 * (words - 1));
	    xop[1] = operands[1];
	    xop[2] = operands[2];
	    output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
	else if (i == 0)
	  {
	    xop[0] = GEN_INT (4 * (words - 1));
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	    output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
	    return "";
	  }
	else
	  {
	    for (j = 0; j < words; j++)
	      if (j != i)
		{
		  xop[0] = GEN_INT (j * 4);
		  xop[1] = operands[1];
		  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
		  output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
		}
	    xop[0] = GEN_INT (i * 4);
	    xop[1] = operands[1];
	    output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
      }

  return "{lsi|lswi} %2,%1,%N0";
}
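
/* In the common case, where the address register does not appear among the
   destination registers, the loop above finds nothing and the single
   "{lsi|lswi} %2,%1,%N0" template is returned, letting one load-string
   instruction fill all WORDS registers at once.  */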
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
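
/* For example, a signed (gt ...) comparison carried in CCUNSmode trips the
   first assertion above: signed orderings must not use the unsigned CC
   mode, and unsigned orderings (GTU and friends) must use it.  */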
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */

int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
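
/* A worked example for includes_lshift_p: with SHIFTOP = 4 the shift mask
   ~0 << 4 keeps bits 4 and up, so ANDOP = 0xfffffff0 passes (its low four
   bits are clear), while ANDOP = 0xffffffff fails because bits 0-3 fall
   outside the shift mask.  */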
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */

int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, high, lsb;
      HOST_WIDE_INT shift_mask_low, shift_mask_high;

      low = CONST_DOUBLE_LOW (andop);
      if (HOST_BITS_PER_WIDE_INT < 64)
	high = CONST_DOUBLE_HIGH (andop);

      if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
	  || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  shift_mask_high = ~0;
	  if (INTVAL (shiftop) > 32)
	    shift_mask_high <<= INTVAL (shiftop) - 32;

	  lsb = high & -high;

	  if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
	    return 0;

	  high = ~high;
	  high &= -lsb;

	  lsb = high & -high;
	  return high == -lsb;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if (-lsb != shift_mask_low)
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64)
	high = ~high;
      low = ~low;
      low &= -lsb;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  lsb = high & -high;
	  return high == -lsb;
	}

      lsb = low & -low;
      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
    }
  else
    return 0;
}
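
/* A worked example: SHIFTOP = 8, ANDOP = 0xff00.  The lsb of the mask is
   0x100, and -0x100 equals ~0 << 8, so the zeros below the ones are exactly
   SHIFTOP wide.  Inverting and stripping the low ones leaves 0x...ffff0000,
   which again satisfies c == -lsb, so the mask is accepted for rldic.  */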
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */

int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, lsb, shift_mask_low;

      low = CONST_DOUBLE_LOW (andop);

      if (HOST_BITS_PER_WIDE_INT < 64)
	{
	  HOST_WIDE_INT high, shift_mask_high;

	  high = CONST_DOUBLE_HIGH (andop);

	  if (low == 0)
	    {
	      shift_mask_high = ~0;
	      if (INTVAL (shiftop) > 32)
		shift_mask_high <<= INTVAL (shiftop) - 32;

	      lsb = high & -high;

	      if ((lsb & shift_mask_high) == 0)
		return 0;

	      return high == -lsb;
	    }
	  if (high != ~0)
	    return 0;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if ((lsb & shift_mask_low) == 0)
	return 0;

      return low == -lsb && lsb != 1;
    }
  else
    return 0;
}
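
/* A worked example: SHIFTOP = 16 and ANDOP = ~0 << 16 (low 16 bits clear,
   the rest ones).  Here lsb = 0x10000 lies inside the shift mask, c == -lsb
   holds, and lsb != 1, so the mask is accepted for rldicr.  */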
/* Return 1 if the operands will generate valid arguments to an rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of the instruction do not correspond to the semantics of the
   RTL insn.  */

int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
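
/* For example, SIZEOP = 16, STARTOP = 40, SHIFTOP = 8 satisfies every test
   above (40 lies in (32,64), 16 + 40 < 64, 16 + 8 < 32, and 64 - 8 >= 16),
   so such an insert-with-right-shift can be done with rlwimi.  */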
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
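
/* For example, (mem (plus r3 8)) and (mem (plus r3 16)) qualify: both use
   the same base register r3 and the second offset is exactly 8 beyond the
   first, so the pair can be combined into a single lfq or stfq.  */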
rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  rtx ret;

  if (mode != SDmode)
    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
	{
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  cfun->machine->sdmode_stack_slot = mem;
	  eliminated = true;
	}
      ret = mem;
    }

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
	       GET_MODE_NAME (mode));
      if (ret)
	debug_rtx (ret);
      else
	fprintf (stderr, "\tNULL_RTX\n");
    }

  return ret;
}
static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case SSA_NAME:
    case REAL_CST:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
	return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}
enum reload_reg_type {
  GPR_REGISTER_TYPE,
  VECTOR_REGISTER_TYPE,
  OTHER_REGISTER_TYPE
};

static enum reload_reg_type
rs6000_reload_register_type (enum reg_class rclass)
{
  switch (rclass)
    {
    case GENERAL_REGS:
    case BASE_REGS:
      return GPR_REGISTER_TYPE;

    case FLOAT_REGS:
    case ALTIVEC_REGS:
    case VSX_REGS:
      return VECTOR_REGISTER_TYPE;

    default:
      return OTHER_REGISTER_TYPE;
    }
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 enum machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;

  sri->icode = CODE_FOR_nothing;

  /* Convert vector loads and stores into gprs to use an additional base
     register.  */
  icode = rs6000_vector_reload[mode][in_p != false];
  if (icode != CODE_FOR_nothing)
    {
      ret = NO_REGS;
      sri->icode = CODE_FOR_nothing;
      sri->extra_cost = 0;

      if (GET_CODE (x) == MEM)
	{
	  rtx addr = XEXP (x, 0);

	  /* Loads to and stores from gprs can do reg+offset, and wouldn't need
	     an extra register in that case, but they would need an extra
	     register if the addressing is reg+reg or (reg+reg)&(-16).  */
	  if (rclass == GENERAL_REGS || rclass == BASE_REGS)
	    {
	      if (!legitimate_indirect_address_p (addr, false)
		  && !rs6000_legitimate_offset_address_p (TImode, addr,
							  false, true))
		{
		  sri->icode = icode;
		  /* Account for splitting the loads, and converting the
		     address from reg+reg to reg.  */
		  sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
				     + ((GET_CODE (addr) == AND) ? 1 : 0));
		}
	    }
	  /* Loads to and stores from vector registers can only do reg+reg
	     addressing.  Altivec registers can also do (reg+reg)&(-16).  */
	  else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
		   || rclass == FLOAT_REGS || rclass == NO_REGS)
	    {
	      if (!VECTOR_MEM_ALTIVEC_P (mode)
		  && GET_CODE (addr) == AND
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && INTVAL (XEXP (addr, 1)) == -16
		  && (legitimate_indirect_address_p (XEXP (addr, 0), false)
		      || legitimate_indexed_address_p (XEXP (addr, 0), false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
				     ? 2 : 1);
		}
	      else if (!legitimate_indirect_address_p (addr, false)
		       && (rclass == NO_REGS
			   || !legitimate_indexed_address_p (addr, false)))
		{
		  sri->icode = icode;
		  sri->extra_cost = 1;
		}
	      else
		icode = CODE_FOR_nothing;
	    }
	  /* Any other loads, including to pseudo registers which haven't been
	     assigned to a register yet, default to require a scratch
	     register.  */
	  else
	    {
	      sri->icode = icode;
	      sri->extra_cost = 2;
	    }
	}
      else if (REG_P (x))
	{
	  int regno = true_regnum (x);

	  icode = CODE_FOR_nothing;
	  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	    default_p = true;
	  else
	    {
	      enum reg_class xclass = REGNO_REG_CLASS (regno);
	      enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
	      enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);

	      /* If memory is needed, use default_secondary_reload to create the
		 stack slot.  */
	      if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
		default_p = true;
	      else
		ret = NO_REGS;
	    }
	}
      else
	default_p = true;
    }
  else if (TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));

      if (off != NULL_RTX && (INTVAL (off) & 3) != 0)
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_di_load;
	  else
	    sri->icode = CODE_FOR_reload_di_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else if (!TARGET_POWERPC64
	   && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
	   && MEM_P (x)
	   && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx off = address_offset (XEXP (x, 0));

      if (off != NULL_RTX
	  && ((unsigned HOST_WIDE_INT) INTVAL (off) + 0x8000
	      >= 0x10000u - (GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD)))
	{
	  if (in_p)
	    sri->icode = CODE_FOR_reload_si_load;
	  else
	    sri->icode = CODE_FOR_reload_si_store;
	  sri->extra_cost = 2;
	  ret = NO_REGS;
	}
      else
	default_p = true;
    }
  else
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (default_p)
	fprintf (stderr, ", default secondary reload");

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d\n",
		 insn_data[sri->icode].name, sri->extra_cost);
      else
	fprintf (stderr, "\n");

      if (x)
	debug_rtx (x);
    }

  return ret;
}
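
/* For example, reloading a 64-bit gpr from (mem:DI (plus r3 5)) falls into
   the TARGET_POWERPC64 branch above: the offset 5 has (5 & 3) != 0, so
   sri->icode is set to CODE_FOR_reload_di_load with an extra cost of 2.  */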
13654 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
13655 to SP+reg addressing. */
13658 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
13660 int regno
= true_regnum (reg
);
13661 enum machine_mode mode
= GET_MODE (reg
);
13662 enum reg_class rclass
;
13664 rtx and_op2
= NULL_RTX
;
13667 rtx scratch_or_premodify
= scratch
;
13671 if (TARGET_DEBUG_ADDR
)
13673 fprintf (stderr
, "\nrs6000_secondary_reload_inner, type = %s\n",
13674 store_p
? "store" : "load");
13675 fprintf (stderr
, "reg:\n");
13677 fprintf (stderr
, "mem:\n");
13679 fprintf (stderr
, "scratch:\n");
13680 debug_rtx (scratch
);
13683 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
13684 gcc_assert (GET_CODE (mem
) == MEM
);
13685 rclass
= REGNO_REG_CLASS (regno
);
13686 addr
= XEXP (mem
, 0);
13690 /* GPRs can handle reg + small constant, all other addresses need to use
13691 the scratch register. */
13694 if (GET_CODE (addr
) == AND
)
13696 and_op2
= XEXP (addr
, 1);
13697 addr
= XEXP (addr
, 0);
13700 if (GET_CODE (addr
) == PRE_MODIFY
)
13702 scratch_or_premodify
= XEXP (addr
, 0);
13703 gcc_assert (REG_P (scratch_or_premodify
));
13704 gcc_assert (GET_CODE (XEXP (addr
, 1)) == PLUS
);
13705 addr
= XEXP (addr
, 1);
13708 if (GET_CODE (addr
) == PLUS
13709 && (and_op2
!= NULL_RTX
13710 || !rs6000_legitimate_offset_address_p (TImode
, addr
,
13713 addr_op1
= XEXP (addr
, 0);
13714 addr_op2
= XEXP (addr
, 1);
13715 gcc_assert (legitimate_indirect_address_p (addr_op1
, false));
13717 if (!REG_P (addr_op2
)
13718 && (GET_CODE (addr_op2
) != CONST_INT
13719 || !satisfies_constraint_I (addr_op2
)))
13721 if (TARGET_DEBUG_ADDR
)
13724 "\nMove plus addr to register %s, mode = %s: ",
13725 rs6000_reg_names
[REGNO (scratch
)],
13726 GET_MODE_NAME (mode
));
13727 debug_rtx (addr_op2
);
13729 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
13730 addr_op2
= scratch
;
13733 emit_insn (gen_rtx_SET (VOIDmode
,
13734 scratch_or_premodify
,
13735 gen_rtx_PLUS (Pmode
,
13739 addr
= scratch_or_premodify
;
13740 scratch_or_premodify
= scratch
;
13742 else if (!legitimate_indirect_address_p (addr
, false)
13743 && !rs6000_legitimate_offset_address_p (TImode
, addr
,
13746 if (TARGET_DEBUG_ADDR
)
13748 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
13749 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
13750 GET_MODE_NAME (mode
));
13753 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
13754 addr
= scratch_or_premodify
;
13755 scratch_or_premodify
= scratch
;
13759 /* Float/Altivec registers can only handle reg+reg addressing. Move
13760 other addresses into a scratch register. */
13765 /* With float regs, we need to handle the AND ourselves, since we can't
13766 use the Altivec instruction with an implicit AND -16. Allow scalar
13767 loads to float registers to use reg+offset even if VSX. */
13768 if (GET_CODE (addr
) == AND
13769 && (rclass
!= ALTIVEC_REGS
|| GET_MODE_SIZE (mode
) != 16
13770 || GET_CODE (XEXP (addr
, 1)) != CONST_INT
13771 || INTVAL (XEXP (addr
, 1)) != -16
	  || !VECTOR_MEM_ALTIVEC_P (mode)))
	{
	  and_op2 = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}

      /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
	 as the address later.  */
      if (GET_CODE (addr) == PRE_MODIFY
	  && (!VECTOR_MEM_VSX_P (mode)
	      || and_op2 != NULL_RTX
	      || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
	{
	  scratch_or_premodify = XEXP (addr, 0);
	  gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
						     false));
	  gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
	  addr = XEXP (addr, 1);
	}

      if (legitimate_indirect_address_p (addr, false)	/* reg */
	  || legitimate_indexed_address_p (addr, false)	/* reg+reg */
	  || GET_CODE (addr) == PRE_MODIFY		/* VSX pre-modify */
	  || (GET_CODE (addr) == AND			/* Altivec memory */
	      && GET_CODE (XEXP (addr, 1)) == CONST_INT
	      && INTVAL (XEXP (addr, 1)) == -16
	      && VECTOR_MEM_ALTIVEC_P (mode))
	  || (rclass == FLOAT_REGS			/* legacy float mem */
	      && GET_MODE_SIZE (mode) == 8
	      && and_op2 == NULL_RTX
	      && scratch_or_premodify == scratch
	      && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
	;

      else if (GET_CODE (addr) == PLUS)
	{
	  addr_op1 = XEXP (addr, 0);
	  addr_op2 = XEXP (addr, 1);
	  gcc_assert (REG_P (addr_op1));

	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
	      debug_rtx (addr_op2);
	    }
	  rs6000_emit_move (scratch, addr_op2, Pmode);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  scratch_or_premodify,
				  gen_rtx_PLUS (Pmode,
						addr_op1,
						scratch)));
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
	       || GET_CODE (addr) == CONST_INT || REG_P (addr))
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
		       rs6000_reg_names[REGNO (scratch_or_premodify)],
		       GET_MODE_NAME (mode));
	      debug_rtx (addr);
	    }

	  rs6000_emit_move (scratch_or_premodify, addr, Pmode);
	  addr = scratch_or_premodify;
	  scratch_or_premodify = scratch;
	}

      else
	gcc_unreachable ();

      break;

    default:
      gcc_unreachable ();
    }

  /* If the original address involved a pre-modify that we couldn't use the VSX
     memory instruction with update, and we haven't taken care of already,
     store the address in the pre-modify register and use that as the
     address.  */
  if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
    {
      emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
      addr = scratch_or_premodify;
    }

  /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
     memory instruction, recreate the AND now, including the clobber which is
     generated by the general ANDSI3/ANDDI3 patterns for the
     andi. instruction.  */
  if (and_op2 != NULL_RTX)
    {
      if (! legitimate_indirect_address_p (addr, false))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
	  addr = scratch;
	}

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
		   rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
	  debug_rtx (and_op2);
	}

      and_rtx = gen_rtx_SET (VOIDmode,
			     scratch,
			     gen_rtx_AND (Pmode,
					  addr,
					  and_op2));

      cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2, and_rtx, cc_clobber)));
      addr = scratch;
    }

  /* Adjust the address if it changed.  */
  if (addr != XEXP (mem, 0))
    {
      mem = change_address (mem, mode, addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
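
/* Illustrative sketch (added commentary, not from the original source): for
   an Altivec access whose address was (and (plus r3 r4) -16) and which could
   not be used directly, the code above first moves r3+r4 into the scratch
   register and then recreates the AND as, roughly:

       (parallel [(set (reg scratch) (and (reg scratch) (const_int -16)))
		  (clobber (scratch:CC))])

   so the final vector move sees a plain indirect address.  */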
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

static void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      scratch_or_premodify = XEXP (addr, 0);
      gcc_assert (REG_P (scratch_or_premodify));
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
}
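
/* Example (illustrative, added commentary): a reload of
   (mem:DI (plus (reg r1) (const_int 32771))) into a gpr cannot use the
   misaligned/too-large offset directly, so the code above computes
   r1+32771 into the scratch register and rewrites the access as
   (mem:DI (reg scratch)) before emitting the move.  */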
/* Allocate a 64-bit stack slot to be used for copying SDmode
   values through if this function has any SDmode references.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
	if (ret)
	  {
	    rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	    cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								  SDmode, 0);
	    return;
	  }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
	continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
	  || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
	{
	  rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	  cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								SDmode, 0);
	  return;
	}
    }
}
static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?  */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum machine_mode mode = GET_MODE (x);

  if (VECTOR_UNIT_VSX_P (mode)
      && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    return rclass;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && easy_vector_constant (x, mode))
    return ALTIVEC_REGS;

  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
    return NO_REGS;

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  /* For VSX, prefer the traditional registers for 64-bit values because we can
     use the non-VSX loads.  Prefer the Altivec registers if Altivec is
     handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
     prefer Altivec loads.  */
  if (rclass == VSX_REGS)
    {
      if (GET_MODE_SIZE (mode) <= 8)
	return FLOAT_REGS;

      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
	return ALTIVEC_REGS;

      return rclass;
    }

  return rclass;
}
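
/* Example (illustrative, added commentary): reloading a DFmode
   CONST_DOUBLE into FLOAT_REGS returns NO_REGS above, which forces the
   constant to memory; reloading an "easy" V4SI splat constant into
   VSX_REGS prefers ALTIVEC_REGS so a vspltisw-style instruction can
   materialize it without a load.  */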
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (enum reg_class class1,
				enum reg_class class2,
				enum machine_mode mode)
{
  if (class1 == class2)
    return false;

  /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
     ALTIVEC_REGS, and FLOAT_REGS).  We don't need to use memory to copy
     between these classes.  But we need memory for other things that can go in
     FLOAT_REGS like SFmode.  */
  if (TARGET_VSX
      && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
      && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
	  || class1 == FLOAT_REGS))
    return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
	    && class2 != FLOAT_REGS);

  if (class1 == VSX_REGS || class2 == VSX_REGS)
    return true;

  if (class1 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class2 == FLOAT_REGS
      && (!TARGET_MFPGPR || !TARGET_POWERPC64
	  || ((mode != DFmode)
	      && (mode != DDmode)
	      && (mode != DImode))))
    return true;

  if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
    return true;

  return false;
}
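
/* Example (illustrative, added commentary): copying SImode between
   FLOAT_REGS and GENERAL_REGS requires a bounce through a stack slot,
   while a DFmode copy with -mmfpgpr on a 64-bit target can use the
   direct fpr<->gpr move instructions and needs no memory.  */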
/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (enum reg_class class1,
				      enum reg_class class2,
				      enum machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (class1, class2, mode);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
	   "class2 = %s, mode = %s\n",
	   ret ? "true" : "false", reg_class_names[class1],
	   reg_class_names[class2], GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;

  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  */
  if (TARGET_VSX
      && (regno == -1 || VSX_REGNO_P (regno))
      && VSX_REG_CLASS_P (rclass))
    return NO_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
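
/* Example (illustrative, added commentary): moving a SYMBOL_REF into
   FLOAT_REGS under TARGET_ELF reports BASE_REGS above, so reload first
   materializes the address in a base register; a constant or memory
   operand headed for FLOAT_REGS needs no scratch (NO_REGS) unless the
   mode is SDmode, where a GPR intermediary is reported instead.  */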
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     enum machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
				 enum machine_mode to,
				 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
      return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
	      && reg_classes_intersect_p (xclass, rclass));
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
	  || (((to) == TFmode) + ((from) == TFmode)) == 1
	  || (((to) == DDmode) + ((from) == DDmode)) == 1
	  || (((to) == TDmode) + ((from) == TDmode)) == 1
	  || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    return (from_size != 8 && from_size != 16);

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}
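
/* Example (illustrative, added commentary): with -mvsx, a DI<->DF
   change in VSX_REGS is allowed since both modes are 8 bytes and only
   the size is checked; a V4SI<->SF change is rejected by the first
   test because the sizes differ (16 vs. 4) and the smaller one is
   below 8 bytes.  */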
/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
				       enum machine_mode to,
				       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_cannot_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
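
/* Worked example (illustrative, added commentary): assuming cr1 is the
   register one past CR0_REGNO, for (gt (reg:CC cr1) (const_int 0)) the
   base_bit is 4 and ccr_bit returns 5 -- the GT bit of cr1 within the
   32-bit CR.  With SCC_P, GE/LE instead map to the unordered slot
   (base_bit + 3) that a preceding cror has set up.  */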
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_alloc_cleared_machine_function ();
}
/* These macros test for integers and extract the low-order bits.  */
#define INT_P(X)							\
  ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)		\
   && GET_MODE (X) == VOIDmode)

#define INT_LOWPART(X) \
  (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}
int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
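
/* Worked example (illustrative, added commentary): for the rlwinm-style
   mask 0x00fff000, extract_MB returns 8 and extract_ME returns 19, i.e.
   the mask covers PowerPC bit positions 8..19 counting from the most
   significant bit.  */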
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_ld pattern.  */

static const char *
rs6000_get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
			 rs6000_get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Helper function for rs6000_get_some_local_dynamic_name.  */

static int
rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      const char *str = XSTR (x, 0);
      if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  cfun->machine->some_ld_name = str;
	  return 1;
	}
    }

  return 0;
}
/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
    case '.':
      /* Write out an instruction after the call which may be replaced
	 with glue code by the loader.  This depends on the AIX version.  */
      asm_fprintf (file, RS6000_CALL_GLUE);
      return;

      /* %a is output_address.  */

    case 'A':
      /* If X is a constant integer whose low-order 5 bits are zero,
	 write 'l'.  Otherwise, write 'r'.  This is a kludge to fix a bug
	 in the AIX assembler where "sri" with a zero shift count
	 writes a trash instruction.  */
      if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
	putc ('l', file);
      else
	putc ('r', file);
      return;

    case 'b':
      /* If constant, low-order 16 bits of constant, unsigned.
	 Otherwise, write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
      else
	print_operand (file, x, 0);
      return;

    case 'B':
      /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
	 for 64-bit mask direction.  */
      putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
      return;

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'c':
      /* X is a CR register.  Print the number of the GT bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%c value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
      return;

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (REG_P (x));

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'I':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	}
      print_operand_address (file, x);
      fputs ("@l", file);
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode,
						     UNITS_PER_WORD),
				  0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'm':
      /* MB value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%m value");
      else
	fprintf (file, "%d", extract_MB (x));
      return;

    case 'M':
      /* ME value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%M value");
      else
	fprintf (file, "%d", extract_ME (x));
      return;

      /* %n outputs the negative of its operand.  */

    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || INT_LOWPART (x) < 0
	  || (i = exact_log2 (INT_LOWPART (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
      return;

    case 'S':
      /* PowerPC64 mask position.  All 0's is excluded.
	 CONST_INT 32-bit mask is considered sign-extended so any
	 transition must occur within the CONST_INT, not on the boundary.  */
      if (! mask64_operand (x, DImode))
	output_operand_lossage ("invalid %%S value");

      uval = INT_LOWPART (x);

      if (uval & 1)	/* Clear Left */
	{
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 64;
	}
      else		/* Clear Right */
	{
	  uval = ~uval;
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 63;
	}
      while (uval != 0)
	--i, uval >>= 1;
      gcc_assert (i >= 0);
      fprintf (file, "%d", i);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order 16 bits of constant for use in unsigned operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%u value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'W':
      /* MB value for a PowerPC64 rldic operand.  */
      i = clz_hwi (GET_CODE (x) == CONST_INT
		   ? INTVAL (x) : CONST_DOUBLE_HIGH (x));

#if HOST_BITS_PER_WIDE_INT == 32
      if (GET_CODE (x) == CONST_INT && i > 0)
	i += 32;  /* zero-extend high-part was all 0's */
      else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
	i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
#endif

      fprintf (file, "%d", i);
      return;

    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;

    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* Mark the decl as referenced so that cgraph will output the
	 function.  */
      if (SYMBOL_REF_DECL (x))
	mark_decl_referenced (SYMBOL_REF_DECL (x));

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);

	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);

	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec or SPE memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	/* Ugly hack because %y is overloaded.  */
	if ((TARGET_SPE || TARGET_E500_DOUBLE)
	    && (GET_MODE_SIZE (GET_MODE (x)) == 8
		|| GET_MODE (x) == TFmode
		|| GET_MODE (x) == TImode))
	  {
	    /* Handle [reg].  */
	    if (REG_P (tmp))
	      {
		fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
		break;
	      }
	    /* Handle [reg+UIMM].  */
	    else if (GET_CODE (tmp) == PLUS
		     && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
	      {
		int x;

		gcc_assert (REG_P (XEXP (tmp, 0)));

		x = INTVAL (XEXP (tmp, 1));
		fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
		break;
	      }

	    /* Fall through.  Must be [reg+reg].  */
	  }
	if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (XEXP (XEXP (x, 0), 1));
	  else
	    output_address (XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      assemble_name (file, rs6000_get_some_local_dynamic_name ());
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
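
/* Example (illustrative, added commentary): given operand 0 = (reg:DI 3)
   on a 32-bit target, a template such as "addc %0,%0,%0\n\taddze %L0,%L0"
   prints the low word through %0 and the paired high register through
   %L0; %w on (const_int 0x12345678) prints the sign-extended low 16
   bits, 22136.  */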
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
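
/* Example (illustrative, added commentary): (plus (reg 9) (const_int 8))
   prints as "8(9)", while a TOC-relative LO_SUM is routed through
   tocrel_base so that an addend comes out as "x+8@toc@l(9)" rather than
   the assembler-hostile "x@toc+8@l(9)".  */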
/* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base && tocrel_offset != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the text
	 section.  */
      if (TARGET_RELOCATABLE
	  && in_section != toc_section
	  && !recurse
	  && GET_CODE (x) != CONST_INT
	  && GET_CODE (x) != CONST_DOUBLE
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
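
/* Example (illustrative, added commentary): with -mrelocatable, emitting
   the address of symbol `foo' produces roughly

       .LCP0:
	       .long (foo)@fixup
	       .section ".fixup","aw"
	       .align 2
	       .long .LCP0
	       .previous

   so the loader knows to relocate the word at .LCP0.  */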
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "internal", "hidden", "protected"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
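
/* Example (illustrative, added commentary): reversing GE in CCFPmode
   yields UNLT, so a NaN operand still takes the "reversed" branch; in
   CCmode the plain reverse_condition mapping (GE -> LT) suffices.  */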
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* E500 FP compare instructions on the GPRs.  Yuck!  */
  if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
      && FLOAT_MODE_P (mode))
    {
      rtx cmp, or_result, compare_result2;
      enum machine_mode op_mode = GET_MODE (op0);

      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op1);

      /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
	 This explains the following mess.  */

      switch (code)
	{
	case EQ: case UNEQ: case NE: case LTGT:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result, op0, op1)
		: gen_cmpsfeq_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result, op0, op1)
		: gen_cmpdfeq_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result, op0, op1)
		: gen_cmptfeq_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfgt_gpr (compare_result, op0, op1)
		: gen_cmpsfgt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfgt_gpr (compare_result, op0, op1)
		: gen_cmpdfgt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfgt_gpr (compare_result, op0, op1)
		: gen_cmptfgt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsflt_gpr (compare_result, op0, op1)
		: gen_cmpsflt_gpr (compare_result, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdflt_gpr (compare_result, op0, op1)
		: gen_cmpdflt_gpr (compare_result, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttflt_gpr (compare_result, op0, op1)
		: gen_cmptflt_gpr (compare_result, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Synthesize LE and GE from LT/GT || EQ.  */
      if (code == LE || code == GE || code == LEU || code == GEU)
	{
	  emit_insn (cmp);

	  switch (code)
	    {
	    case LE: code = LT; break;
	    case GE: code = GT; break;
	    case LEU: code = LT; break;
	    case GEU: code = GT; break;
	    default: gcc_unreachable ();
	    }

	  compare_result2 = gen_reg_rtx (CCFPmode);

	  /* Do the EQ.  */
	  switch (op_mode)
	    {
	    case SFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstsfeq_gpr (compare_result2, op0, op1)
		: gen_cmpsfeq_gpr (compare_result2, op0, op1);
	      break;

	    case DFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tstdfeq_gpr (compare_result2, op0, op1)
		: gen_cmpdfeq_gpr (compare_result2, op0, op1);
	      break;

	    case TFmode:
	      cmp = (flag_finite_math_only && !flag_trapping_math)
		? gen_tsttfeq_gpr (compare_result2, op0, op1)
		: gen_cmptfeq_gpr (compare_result2, op0, op1);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  emit_insn (cmp);

	  /* OR them together.  */
	  or_result = gen_reg_rtx (CCFPmode);
	  cmp = gen_e500_cr_ior_compare (or_result, compare_result,
					 compare_result2);
	  compare_result = or_result;
	}
      else
	{
	  if (code == NE || code == LTGT)
	    code = NE;
	  else
	    code = EQ;
	}

      emit_insn (cmp);
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && GET_MODE (op0) == TFmode
	  && !TARGET_IEEEQUAD
	  && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (VOIDmode,
				  compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT; or2 = EQ; break;
	case GE: or1 = GT; or2 = EQ; break;
	case UNEQ: or1 = UNORDERED; or2 = EQ; break;
	case LTGT: or1 = LT; or2 = GT; break;
	case UNGT: or1 = UNORDERED; or2 = GT; break;
	case UNLT: or1 = UNORDERED; or2 = LT; break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      code = EQ;
      compare_result = or_result;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
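
/* Example (illustrative, added commentary): for a DFmode GE compare,
   the code above first emits an fcmpu-style (set cc (compare a b)),
   then ORs the GT and EQ bits into a CCEQmode register (a cror in the
   assembly) and hands back (eq cc2 0) for the branch or scc expander
   to consume.  */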
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}
void
rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  if (TARGET_ISEL && (mode == SImode || mode == DImode))
    {
      rs6000_emit_sISEL (mode, operands);
      return;
    }

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (FLOAT_MODE_P (mode)
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
	emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
	 to the GT bit.  */
      switch (code)
	{
	case EQ:
	  /* Opposite of GT.  */
	  code = GT;
	  break;

	case NE:
	  code = UNLE;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.
     The old mnemonics don't have a way to specify this information.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
	 cpus as static prediction overrides processor dynamic
	 prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
  else
    s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
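
/* Example (illustrative, added commentary): for an EQ on cr0 with a
   nearby, likely-taken target the result is roughly "beq+ 0,L42" (new
   mnemonics); when the target is out of conditional-branch range the
   test is inverted and the output becomes "bne 0,$+8" followed by an
   unconditional "b L42".  */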
/* Return the string to flip the GT bit on a CR.  */
char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  enum machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;

    case EQ:
    case GT:
    case GTU:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode,
			      mask,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    enum machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;
	  case GEU:
	    new_code = GTU;
	    break;
	  case LE:
	    new_code = LT;
	    break;
	  case LEU:
	    new_code = LTU;
	    break;
	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	{
	  rtx tmp;
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
16114 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16115 operands for the relation operation COND. */
16118 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
16119 rtx cond
, rtx cc_op0
, rtx cc_op1
)
16121 enum machine_mode dest_mode
= GET_MODE (dest
);
16122 enum machine_mode mask_mode
= GET_MODE (cc_op0
);
16123 enum rtx_code rcode
= GET_CODE (cond
);
16124 enum machine_mode cc_mode
= CCmode
;
16128 bool invert_move
= false;
16130 if (VECTOR_UNIT_NONE_P (dest_mode
))
16133 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
16134 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
16138 /* Swap operands if we can, and fall back to doing the operation as
16139 specified, and doing a NOR to invert the test. */
16145 /* Invert condition and try again.
16146 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16147 invert_move
= true;
16148 rcode
= reverse_condition_maybe_unordered (rcode
);
16149 if (rcode
== UNKNOWN
)
16153 /* Mark unsigned tests with CCUNSmode. */
16158 cc_mode
= CCUNSmode
;
16165 /* Get the vector mask for the given relational operations. */
16166 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
16174 op_true
= op_false
;
16178 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
16179 CONST0_RTX (dest_mode
));
16180 emit_insn (gen_rtx_SET (VOIDmode
,
16182 gen_rtx_IF_THEN_ELSE (dest_mode
,
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
	   && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;
  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;
  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      default:
	break;
      }
  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
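/* Illustrative note (not from the original source): after the
   reductions above, everything is a single GE-against-zero select,
   which maps directly onto the fsel instruction:

     fsel fD, fA, fB, fC    -- fD = (fA >= 0.0) ? fB : fC

   so e.g. "x <= 0.0 ? a : b" is emitted as an fsel on -x.  */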
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  enum machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	rtx t = true_cond;
	true_cond = false_cond;
	false_cond = t;
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  if (false_cond != const0_rtx)
    false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
		  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
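/* Illustrative note (not from the original source): isel can only test
   the "true" sense of an LT/GT/EQ style CR bit, so a GE select such as

     dest = (a >= b) ? x : y

   is output with the condition reversed and the two move operands
   swapped, which is what the %3,%2 ordering in the first template
   above achieves.  */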
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (VOIDmode,
			      dest,
			      gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
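/* Illustrative note (not from the original source): without a direct
   min/max pattern, SMAX is lowered to the conditional move

     dest = (op0 >= op1) ? op0 : op1;

   while SMIN swaps the two move operands and keeps the same GE
   comparison.  */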
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_reg_note (x, REG_BR_PROB, very_unlikely);
}
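/* Illustrative note (not from the original source): REG_BR_PROB_BASE
   is 10000, so the note attached above records a taken probability of

     10000 / 100 - 1 = 99   (roughly 1%)

   which steers block reordering to keep the straight-line
   (store-conditional succeeded) path hot.  */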
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
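/* Illustrative note (not from the original source): taken together,
   a MEMMODEL_SEQ_CST atomic operation is bracketed as

     hwsync; <larx/stcx. retry loop>; isync

   while MEMMODEL_RELEASE only needs the leading lwsync and
   MEMMODEL_ACQUIRE only the trailing isync.  */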
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
			       shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
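/* Worked example (not from the original source): for a QImode access
   at address 0x1003 on a big-endian target,

     align = 0x1003 & -4          = 0x1000
     shift = ((0x1003 << 3) & 0x18) ^ 0x18 = 0x18 ^ 0x18 = 0
     mask  = 0xff << 0            = 0x000000ff

   i.e. the byte at the highest address is the least significant byte
   of the aligned word, so it needs no shift.  */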
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask),
				       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
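/* Illustrative note (not from the original source): this computes

     x = (oldval & ~mask) | newval;

   NEWVAL is assumed to be pre-shifted into position with no bits set
   outside MASK, so no masking of NEWVAL itself is needed here.  */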
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  enum machine_mode mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask OLDVAL into position within the word.  */
      oldval = convert_modes (SImode, mode, oldval, 1);
      oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Shift and mask NEWVAL into position within the word.  */
      newval = convert_modes (SImode, mode, newval, 1);
      newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
				    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  x = gen_rtx_COMPARE (CCmode, x, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
}
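/* Illustrative sketch (not from the original source): for SImode with
   seq_cst success ordering, the emitted sequence has this shape:

        hwsync
     1: lwarx   r, 0, mem        -- load-locked
        cmpw    cr0, r, oldval
        bne-    cr0, 2f          -- unlikely jump, see emit_unlikely_jump
        stwcx.  newval, 0, mem   -- store-conditional
        bne-    cr0, 1b          -- retried unless "is_weak"
     2: isync

   with CR0.EQ left set exactly when the exchange succeeded.  */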
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  enum machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = (enum memmodel) INTVAL (model_rtx);
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position within the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      switch (code)
	{
	case IOR:
	case XOR:
	  /* We've already zero-extended VAL.  That is sufficient to
	     make certain that it does not affect other bits.  */
	  mask = NULL;
	  break;

	case AND:
	  /* If we make certain that all of the other bits in VAL are
	     set, that will be sufficient to not affect other bits.  */
	  x = gen_rtx_NOT (SImode, mask);
	  x = gen_rtx_IOR (SImode, x, val);
	  emit_insn (gen_rtx_SET (VOIDmode, val, x));
	  mask = NULL;
	  break;

	case NOT:
	case PLUS:
	case MINUS:
	  /* These will all affect bits outside the field and need
	     adjustment via MASK within the loop.  */
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Prepare to adjust the return value.  */
      before = gen_reg_rtx (SImode);
      if (after)
	after = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
				 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
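/* Illustrative note (not from the original source): for a sub-word
   atomic AND, the adjustment above ORs ~MASK into VAL so that every
   bit outside the field reads as all-ones:

     val |= ~mask;
     new = old & val;    -- bytes outside the field are unchanged

   which is why AND, unlike PLUS/MINUS/NOT, needs no per-iteration
   masking inside the retry loop.  */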
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (VOIDmode,
				simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode, ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */
	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx,
						       nsrc)
				: gen_movsi_update (breg, breg, delta_rtx,
						    nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx,
						    nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (VOIDmode,
					      gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (VOIDmode, basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg,
						   offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  j++;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (VOIDmode,
				  simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
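/* Illustrative note (not from the original source): for an overlapping
   copy such as dst = r4:r5, src = r3:r4, a forward walk would emit

     mr r4, r3
     mr r5, r4    -- reads the r4 just clobbered

   so when REGNO (src) < REGNO (dst) the subwords are moved from the
   highest register downwards instead, making the overlap safe.  */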
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static inline bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}

/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}

/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
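/* Illustrative note (not from the original source): ALTIVEC_REG_BIT
   maps V0 to the most significant bit of the 32-bit word, so a
   function whose only live vector registers are V20 and V21 computes

     mask = (0x80000000 >> 20) | (0x80000000 >> 21) = 0x00000C00

   matching the bit numbering used by the VRSAVE SPR itself.  */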
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.)  */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (GET_CODE (insn) == CALL_INSN
	    && SIBLING_CALL_P (insn))
	  {
	    info_ptr->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info_ptr->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
	info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info_ptr->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
}

static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};

static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;
  bool lr_save_p;
  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
      && info->first_gp_reg_save < 31
      && !global_regs_p (info->first_gp_reg_save, 32))
    strategy |= SAVRES_MULTIPLE;

  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
	 we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
      || global_regs_p (info->first_fp_reg_save, 64))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_gp_reg_save == 32
      || (!(strategy & SAVRES_MULTIPLE)
	  && global_regs_p (info->first_gp_reg_save, 32)))
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
      || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
      if (info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_FPRS;
	    break;
	  }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
	       || !(strategy & SAVE_INLINE_GPRS)
	       || !(strategy & SAVE_INLINE_FPRS)
	       || !(strategy & SAVE_INLINE_VRS)
	       || !(strategy & REST_INLINE_FPRS)
	       || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; In those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
	  || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE, so
     just punt here.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!save_reg_p (i))
	  {
	    strategy |= REST_INLINE_GPRS;
	    break;
	  }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
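/* Illustrative note (not from the original source): the return value
   is a bitmask.  A 32-bit V.4 leaf function compiled with -Os and no
   FP or vector saves might end up with its GPR bits set as

     SAVRES_MULTIPLE | SAVE_INLINE_GPRS | REST_INLINE_GPRS

   meaning "use stmw in the prologue and lmw in the epilogue, with no
   out-of-line GPR save/restore calls".  */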
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (P)		| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (P)		| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| SPE: area for 64-bit GP registers	|
		+---------------------------------------+
		| SPE alignment padding			|
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyway.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
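/* Worked example (not from the original source): a 32-bit V.4 function
   with no parameter/alloca/varargs areas, 16 bytes of locals, three
   GPRs saved (12 bytes) and two FPRs saved (16 bytes) gets

     save_size  = RS6000_ALIGN (12 + 16, 8)      = 32
     total_size = RS6000_ALIGN (8 + 16 + 32, 16) = 64

   assuming the 16-byte V.4 stack boundary.  */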
static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
	cfun->machine->insn_chain_scanned_p
	  = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }
  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So... since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
				 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! crtl->is_leaf
		       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
		       && info_ptr->spe_64bit_regs_used != 0
		       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;
  /* Determine various sizes.  */
  info_ptr->reg_size     = reg_size;
  info_ptr->fixed_size   = RS6000_SAVE_AREA;
  info_ptr->vars_size    = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size    = RS6000_ALIGN (crtl->outgoing_args_size,
					 TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
		       + info_ptr->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info_ptr->fixed_size + info_ptr->vars_size
	    + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);
  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = info_ptr->vrsave_save_offset & 0xF;
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	      - info_ptr->altivec_padding_size
	      - info_ptr->altivec_size;
	  gcc_assert (info_ptr->altivec_size == 0
		      || info_ptr->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
	}
      else
	info_ptr->ehrd_offset      = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset   = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset   = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset   = - info_ptr->fp_size;
      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset   = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
	{
	  /* Align stack so SPE GPR save area is aligned on a
	     double-word boundary.  */
	  if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
	    info_ptr->spe_padding_size
	      = 8 - (-info_ptr->cr_save_offset % 8);
	  else
	    info_ptr->spe_padding_size = 0;

	  info_ptr->spe_gp_save_offset
	    = info_ptr->cr_save_offset
	      - info_ptr->spe_padding_size
	      - info_ptr->spe_gp_size;

	  /* Adjust for SPE case.  */
	  info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
	}
      else if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = 16 - (-info_ptr->vrsave_save_offset % 16);
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	      - info_ptr->altivec_padding_size
	      - info_ptr->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
	}
      else
	info_ptr->ehrd_offset    = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset      -= ehrd_size;
      info_ptr->lr_save_offset   = reg_size;
      break;
    }
  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
				      + info_ptr->gp_size
				      + info_ptr->altivec_size
				      + info_ptr->altivec_padding_size
				      + info_ptr->spe_gp_size
				      + info_ptr->spe_padding_size
				      + ehrd_size
				      + info_ptr->cr_size
				      + info_ptr->vrsave_size,
				      save_align);

  non_fixed_size = (info_ptr->vars_size
		    + info_ptr->parm_size
		    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
				       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info_ptr->calls_p
      || (DEFAULT_ABI == ABI_AIX
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || rs6000_ra_ever_killed ())
    info_ptr->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
						      using_static_chain_p);

  if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
      || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_VRS))
    info_ptr->lr_save_p = 1;

  if (info_ptr->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);
  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables is more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx i;

	  /* FIXME: This should be implemented with attributes...

		 (set_attr "spe64" "true")....then,
		 if (get_spe64(insn)) return true;

	     It's the only reliable way to do the stuff below.  */

	  i = PATTERN (insn);
	  if (GET_CODE (i) == SET)
	    {
	      enum machine_mode mode = GET_MODE (SET_SRC (i));

	      if (SPE_VECTOR_MODE (mode))
		return true;
	      if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
		return true;
	    }
	}
    }

  return false;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (!info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";	break;
    case ABI_NONE:	 abi_string = "NONE";		break;
    case ABI_AIX:	 abi_string = "AIX";		break;
    case ABI_DARWIN:	 abi_string = "Darwin";		break;
    case ABI_V4:	 abi_string = "V.4";		break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
	     info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
	     info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
	gen_rtx_MEM
	  (Pmode,
	   memory_address
	     (Pmode,
	      plus_constant (Pmode,
			     copy_to_reg
			       (gen_rtx_MEM (Pmode,
					     memory_address (Pmode, frame))),
			     RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg--;

      if (nvreg > 0)
	return false;
    }

  /* Under the AIX ABI we can't allow calls to non-local functions,
     because the callee may have a different TOC pointer to the
     caller and there's no way to ensure we restore the TOC when we
     return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || (DEFAULT_ABI == ABI_AIX
	  && decl
	  && !DECL_EXTERNAL (decl)
	  && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
	  && (!TARGET_SECURE_PLT
	      || !flag_pic
	      || (decl
		  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
	  || lookup_attribute ("shortcall", attr_list))
	return true;
    }

  return false;
}
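/* Illustration only (not part of GCC itself): the longcall/shortcall
   test above reacts to user declarations like the ones sketched below.
   The function names are hypothetical; only the attributes matter.  */
#if 0
extern int far_fn (int) __attribute__ ((longcall));
extern int near_fn (int);

int
caller (int x)
{
  return far_fn (x);	/* "longcall" blocks the sibcall optimization.  */
}

int
caller2 (int x)
{
  return near_fn (x);	/* Eligible for a sibcall, ABI permitting.  */
}
#endif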
/* NULL if INSN insn is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
	  || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
static bool
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx pat = PATTERN (insn);
	int i;

	if (GET_CODE (pat) == PARALLEL)
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      rtx sub = XVECEXP (pat, 0, i);
	      if (GET_CODE (sub) == USE)
		{
		  sub = XEXP (sub, 0);
		  if (GET_CODE (sub) == UNSPEC
		      && XINT (sub, 1) == UNSPEC_TOC)
		    return 1;
		}
	    }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
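/* For illustration, with a medium or large code model the function above
   produces an address of roughly this shape:

	(lo_sum (high (unspec [(symbol_ref) (reg 2)] tocrel))
		(unspec [(symbol_ref) (reg 2)] tocrel))

   while for the small model it is just the tocrel unspec itself, later
   matched by the TOC-relative load patterns.  This RTL sketch is
   approximate; the patterns in rs6000.md are authoritative.  */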
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
		    ? gen_movsi_update_stack (stack_reg, stack_reg,
					      todec, stack_reg)
		    : gen_movdi_di_update_stack (stack_reg, stack_reg,
						 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (VOIDmode, stack_reg,
			     gen_rtx_PLUS (Pmode, stack_reg,
					   GEN_INT (-size))));
}
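/* Illustration only: for a small constant frame the sequence emitted
   above amounts to a single store-with-update,

	stwu 1,-SIZE(1)		(32-bit)
	stdu 1,-SIZE(1)		(64-bit)

   while frames larger than 32767 bytes first load -SIZE into r0 and
   then use an indexed store-with-update (stwux/stdux).  The exact code
   depends on the patterns selected by gen_mov*_update_stack.  */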
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, r12,
			      plus_constant (Pmode, stack_pointer_rtx,
					     -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (VOIDmode, r0,
				  gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, r0,
				plus_constant (Pmode, r12, -rounded_size)));


      /* Step 3: the loop

	 while (TEST_ADDR != LAST_ADDR)
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, r0));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
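/* Illustration only (not part of GCC): the offsets chosen by the
   constant-size case above can be modelled with the sketch below,
   assuming a hypothetical 4 KiB PROBE_INTERVAL.  Each probe touches
   the word at SP - (first + i).  */
#if 0
#include <stdio.h>

static void
probe_offsets_sketch (long first, long size)
{
  long i;

  for (i = 4096; i < size; i += 4096)
    printf ("probe at SP-%ld\n", first + i);
  printf ("probe at SP-%ld\n", first + size);
}
#endif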
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("{cmp|cmpd} 0,%0,%1", xops);
  else
    output_asm_insn ("{cmp|cmpw} 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("{cal %0,%1(%0)|addi %0,%0,%1}", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("{st|stw} %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
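/* For reference, the loop emitted above looks roughly like this with
   r12 as TEST_ADDR and r0 as LAST_ADDR on a 32-bit target (the label
   numbers and the 4096-byte interval are illustrative):

	.LPSRL0:
		cmpw 0,12,0
		beq 0,.LPSRE0
		addi 12,12,-4096
		stw 0,0(12)
		b .LPSRL0
	.LPSRE0:

   64-bit targets use the cmpd form of the comparison per the
   mnemonics in the code above.  */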
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx rreg)
{
  rtx real, temp;

  if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      int i;

      gcc_checking_assert (val == 0);
      real = PATTERN (insn);
      if (GET_CODE (real) == PARALLEL)
	for (i = 0; i < XVECLEN (real, 0); i++)
	  if (GET_CODE (XVECEXP (real, 0, i)) == SET)
	    {
	      rtx set = XVECEXP (real, 0, i);

	      RTX_FRAME_RELATED_P (set) = 1;
	    }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    gcc_checking_assert (val == 0);
  else
    real = replace_rtx (real, reg,
			gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
							  STACK_POINTER_REGNUM),
				      GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
	SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
	SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
	{
	  temp = simplify_rtx (XEXP (SET_DEST (set), 0));
	  if (temp)
	    XEXP (SET_DEST (set), 0) = temp;
	}
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
	if (GET_CODE (XVECEXP (real, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (real, 0, i);

	    temp = simplify_rtx (SET_SRC (set));
	    if (temp)
	      SET_SRC (set) = temp;
	    temp = simplify_rtx (SET_DEST (set));
	    if (temp)
	      SET_DEST (set) = temp;
	    if (GET_CODE (SET_DEST (set)) == MEM)
	      {
		temp = simplify_rtx (XEXP (SET_DEST (set), 0));
		if (temp)
		  XEXP (SET_DEST (set), 0) = temp;
	      }
	    RTX_FRAME_RELATED_P (set) = 1;
	  }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

  return insn;
}
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
		   vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs [i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (VOIDmode,
			     reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
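/* Illustration only: a typical use of the helpers above, storing GPR 30
   at offset -8 from a frame base register and reloading it later.  The
   register number and offset are made up for the example.  */
#if 0
  emit_insn (gen_frame_store (gen_rtx_REG (Pmode, 30), frame_reg, -8));
  /* ... function body ... */
  emit_insn (gen_frame_load (gen_rtx_REG (Pmode, 30), frame_reg, -8));
#endif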
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
			 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
			 || (TARGET_E500_DOUBLE && mode == DFmode)
			 || (TARGET_SPE_ABI
			     && SPE_VECTOR_MODE (mode)
			     && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

enum {
  SAVRES_LR = 0x1,
  SAVRES_SAVE = 0x2,
  SAVRES_REG = 0x0c,
  SAVRES_GPR = 0,
  SAVRES_FPR = 4,
  SAVRES_VR  = 8
};

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - SPE targets use different prefixes for 32/64-bit registers, and
       neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
	prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
	prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
	abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
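/* For example, the synthesized names come out as _savegpr_29 and
   _restgpr_29_x on 32-bit SVR4, and as _savegpr0_29 or _savegpr1_29 on
   64-bit ELF depending on whether the routine also handles the link
   register.  Which set is emitted follows the ABI cases above.  */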
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
			 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
	  && info->spe_64bit_regs_used != 0
	  && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
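/* In tabular form, the pointer register chosen above is:

	ABI		FPR routines	LR variants	otherwise
	AIX		r1		r1		r12
	Darwin		r1		r11		r11
	V.4		r11		r11		r11

   (r1 is the stack pointer; r11 and r12 are the usual static chain
   and scratch registers.)  */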
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
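/* For reference, the PARALLEL built above for e.g. a GPR save that also
   stores LR has roughly this shape (element order per the code above,
   the symbol name and register numbers are illustrative):

	(parallel [(clobber (reg:P 65))		; LR
		   (use (symbol_ref "_savegpr0_29"))
		   (use (reg:P 11))		; pointer register
		   (set (mem) (reg 29))
		   ...
		   (set (mem) (reg 31))
		   (set (mem) (reg 0))])	; saved LR copy

   A restore with SAVRES_LR instead begins with a return rtx and is
   emitted as a jump_insn.  This is a sketch, not the authoritative
   pattern.  */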
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
	   && (!call_used_regs[reg]
	       || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
		   && !TARGET_SINGLE_PIC_BASE
		   && TARGET_TOC && TARGET_MINIMAL_TOC)))
	  || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
	      && !TARGET_SINGLE_PIC_BASE
	      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
		  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			      && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			      && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
    reg_inuse |= 1 << (R);			\
  } while (0)
#define END_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) != 0);	\
    reg_inuse &= ~(1 << (R));			\
  } while (0)
#define NOT_INUSE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif

  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
	 address by modifying the first 5 instructions of the function
	 to branch to the overriding function.  This is necessary to
	 permit function pointers that point to the old function to
	 actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }
  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0. */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
	{
	  insn = emit_move_insn (reg0,
				 gen_rtx_REG (Pmode, LR_REGNO));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
	 assumptions about the offsets of various bits of the stack
	 frame.  */
      gcc_assert (info->gp_save_offset == -220
		  && info->fp_save_offset == -144
		  && info->lr_save_offset == 8
		  && info->cr_save_offset == 4
		  && info->push_p
		  && info->lr_save_p
		  && (!crtl->calls_eh_return
		      || info->ehrd_offset == -432)
		  && info->vrsave_save_offset == -224
		  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
	 in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
					    gen_rtx_REG (SImode,
							 LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_SYMBOL_REF (Pmode,
							    "*save_world"));
      /* We do floats first so that the instruction pattern matches
	 properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
					  ? DFmode : SFmode,
					  info->first_fp_reg_save + i),
			     frame_reg_rtx,
			     info->fp_save_offset + frame_off + 8 * i);
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (V4SImode,
					  info->first_altivec_reg_save + i),
			     frame_reg_rtx,
			     info->altivec_save_offset + frame_off + 16 * i);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			     frame_reg_rtx,
			     info->gp_save_offset + frame_off + reg_size * i);

      /* CR register traditionally saved as CR2.  */
      RTVEC_ELT (p, j++)
	= gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
			   frame_reg_rtx, info->cr_save_offset + frame_off);
      /* Explain about use of R0.  */
      if (info->lr_save_p)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (reg0,
			     frame_reg_rtx, info->lr_save_offset + frame_off);
      /* Explain what happens to the stack pointer.  */
      {
	rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
	RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
			    treg, GEN_INT (-info->total_size));
      sp_off = frame_off = info->total_size;
    }
  strategy = info->savres_strategy;

  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
	  || crtl->calls_eh_return))
    {
      bool need_r11 = (TARGET_SPE
		       ? (!(strategy & SAVE_INLINE_GPRS)
			  && info->spe_64bit_regs_used == 0)
		       : (!(strategy & SAVE_INLINE_FPRS)
			  || !(strategy & SAVE_INLINE_GPRS)
			  || !(strategy & SAVE_INLINE_VRS)));
      int ptr_regno = -1;
      rtx ptr_reg = NULL_RTX;
      int ptr_off = 0;

      if (info->total_size < 32767)
	frame_off = info->total_size;
      else if (need_r11)
	ptr_regno = 11;
      else if (info->cr_save_p
	       || info->lr_save_p
	       || info->first_fp_reg_save < 64
	       || info->first_gp_reg_save < 32
	       || info->altivec_size != 0
	       || info->vrsave_mask != 0
	       || crtl->calls_eh_return)
	ptr_regno = 12;
      else
	{
	  /* The prologue won't be saving any regs so there is no need
	     to set up a frame register to access any frame save area.
	     We also won't be using frame_off anywhere below, but set
	     the correct value anyway to protect against future
	     changes to this function.  */
	  frame_off = info->total_size;
	}
      if (ptr_regno != -1)
	{
	  /* Set up the frame offset to that needed by the first
	     out-of-line save function.  */
	  START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
	    gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
	  else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
	    ptr_off = info->gp_save_offset + info->gp_size;
	  else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
	    ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
			| SAVE_NOINLINE_FPRS_SAVES_LR)))
	{
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->lr_save_offset + frame_off));
	  mem = gen_rtx_MEM (Pmode, addr);
	  /* This should not be of rs6000_sr_alias_set, because of
	     __builtin_return_address.  */

	  insn = emit_move_insn (mem, reg);
	  rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				NULL_RTX, NULL_RTX);
	  END_USE (0);
	}
    }
  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = (DEFAULT_ABI == ABI_AIX
		   && !(strategy & (SAVE_INLINE_GPRS
				    | SAVE_NOINLINE_GPRS_SAVES_LR))
		   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11))
    {
      rtx set;

      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Now, there's no way that dwarf2out_frame_debug_expr is going
	 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
	 But that's OK.  All we have to do is specify that _one_ condition
	 code register is saved in this stack slot.  The thrower's epilogue
	 will then restore all the call-saved registers.
	 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux.  */
      set = gen_rtx_SET (VOIDmode, cr_save_rtx,
			 gen_rtx_REG (SImode, CR2_REGNO));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
    }
  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int i;

      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	if (save_reg_p (info->first_fp_reg_save + i))
	  emit_frame_save (frame_reg_rtx,
			   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
			    ? DFmode : SFmode),
			   info->first_fp_reg_save + i,
			   info->fp_save_offset + frame_off + 8 * i,
			   sp_off - frame_off);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
	gcc_checking_assert (frame_off == 0);
      else
	{
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  NOT_INUSE (ptr_regno);
	  emit_insn (gen_add3_insn (ptr_reg,
				    frame_reg_rtx, GEN_INT (frame_off)));
	}
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
				     info->fp_save_offset,
				     info->lr_save_offset,
				     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
			    NULL_RTX, NULL_RTX);
      if (lr)
	END_USE (0);
    }
  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_SPE_ABI
      && info->spe_64bit_regs_used != 0
      && info->first_gp_reg_save != 32)
    {
      int i;
      rtx spe_save_area_ptr;
      HOST_WIDE_INT save_off;
      int ool_adjust = 0;

      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && (strategy & SAVE_INLINE_GPRS));

      if (spe_regs_addressable)
	{
	  spe_save_area_ptr = frame_reg_rtx;
	  save_off = frame_off;
	}
      else
	{
	  /* Make r11 point to the start of the SPE save area.  We need
	     to be careful here if r11 is holding the static chain.  If
	     it is, then temporarily save it in r0.  */
	  HOST_WIDE_INT offset;

	  if (!(strategy & SAVE_INLINE_GPRS))
	    ool_adjust = 8 * (info->first_gp_reg_save
			      - (FIRST_SAVRES_REGISTER + 1));
	  offset = info->spe_gp_save_offset + frame_off - ool_adjust;
	  spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
	  save_off = frame_off - offset;

	  if (using_static_chain_p)
	    {
	      rtx r0 = gen_rtx_REG (Pmode, 0);

	      START_USE (0);
	      gcc_assert (info->first_gp_reg_save > 11);

	      emit_move_insn (r0, spe_save_area_ptr);
	    }
	  else if (REGNO (frame_reg_rtx) != 11)
	    START_USE (11);

	  emit_insn (gen_addsi3 (spe_save_area_ptr,
				 frame_reg_rtx, GEN_INT (offset)));
	  if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
	    frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if ((strategy & SAVE_INLINE_GPRS))
	{
	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      emit_frame_save (spe_save_area_ptr, reg_mode,
			       info->first_gp_reg_save + i,
			       (info->spe_gp_save_offset + save_off
				+ reg_size * i),
			       sp_off - save_off);
	}
      else
	{
	  insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
					 info->spe_gp_save_offset + save_off,
					 0, reg_mode,
					 SAVRES_SAVE | SAVRES_GPR);

	  rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
				NULL_RTX, NULL_RTX);
	}

      /* Move the static chain pointer back.  */
      if (!spe_regs_addressable)
	{
	  if (using_static_chain_p)
	    {
	      emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
	      END_USE (0);
	    }
	  else if (REGNO (frame_reg_rtx) != 11)
	    END_USE (11);
	}
    }
  else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
    {
      bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;
      bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
      int end_save = info->gp_save_offset + info->gp_size;
      int ptr_off;

      if (!ptr_set_up)
	ptr_reg = gen_rtx_REG (Pmode, ptr_regno);

      /* Need to adjust r11 (r12) if we saved any FPRs.  */
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  if (ptr_set_up)
	    frame_off = -end_save;
	  else
	    NOT_INUSE (ptr_regno);
	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else if (!ptr_set_up)
	{
	  NOT_INUSE (ptr_regno);
	  emit_move_insn (ptr_reg, frame_reg_rtx);
	}
      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
				     info->gp_save_offset + ptr_off,
				     info->lr_save_offset + ptr_off,
				     reg_mode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (lr)
	END_USE (0);
    }
  else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
    {
      rtvec p;
      int i;

      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			     frame_reg_rtx,
			     info->gp_save_offset + frame_off + reg_size * i);
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
			    NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info))
    {
      int i;

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_frame_save (frame_reg_rtx, reg_mode,
			   info->first_gp_reg_save + i,
			   info->gp_save_offset + frame_off + reg_size * i,
			   sp_off - frame_off);
    }
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	}

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  insn
	    = gen_frame_store (gen_rtx_REG (reg_mode, regno),
			       sp_reg_rtx,
			       info->ehrd_offset + sp_off + reg_size * (int) i);
	  RTVEC_ELT (p, i) = insn;
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }
  /* In AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx save_insn, join_insn, note;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
	{
	  START_USE (0);
	  emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
	}
      else
	START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at instruction to which this function returns.  If it's
	 restoring r2, then we know we've already saved r2.  We can't
	 unconditionally save r2 because the value we have will already
	 be updated if we arrived at this function via a plt call or
	 toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
      toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
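      /* These literals are the encodings of the TOC restore following a
	 call: 0x80410014 is "lwz 2,20(1)" and 0xE8410028 is "ld 2,40(1)",
	 i.e. the reload of r2 from the TOC save slot in the 32-bit and
	 64-bit ABIs respectively.  */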
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, compare_result,
			      gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_EQ (VOIDmode, compare_result,
					       const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
				   TOC_REGNUM, frame_off + 5 * reg_size,
				   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
	 have a CFG that has different saves along different paths.
	 Move the note to a dummy blockage insn, which describes that
	 R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
	 containing both the branch and the store.  That might let the
	 code that minimizes the number of DW_CFA_advance opcodes better
	 freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
	remove_note (save_insn, note);
      else
	note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
			       copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
	{
	  emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
	  END_USE (0);
	}
      else
	END_USE (11);
    }
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);
      /* See the large comment above about why CR2_REGNO is used.  */
      rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
	{
	  rtx set;

	  START_USE (0);
	  cr_save_rtx = gen_rtx_REG (SImode, 0);
	  insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
	}
      insn = emit_move_insn (mem, cr_save_rtx);
      END_USE (REGNO (cr_save_rtx));

      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
			    NULL_RTX, NULL_RTX);
    }
  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_mask != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      NOT_INUSE (0);
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    NOT_INUSE (0);
	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      save_regno = 12;
      if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
	save_regno = 11;
      else if (REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
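  /* Roughly, the sequence built above looks like this (illustrative,
     assuming r12 is free and a vrsave_mask of 0x00030000, i.e. v14/v15
     live):
	 mfspr r12,256         # read VRSAVE
	 stw   r12,<off>(r11)  # save the caller's value
	 oris  r12,r12,0x3     # or in our mask
	 mtspr 256,r12         # update VRSAVE  */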
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
#if TARGET_MACHO
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
    }
}
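/* Note on the gen_frame_store just above: 5 * reg_size is the
   ABI-defined TOC save slot in the stack frame header, offset 20 with
   4-byte registers and offset 40 with 8-byte registers.  */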
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp registers.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }

  /* Write .extern for AIX common mode routines, if needed.  */
  if (! TARGET_POWERPC && ! common_mode_defined)
    {
      fputs ("\t.extern __mulh\n", file);
      fputs ("\t.extern __mull\n", file);
      fputs ("\t.extern __divss\n", file);
      fputs ("\t.extern __divus\n", file);
      fputs ("\t.extern __quoss\n", file);
      fputs ("\t.extern __quous\n", file);
      common_mode_defined = 1;
    }

  rs6000_pic_labelno++;
}
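/* For example (illustrative, not output verbatim by this file): with
   out-of-line FPR saves starting at f28, the directives written would
   look something like
	.extern _savefpr_28
	.extern _restfpr_28_x
   where the exact names come from rs6000_savres_routine_name.  */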
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
					reg));

  if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
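/* For instance (illustrative): if CR2, CR3 and CR4 were saved and the
   multiple-move path is taken, the PARALLEL above becomes a single
	mtcrf 0x38,rN
   (mask bits 1<<(7-2) | 1<<(7-3) | 1<<(7-4)), while the fallback loop
   emits one mtcrf per condition-register field.  */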
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}
static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);
  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
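/* Worked example: with the 64-bit AIX ABI the 288 bytes just below the
   stack pointer are signal-safe, so an offset of -288 is still inside
   the red zone (returns false) while -289 is below it (returns true).
   Under ABI_V4 there is no red zone, so any negative offset returns
   true.  */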
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
  if (WORLD_SAVE_P (info))
    {
      int j;
      rtvec p;
      char rname[30];
      const char *alloc_rname;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (9 + 1
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_REG (Pmode,
						     LR_REGNO));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
	{
	  int frame_regno = 11;

	  if ((strategy & REST_INLINE_VRS) == 0)
	    {
	      /* Of r11 and r12, select the one not clobbered by an
		 out-of-line restore function for the frame register.  */
	      frame_regno = 11 + 12 - scratch_regno;
	    }
	  frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (flag_shrink_wrap
		|| (offset_below_red_zone_p
		    (info->altivec_save_offset
		     + 16 * (i - info->first_altivec_reg_save)))))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->fp_save_offset + info->fp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, reg;

		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn
		  (areg, GEN_INT (info->altivec_save_offset
				  + frame_off
				  + 16 * (i - info->first_altivec_reg_save)));

		/* AltiVec addressing mode is [reg+reg].  */
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		mem = gen_frame_mem (V4SImode, addr);

		reg = gen_rtx_REG (V4SImode, i);
		emit_move_insn (reg, mem);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);

  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + 5 * reg_size));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  /* Note: possible use of r0 here to address SPE regs.  */
	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && restoring_GPRs_inline);

      if (!spe_regs_addressable)
	{
	  int ool_adjust = 0;
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */

	  if (!restoring_GPRs_inline)
	    ool_adjust = 8 * (info->first_gp_reg_save
			      - (FIRST_SAVRES_REGISTER + 1));
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + frame_off
					  - ool_adjust)));
	  /* Keep the invariant that frame_reg_rtx + frame_off points
	     at the top of the stack frame.  */
	  frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if (restoring_GPRs_inline)
	{
	  HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;

	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		emit_move_insn (reg, mem);
	      }
	}
      else
	rs6000_emit_savres_rtx (info, frame_reg_rtx,
				info->spe_gp_save_offset + frame_off,
				info->lr_save_offset + frame_off,
				reg_mode,
				SAVRES_GPR | SAVRES_LR);
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_insn (gen_frame_load
		     (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
		      frame_reg_rtx,
		      info->gp_save_offset + frame_off + reg_size * i));
    }
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!restoring_GPRs_inline
	    || using_load_multiple
	    || rs6000_reg_live_or_pic_offset_p (i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);

	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }
  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if (save_reg_p (info->first_fp_reg_save + i))
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     info->fp_save_offset + frame_off + 8 * i));
	  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall)
    {
      rtvec p;
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      if (! restoring_FPRs_inline)
	{
	  p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
	  RTVEC_ELT (p, 0) = ret_rtx;
	}
      else
	{
	  if (cfa_restores)
	    {
	      /* We can't hang the cfa_restores off a simple return,
		 since the shrink-wrap code sometimes uses an existing
		 return.  This means there might be a path from
		 pre-prologue code to this return, and dwarf2cfi code
		 wants the eh_frame unwinder state to be the same on
		 all paths to any point.  So we need to emit the
		 cfa_restores before the return.  For -m64 we really
		 don't need epilogue cfa_restores at all, except for
		 this irritating dwarf2cfi with shrink-wrap
		 requirement; the stack red-zone means eh_frame info
		 from the prologue telling the unwinder to restore
		 from the stack is perfectly good right to the end of
		 the function.  */
	      emit_insn (gen_blockage ());
	      emit_cfa_restores (cfa_restores);
	      cfa_restores = NULL_RTX;
	    }
	  p = rtvec_alloc (2);
	  RTVEC_ELT (p, 0) = simple_return_rtx;
	}

      RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
			  ? gen_rtx_USE (VOIDmode,
					 gen_rtx_REG (Pmode, LR_REGNO))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, LR_REGNO)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  rtx sym;

	  if (flag_shrink_wrap)
	    cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

	  sym = rs6000_savres_routine_sym (info,
					   SAVRES_FPR | (lr ? SAVRES_LR : 0));
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (Pmode,
						       DEFAULT_ABI == ABI_AIX
						       ? 1 : 11));
	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	      RTVEC_ELT (p, i + 4)
		= gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	      if (flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    rtx deleted_debug_label = NULL_RTX;
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 PowerPC (and the embedded ABI derived from it) uses a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
      if (! strcmp (language_string, "GNU C")
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);
      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
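      /* Worked example of the encoding above: for f (int a, double b),
	 `a' consumes one bit (0 = fixed) at bit 31 and `b' two bits
	 (11 = double float) at bits 30-29, so parm_info is 0x60000000,
	 fixed_parms is 1 and float_parms is 1.  */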
      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      RS6000_OUTPUT_BASENAME (file, fname);
      putc ('-', file);
      rs6000_output_function_entry (file, fname);
      putc ('\n', file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
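/* Illustrative output for a simple thunk (delta = 8, no vcall offset,
   32-bit), roughly:
	addi r3,r3,8
	b    <target function>
   i.e. the `this' pointer is adjusted in place and control jumps
   straight to the target, never returning here.  */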
/* A quick summary of the various types of 'constant-pool tables'
   used by the code generator.

   Target	Flags		Name		One table per

   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	      contains:
					made by	 addrs?	fp?	sum?

   AIX TOC		2	crt0	as	 Y	option	option
   AIX minimal TOC	30	prolog	gcc	 Y	Y	option
   SVR4 SDATA		13	crt0	gcc	 N	Y	N
   SVR4 pic		30	prolog	ld	 Y	not yet	N
   SVR4 PIC		30	prolog	gcc	 Y	option	option
   EABI TOC		30	prolog	gcc	 Y	option	option  */
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)

#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
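/* Usage example: "func$local$42" becomes "func_local_42"; a name with
   no '$', or one whose first character is '$' (q == name), is returned
   unchanged.  */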
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     float constants with -mno-fp-in-toc.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc_toc_hash_struct ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));
	  return;
	}
    }
  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    {
      ASM_OUTPUT_ALIGN (file, 3);
    }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
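  /* Example of the output above: on a 64-bit target, the double
     constant 1.0 (bit pattern 0x3ff0000000000000) produces
	.tc FD_3ff00000_0[TC],0x3ff0000000000000
     and a later duplicate of the same constant is emitted as a .set
     alias to this label by the hash-table check at the top.  */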
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
#if HOST_BITS_PER_WIDE_INT == 32
	  lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
			 POINTER_SIZE, &low, &high, 0);
#else
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
#endif
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	    }
	  return;
	}
    }
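  /* Illustrative example (not in the original source): a SImode
     CONST_INT 5 on a 32-bit target fills exactly one Pmode slot and
     emits roughly

	.tc IS_5[TC],0x5

     On a 64-bit target the same SImode constant gets the big-endian
     padding described above: the value is shifted into the high half
     of the doubleword, giving roughly

	.tc ID_5_0[TC],0x500000000
  */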
  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
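/* Illustrative example (not in the original source): for the 3-byte
   input "Hi\n", the loop above keeps printable characters inside one
   quoted .byte string and drops to decimal for the newline, emitting

	.byte "Hi"
	.byte 10

   The 512-character counter forces a string break well before the
   assembler's truncation limit is reached.  */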
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   FILENAME.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
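/* Illustrative usage sketch (not in the original source; the file
   name is hypothetical).  */
#if 0
  char *name;

  /* Leading directories are stripped and the last period is replaced
     by the descriptor, giving "_foo_bss_" in freshly xmalloc'd
     storage.  */
  rs6000_gen_section_name (&name, "dir/foo.c", "_bss_");
#endif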
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode, 0);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, 1, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;

      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{cau|addis} %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
	{
	  /* Don't do anything, done in output_profile_hook ().  */
	}
      else
	{
	  gcc_assert (!TARGET_32BIT);

	  asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
	  asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

	  if (cfun->static_chain_decl != NULL)
	    {
	      asm_fprintf (file, "\tstd %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	      fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	      asm_fprintf (file, "\tld %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	    }
	  else
	    fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	}
      break;
    }
}
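/* Illustrative example (not in the original source): for the V.4 ABI
   with -fPIC (flag_pic == 1) and new mnemonics, the code above emits
   roughly the following for label number 3:

	mflr 0
	bl _GLOBAL_OFFSET_TABLE_@local-4
	stw 0,4(1)
	mflr 12
	lwz 0,.LP3@got(12)
	bl _mcount@plt

   i.e. the saved LR and the address of the profile counter word are
   handed to _mcount.  The exact label spelling depends on the
   configuration.  */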
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */
static int
rs6000_variable_issue_1 (rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If the insn has no reservation but we reach here anyway, keep the
     issue count unchanged.  */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if (rs6000_sched_groups
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_cpu_attr == CPU_PPC603
		 || rs6000_cpu_attr == CPU_PPC604
		 || rs6000_cpu_attr == CPU_PPC604E
		 || rs6000_cpu_attr == CPU_PPC620
		 || rs6000_cpu_attr == CPU_PPC630
		 || rs6000_cpu_attr == CPU_PPC750
		 || rs6000_cpu_attr == CPU_PPC7400
		 || rs6000_cpu_attr == CPU_PPC7450
		 || rs6000_cpu_attr == CPU_PPCE5500
		 || rs6000_cpu_attr == CPU_PPCE6500
		 || rs6000_cpu_attr == CPU_POWER4
		 || rs6000_cpu_attr == CPU_POWER5
		 || rs6000_cpu_attr == CPU_POWER7
		 || rs6000_cpu_attr == CPU_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_COMPARE:
		case TYPE_DELAYED_COMPARE:
		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_STORE_U:
	  case TYPE_STORE_UX:
	  case TYPE_FPSTORE:
	  case TYPE_FPSTORE_U:
	  case TYPE_FPSTORE_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 4;
		      break;
		    }
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 6;
		      break;
		    }
		  case TYPE_FAST_COMPARE:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_IDIV:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 45;
		      break;
		    }
		  case TYPE_LDIV:
		    {
		      if (! store_data_bypass_p (dep_insn, insn))
			return 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	  case TYPE_LOAD_EXT:
	  case TYPE_LOAD_EXT_U:
	  case TYPE_LOAD_EXT_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {

		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x).  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 4;
		      break;
		    }
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 6;
		      break;
		    }
		  case TYPE_FAST_COMPARE:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 3;
		      break;
		    }
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 17;
		      break;
		    }
		  case TYPE_IDIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 45;
		      break;
		    }
		  case TYPE_LDIV:
		    {
		      if (set_to_load_agen (dep_insn, insn))
			return 57;
		      break;
		    }
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	      if (get_attr_type (dep_insn) == TYPE_FP)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

  if (ret != cost)
    {
      const char *dep;

      switch (REG_NOTE_KIND (link))
	{
	default:	     dep = "unknown dependency"; break;
	case REG_DEP_TRUE:   dep = "data dependency";	 break;
	case REG_DEP_OUTPUT: dep = "output dependency";	 break;
	case REG_DEP_ANTI:   dep = "anti dependency";	 break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
	  || type == TYPE_LOAD_EXT_UX
	  || type == TYPE_LOAD_UX
	  || type == TYPE_STORE_UX
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
	  || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
	  || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
	  || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
	  || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
	  || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
	  || type == TYPE_IDIV || type == TYPE_LDIV
	  || type == TYPE_INSERT_WORD)
	return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)
    {
      *base = addr_rtx;
      *offset = 0;
    }
  else if (GET_CODE (addr_rtx) == PLUS
	   && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
  else
    return false;

  return true;
}
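/* Illustrative example (not in the original source): for a MEM such
   as (mem:SI (plus:SI (reg:SI 9) (const_int 16))) with a known 4-byte
   size, this returns true with *base = (reg:SI 9), *offset = 16 and
   *size = 4; a REG+REG address such as (plus (reg 9) (reg 10)) makes
   it return false, as documented above.  */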
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_IMUL:
      case TYPE_IDIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {

      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
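/* Worked example (not in the original source): with
   -mprioritize-restricted-insns=2, a first-in-group insn of priority
   10 is returned as 11 -- just enough to win ties while leaving
   critical-path ordering in charge -- whereas with =1 it jumps
   straight to sched_max_insns_priority and dominates every other
   consideration.  */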
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_PPC601: /* ? */
    return 3;
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:
    return 2;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_CELL:
      return (reload_completed ? 8 : 0);
    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */
static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
	  return true;
    }

  return false;
}
/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}
/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL_RTX;

      if (CALL_P (insn)
	  || JUMP_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx *ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a non-pipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
	  && (recog_memoized (ready[n_ready - 2]) > 0))
	/* Simply swap first two insns.  */
	{
	  rtx tmp = ready[n_ready - 1];
	  ready[n_ready - 1] = ready[n_ready - 2];
	  ready[n_ready - 2] = tmp;
	}
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
		       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
	 issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
	 cycle and we attempt to locate another load in the ready list to
	 issue with it.

       - If the pendulum is -2, then two stores have already been
	 issued in this cycle, so we increase the priority of the first load
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
	 cycle and we attempt to locate another store in the ready list to
	 issue with it, preferring a store to an adjacent memory location to
	 facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
	 issued in this cycle, so we increase the priority of the first store
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
	   load/store instructions which make use of the LSU and which
	   would need to be accounted for to strictly model the behavior
	   of the machine.  Those instructions are currently unaccounted
	   for to help minimize compile time overhead of this code.  */
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp, load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
	/* Issuing a store, swing the load_store_pendulum to the left.  */
	load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
	/* Issuing a load, swing the load_store_pendulum to the right.  */
	load_store_pendulum++;
      else
	return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
	 the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
	return cached_can_issue_more;

      if (load_store_pendulum == 1)
	{
	  /* A load has been issued in this cycle.  Scan the ready list
	     for another load to issue with it.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem))
		{
		  /* Found a load.  Move it to the head of the ready list,
		     and adjust its priority so that it is more likely to
		     stay there.  */
		  tmp = ready[pos];
		  for (i = pos; i < *pn_ready - 1; i++)
		    ready[i] = ready[i + 1];
		  ready[*pn_ready - 1] = tmp;

		  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		    INSN_PRIORITY (tmp)++;
		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -2)
	{
	  /* Two stores have been issued in this cycle.  Increase the
	     priority of the first load in the ready list to favor it for
	     issuing in the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads.  */
		  load_store_pendulum--;

		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -1)
	{
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location.  */
	  int first_store_pos = -1;

	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem))
		{
		  rtx str_mem2;
		  /* Maintain the index of the first store found on the
		     list.  */
		  if (first_store_pos == -1)
		    first_store_pos = pos;

		  if (is_store_insn (last_scheduled_insn, &str_mem2)
		      && adjacent_mem_locations (str_mem, str_mem2))
		    {
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there.  */
		      tmp = ready[pos];
		      for (i = pos; i < *pn_ready - 1; i++)
			ready[i] = ready[i + 1];
		      ready[*pn_ready - 1] = tmp;

		      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
			INSN_PRIORITY (tmp)++;

		      first_store_pos = -1;

		      break;
		    }
		}
	      pos--;
	    }

	  if (first_store_pos >= 0)
	    {
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there.  */
	      tmp = ready[first_store_pos];
	      for (i = first_store_pos; i < *pn_ready - 1; i++)
		ready[i] = ready[i + 1];
	      ready[*pn_ready - 1] = tmp;
	      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		INSN_PRIORITY (tmp)++;
	    }
	}
      else if (load_store_pendulum == 2)
	{
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a store
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple stores.  */
		  load_store_pendulum++;

		  break;
		}
	      pos--;
	    }
	}
    }

  return cached_can_issue_more;
}
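/* Worked example (not in the original source): suppose on Power6 the
   scheduler has just issued a store (pendulum 0 -> -1).  The -1 arm
   above then scans the ready list for a second store, preferring one
   adjacent to the first so the store queue can pair them.  If instead
   two loads end the cycle (pendulum 2), the first store found gets a
   priority bump and the pendulum steps to 3, ensuring only a single
   store is promoted per cycle.  */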
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
	return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
	return true;

      if (!rs6000_sched_groups)
	return false;

      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_DELAYED_CR:
	case TYPE_CR_LOGICAL:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_INSERT_DWORD:
	case TYPE_VAR_SHIFT_ROTATE:
	case TYPE_INSERT_WORD:
	case TYPE_DELAYED_COMPARE:
	case TYPE_IMUL_COMPARE:
	case TYPE_LMUL_COMPARE:
	case TYPE_FPCOMPARE:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	case TYPE_DELAYED_COMPARE:
	case TYPE_VAR_DELAYED_COMPARE:
	case TYPE_LOAD_EXT:
	case TYPE_LOAD_EXT_U:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  return true;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || GET_CODE (insn) == NOTE
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
	return true;
      default:
	break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
	return true;
      default:
	break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
	{
	  rtx next = DEP_CON (dep);

	  if (next == next_insn
	      && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
	    return true;
	}
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in the
   last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
		 rtx next_insn, bool *group_end, int can_issue_more,
		 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
	     *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
	can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
	 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
	 a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
	 in this case the last nop will start a new group and the branch
	 will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
	can_issue_more--;

      /* Power6 and Power7 have special group ending nop.  */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
	{
	  nop = gen_group_ending_nop ();
	  emit_insn_before (nop, next_insn);
	  can_issue_more = 0;
	}
      else
	while (can_issue_more > 0)
	  {
	    nop = gen_nop ();
	    emit_insn_before (nop, next_insn);
	    can_issue_more--;
	  }

      *group_end = true;
      for (i = 0; i < issue_rate; i++)
	group_insns[i] = 0;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
	 issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  (*group_count)++;
	  end = true;
	  for (i = 0; i < issue_rate; i++)
	    group_insns[i] = 0;
	}

      while (n_nops > 0)
	{
	  nop = gen_nop ();
	  emit_insn_before (nop, next_insn);
	  if (can_issue_more == issue_rate - 1) /* new group begins */
	    end = false;
	  can_issue_more--;
	  if (can_issue_more == 0)
	    {
	      can_issue_more = issue_rate - 1;
	      (*group_count)++;
	      end = true;
	      for (i = 0; i < issue_rate; i++)
		group_insns[i] = 0;
	    }

	  n_nops--;
	}

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
	= (end
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
	(*group_count)--;

      if (sched_verbose > 6)
	fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
		 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
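/* Worked example (not in the original source): assume a 5-slot
   dispatch group whose last slot is branch-only, with three slots
   already filled (can_issue_more == 2).  For a costly non-branch
   NEXT_INSN under sched_finish_regroup_exact, a single regular nop
   suffices: it takes the fourth slot, and since NEXT_INSN cannot
   occupy the branch-only fifth slot the dispatcher must start a new
   group for it.  */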
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops'
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows:  It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following occurs:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
	can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
	return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
	= (can_issue_more == 0
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
					next_insn, &group_end, can_issue_more,
					&group_count);

      if (group_end)
	{
	  group_count++;
	  can_issue_more = 0;
	  for (i = 0; i < issue_rate; i++)
	    group_insns[i] = 0;
	}

      if (GET_MODE (next_insn) == TImode && can_issue_more)
	PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
	PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
    }

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
	break;

      if (group_end)
	{
	  /* If the scheduler had marked group termination at this location
	     (between insn and next_insn), and neither insn nor next_insn will
	     force group termination, pad the group with nops to force group
	     termination.  */
	  if (can_issue_more
	      && (rs6000_sched_insert_nops == sched_finish_pad_groups)
	      && !insn_terminates_group_p (insn, current_group)
	      && !insn_terminates_group_p (next_insn, previous_group))
	    {
	      if (!is_branch_slot_insn (next_insn))
		can_issue_more--;

	      while (can_issue_more)
		{
		  nop = gen_nop ();
		  emit_insn_before (nop, next_insn);
		  can_issue_more--;
		}
	    }

	  can_issue_more = issue_rate;
	  group_count++;
	}

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
	return;

      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
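/* Illustrative usage sketch (not in the original source): the
   selective scheduler drives these hooks roughly as follows.  */
#if 0
  void *ctx = rs6000_alloc_sched_context ();

  /* clean_p == false snapshots the current globals into CTX.  */
  rs6000_init_sched_context (ctx, false);
  /* ... schedule speculatively, clobbering the globals ...  */
  rs6000_set_sched_context (ctx);	/* roll the globals back */
  rs6000_free_sched_context (ctx);
#endif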
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
	rtx fnmem, fn_reg, toc_reg;

	if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	  error ("You cannot take the address of a nested function if you use "
		 "the -mno-pointers-to-nested-functions option.");

	fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
	fn_reg = gen_reg_rtx (Pmode);
	toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

	m_tramp = replace_equiv_address (m_tramp, addr);

	emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
	emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
	emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
	emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
			 LCT_NORMAL, VOIDmode, 4,
			 addr, Pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, Pmode,
			 ctx_reg, Pmode);
      break;
    }
}
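/* Illustrative layout note (not in the original source): the AIX
   branch above fills a 3-word function descriptor

	word 0: entry-point address   (copied from FNADDR's descriptor)
	word 1: TOC pointer           (ditto)
	word 2: static chain value    (CXT)

   with 4- or 8-byte words, which is exactly the 12/24 bytes reported
   by rs6000_trampoline_size for this ABI.  */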
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

     __attribute__((altivec(vector__)))
     __attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
     __attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
				 tree name ATTRIBUTE_UNUSED,
				 tree args,
				 int flags ATTRIBUTE_UNUSED,
				 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
	&& TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node
	  || type == long_integer_type_node)
	{
	  if (TARGET_64BIT)
	    error ("use of %<long%> in AltiVec types is invalid for "
		   "64-bit code without -mvsx");
	  else if (rs6000_warn_altivec_long)
	    warning (0, "use of %<long%> in AltiVec types is deprecated; "
		     "use %<int%>");
	}
      else if (type == long_long_unsigned_type_node
	       || type == long_long_integer_type_node)
	error ("use of %<long long%> in AltiVec types is invalid without "
	       "-mvsx");
      else if (type == double_type_node)
	error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
	{
	case DImode:
	  result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
	  break;
	case SImode:
	  result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	  break;
	case HImode:
	  result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	  break;
	case QImode:
	  result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	  break;
	case SFmode: result = V4SF_type_node; break;
	case DFmode: result = V2DF_type_node; break;
	  /* If the user says 'vector int bool', we may be handed the 'bool'
	     attribute _before_ the 'vector' attribute, and so select the
	     proper type in the 'b' case below.  */
	case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
	case V2DImode: case V2DFmode:
	  result = type;
	default: break;
	}
      break;
    case 'b':
      switch (mode)
	{
	case DImode: case V2DImode: result = bool_V2DI_type_node; break;
	case SImode: case V4SImode: result = bool_V4SI_type_node; break;
	case HImode: case V8HImode: result = bool_V8HI_type_node; break;
	case QImode: case V16QImode: result = bool_V16QI_type_node;
	default: break;
	}
      break;
    case 'p':
      switch (mode)
	{
	case V8HImode: result = pixel_V8HI_type_node;
	default: break;
	}
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
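/* Illustrative example (a note added for exposition, not original
   source): a declaration such as

     __attribute__ ((altivec (vector__))) unsigned int vui;

   reaches the handler above with ALTIVEC_TYPE 'v' and MODE SImode,
   so RESULT becomes unsigned_V4SI_type_node and the declaration
   behaves as "vector unsigned int".  */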
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
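/* Illustrative note (not from the original source): per the table
   above, a vector whose element type is "__bool int" contributes the
   vendor-extended mangling "U6__booli" to its mangled name, while
   "__pixel" contributes "u7__pixel".  */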
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
	  !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
	{
	  toc_initialized = 1;
	  fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
	  (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
	  fprintf (asm_out_file, "\t.tc ");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, "\n");

	  fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	}
      else
	fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
	{
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	  toc_initialized = 1;
	}
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
			   SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
			       unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  unsigned len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}
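/* For example, ".sdata" and ".sdata.foo" both match the template
   ".sdata" (the name must end, or continue with '.', right after the
   prefix), whereas ".sdata2" does not match ".sdata" because the
   byte after the prefix is '2'.  */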
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
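/* For example, given ADDR of the form (plus (plus (reg 9) (reg 0))
   (const_int 8)), the loop first steps past the constant term and
   then picks (reg 9), rejecting r0 as required.  */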
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

DEF_VEC_O(branch_island);
DEF_VEC_ALLOC_O(branch_island,gc);

static VEC(branch_island,gc) *branch_islands;
/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
			    int line_number)
{
  branch_island *bi = VEC_safe_push (branch_island, gc, branch_islands, NULL);

  bi->function_name = function_name;
  bi->label_name = label_name;
  bi->line_number = line_number;
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!VEC_empty (branch_island, branch_islands))
    {
      branch_island *bi = VEC_last (branch_island, branch_islands);
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
	strcpy (name_buf, name+1);
      else
	{
	  name_buf[0] = '_';
	  strcpy (name_buf+1, name);
	}
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
	      strcat (tmp_buf, name);
	      strcat (tmp_buf, "\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }
	  else
	    {
	      strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic\n");
	      strcat (tmp_buf, label);
	      strcat (tmp_buf, "_pic:\n\tmflr r11\n");
	    }

	  strcat (tmp_buf, "\taddis r11,r11,ha16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtlr r0\n");

	  strcat (tmp_buf, "\taddi r12,r11,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
	}
      else
	{
	  strcat (tmp_buf, ":\nlis r12,hi16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	}
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      VEC_pop (branch_island, branch_islands);
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
	     int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
	{
	  rtx label_rtx = gen_label_rtx ();
	  char *label_buf, temp_buf[256];
	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
				       CODE_LABEL_NUMBER (label_rtx));
	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
	  labelname = get_identifier (label_buf);
	  add_compiler_branch_island (labelname, funname, insn_line (insn));
	}
      else
	labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
	 instruction will reach 'foo', otherwise link as 'bl L42'".
	 "L42" should be a 'branch island', that will do a far jump to
	 'foo'.  Branch islands are generated in
	 macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
	       dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
	{
	  char name[32];
	  get_ppc476_thunk_name (name);
	  fprintf (file, "\tbl %s\n", name);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      else
	{
	  fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
	  fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
	}
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
	   (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
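/* SMALL_INT accepts exactly the signed 16-bit range: for X in
   [-0x8000, 0x7fff], UINTVAL (X) + 0x8000 lands in [0, 0xffff], so a
   single unsigned comparison against 0x10000 checks both bounds.  */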
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
					rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
	 it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
						     Pmode, reg_temp);
      offset =
	rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
						Pmode, reg);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (Pmode, base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    {
	      rtx mem = force_const_mem (Pmode, orig);
	      return machopic_legitimize_pic_address (mem, Pmode, reg);
	    }
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}
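/* For example, a 32-bit -mcpu=750 compile stops the scan at the
   { "750", "ppc750", 0 } row and emits "\t.machine ppc750"; a cpu_id
   with no matching name or if_set bits falls through to the final
   { NULL, "ppc", 0 } entry.  */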
#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
	{
	  fputs ("\t.size\t", file);
	  assemble_name (file, name);
	  fputs (",24\n\t.type\t.", file);
	  assemble_name (file, name);
	  fputs (",@function\n", file);
	  if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
	    {
	      fputs ("\t.globl\t.", file);
	      assemble_name (file, name);
	      putc ('\n', file);
	    }
	}
      else
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
	desc_name++;

      if (TREE_PUBLIC (decl))
	fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
	fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
	fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
		 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
		  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
		  : 2));
      if (rs6000_passes_vector)
	fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
		 (TARGET_ALTIVEC_ABI ? 2
		  : TARGET_SPE_ABI ? 3
		  : 1));
      if (rs6000_returns_struct)
	fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
		 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
#endif
}
#endif /* TARGET_ELF */
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}
static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[3] = { "PR", "RO", "RW" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
	return data_section;
      else
	return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
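/* For example, "foo[DS]" comes back as "foo": the trailing ']'
   triggers the copy and LEN - 4 drops the four-character "[DS]"
   suffix.  */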
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE || !decl)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))
		  || mask_operand (x, mode)
		  || (mode == DImode
		      && mask64_operand (x, DImode))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
	{
	  if ((outer_code == IOR || outer_code == XOR)
	      && CONST_DOUBLE_HIGH (x) == 0
	      && (CONST_DOUBLE_LOW (x)
		  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
	    {
	      *total = 0;
	      return true;
	    }
	  else if ((outer_code == AND && and64_2_operand (x, DImode))
		   || ((outer_code == SET
			|| outer_code == IOR
			|| outer_code == XOR)
		       && CONST_DOUBLE_HIGH (x) == 0))
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = 0;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode)
	       && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      if (TARGET_ISEL && !TARGET_MFCRF)
		*total = COSTS_N_INSNS (8);
	      else
		*total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (mode == Pmode)
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  if (TARGET_ISEL && !TARGET_MFCRF)
	    *total = COSTS_N_INSNS (8);
	  else
	    *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
25992 rs6000_debug_rtx_costs (rtx x
, int code
, int outer_code
, int opno
, int *total
,
25995 bool ret
= rs6000_rtx_costs (x
, code
, outer_code
, opno
, total
, speed
);
25998 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
25999 "opno = %d, total = %d, speed = %s, x:\n",
26000 ret
? "complete" : "scan inner",
26001 GET_RTX_NAME (code
),
26002 GET_RTX_NAME (outer_code
),
26005 speed
? "true" : "false");
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
		|| rs6000_cpu == PROCESSOR_POWER7)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_UNIT_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register to
   or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
			   bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
	return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
	if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
	  return NULL_TREE;

	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
/* Newton-Raphson approximation of floating point divide with just 2 passes
   (either single precision floating point, or newer machines with higher
   accuracy estimates).  Support both scalar and vector divide.  Assumes no
   trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, y1, u0, v0;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
  rtx one = rs6000_load_constant_and_splat (mode, dconst1);

  gcc_assert (code != CODE_FOR_nothing);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - (d * x0) */

  e1 = gen_reg_rtx (mode);
  rs6000_emit_madd (e1, e0, e0, e0);		/* e1 = (e0 * e0) + e0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e1, x0, x0);		/* y1 = (e1 * x0) + x0 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y1));		/* u0 = n * y1 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - (d * u0) */

  rs6000_emit_madd (dst, v0, y1, u0);		/* dst = (v0 * y1) + u0 */
}
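/* A minimal scalar sketch of the two-pass refinement above (added
   for exposition, not part of GCC; an exact reciprocal stands in for
   the hardware fres/fre estimate).  It mirrors the emitted sequence
   step for step.  */

static inline double
swdiv_two_pass_sketch (double n, double d)
{
  double x0 = 1.0 / d;		/* stand-in for the estimate insn */
  double e0 = 1.0 - d * x0;	/* nmsub */
  double e1 = e0 * e0 + e0;	/* madd */
  double y1 = e1 * x0 + x0;	/* madd */
  double u0 = n * y1;		/* mul */
  double v0 = n - d * u0;	/* nmsub */
  return v0 * y1 + u0;		/* final madd */
}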
/* Newton-Raphson approximation of floating point divide that has a low
   precision estimate.  Assumes no trapping math and finite arguments.  */

static void
rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					  UNSPEC_FRES)));

  e0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (e0, d, x0, one);		/* e0 = 1. - d * x0 */

  y1 = gen_reg_rtx (mode);
  rs6000_emit_madd (y1, e0, x0, x0);		/* y1 = x0 + e0 * x0 */

  e1 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e1, e0, e0));		/* e1 = e0 * e0 */

  y2 = gen_reg_rtx (mode);
  rs6000_emit_madd (y2, e1, y1, y1);		/* y2 = y1 + e1 * y1 */

  e2 = gen_reg_rtx (mode);
  emit_insn (gen_mul (e2, e1, e1));		/* e2 = e1 * e1 */

  y3 = gen_reg_rtx (mode);
  rs6000_emit_madd (y3, e2, y2, y2);		/* y3 = y2 + e2 * y2 */

  u0 = gen_reg_rtx (mode);
  emit_insn (gen_mul (u0, n, y3));		/* u0 = n * y3 */

  v0 = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v0, d, u0, n);		/* v0 = n - d * u0 */

  rs6000_emit_madd (dst, v0, y3, u0);		/* dst = u0 + v0 * y3 */
}
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);

  if (RS6000_RECIP_HIGH_PRECISION_P (mode))
    rs6000_emit_swdiv_high_precision (dst, n, d);
  else
    rs6000_emit_swdiv_low_precision (dst, n, d);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);
  int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					  UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
}
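/* A minimal scalar sketch of the iteration above (added for
   exposition, not part of GCC).  Starting from an estimate X0 of
   1/sqrt(SRC), each pass computes x1 = x0 * (1.5 - 0.5*src * x0*x0),
   roughly doubling the number of accurate bits.  */

static inline double
swrsqrt_refine_sketch (double src, double x0, int passes)
{
  double y = 1.5 * src - src;	/* y = 0.5 * src, as emitted above */
  int i;

  for (i = 0; i < passes; i++)
    {
      double u = x0 * x0;	/* u = x0 * x0 */
      double v = 1.5 - y * u;	/* nmsub against halfthree */
      x0 = x0 * v;		/* x1 = x0 * v */
    }
  return x0;
}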
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
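/* A portable C sketch of the SImode fallback above (added for
   exposition, not part of GCC).  popcntb leaves a per-byte population
   count in each byte; multiplying by 0x01010101 shifts copies of the
   counts so the most significant byte accumulates their sum, which
   the final right shift by 24 extracts.  */

static inline unsigned int
popcount_mult_sketch (unsigned int per_byte_counts)
{
  /* PER_BYTE_COUNTS holds, in each byte, the population count of the
     corresponding source byte (what popcntb computes).  */
  return (per_byte_counts * 0x01010101u) >> 24;
}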
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
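/* A portable C sketch of the SImode shift/xor fallback above (added
   for exposition, not part of GCC).  Starting from popcntb-style
   per-byte counts, two xor-folds collapse the byte counts so that
   the low bit of the result is the word's parity.  */

static inline unsigned int
parity_fold_sketch (unsigned int per_byte_counts)
{
  unsigned int t = per_byte_counts;

  t ^= t >> 16;			/* fold halves (lshr 16 + xor above) */
  t ^= t >> 8;			/* fold remaining bytes (lshr 8 + xor) */
  return t & 1;			/* final "and" with const1_rtx */
}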
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

static bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { CODE_FOR_altivec_vpkuhum,
      { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { CODE_FOR_altivec_vpkuwum,
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { CODE_FOR_altivec_vmrghb,
      { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { CODE_FOR_altivec_vmrghh,
      { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { CODE_FOR_altivec_vmrghw,
      { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { CODE_FOR_altivec_vmrglb,
      { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { CODE_FOR_altivec_vmrglh,
      { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { CODE_FOR_altivec_vmrglw,
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
	break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
	perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
	if (perm[i] != elt)
	  break;
      if (i == 16)
	{
	  emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
	  return true;
	}

      if (elt % 2 == 0)
	{
	  for (i = 0; i < 16; i += 2)
	    if (perm[i] != elt || perm[i + 1] != elt + 1)
	      break;
	  if (i == 16)
	    {
	      x = gen_reg_rtx (V8HImode);
	      emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
					     GEN_INT (elt / 2)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}

      if (elt % 4 == 0)
	{
	  for (i = 0; i < 16; i += 4)
	    if (perm[i] != elt
		|| perm[i + 1] != elt + 1
		|| perm[i + 2] != elt + 2
		|| perm[i + 3] != elt + 3)
	      break;
	  if (i == 16)
	    {
	      x = gen_reg_rtx (V4SImode);
	      emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
					     GEN_INT (elt / 4)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
	swapped = false;
      else if (perm[0] == elt + 16)
	swapped = true;
      else
	continue;
      for (i = 1; i < 16; ++i)
	{
	  elt = patterns[j].perm[i];
	  if (swapped)
	    elt = (elt >= 16 ? elt - 16 : elt + 16);
	  else if (one_vec && elt >= 16)
	    elt -= 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  enum insn_code icode = patterns[j].impl;
	  enum machine_mode omode = insn_data[icode].operand[0].mode;
	  enum machine_mode imode = insn_data[icode].operand[1].mode;

	  if (swapped)
	    x = op0, op0 = op1, op1 = x;
	  if (imode != V16QImode)
	    {
	      op0 = gen_lowpart (imode, op0);
	      op1 = gen_lowpart (imode, op1);
	    }
	  if (omode == V16QImode)
	    x = target;
	  else
	    x = gen_reg_rtx (omode);
	  emit_insn (GEN_FCN (icode) (x, op0, op1));
	  if (omode != V16QImode)
	    emit_move_insn (target, gen_lowpart (V16QImode, x));
	  return true;
	}
    }

  return false;
}

/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success!  */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}
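
/* A worked example (illustrative only): for a two-element vector with
   selector { 1, 2 }, perm0 = 1 picks element 1 of op0 and perm1 = 2 picks
   element 0 of op1.  Neither folding rule applies, so when TARGET is
   non-null the code above emits a vec_select from the four-element
   VEC_CONCAT of the two inputs using that selector.  */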

bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}

/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}

/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}
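
/* For example, if VMODE is V4SFmode the selector must be an integer
   vector, so IMODE is derived above as SFmode -> 32-bit MODE_INT ->
   V4SImode, and expand_vec_perm receives the constant selector in that
   integer mode.  */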

/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
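
/* For example, with nelt == 4 this builds the selector { 0, 2, 4, 6 },
   i.e. the even elements of op0 followed by the even elements of op1.  */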

/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
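
/* For example, with nelt == 4 and high == 0 this builds the selector
   { 0, 4, 1, 5 }, interleaving the first two elements of op0 with the
   first two elements of op1.  */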

/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
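
/* For example, with hard floats on a 32-bit target an SCmode value has
   inner == SFmode and inner_bytes == 4, so the code above returns a
   PARALLEL of (reg:SF fp1) at offset 0 and (reg:SF fp2) at offset 4,
   while a DCmode value (inner_bytes == 8) is returned as a single
   (reg:DC fp1).  */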

/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
        gen_rtvec (4,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 2),
                                      GEN_INT (8)),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 3),
                                      GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}

/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
        gen_rtvec (2,
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode, GP_ARG_RETURN),
                                      const0_rtx),
                   gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SImode,
                                                   GP_ARG_RETURN + 1),
                                      GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
            ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
            : true);
}

/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}

static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
        }
      else
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
        }
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}

/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}

/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == MQ_REGNO)
    return 100;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
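
/* For example, with -gdwarf-2 the condition register field cr2
   (regno CR0_REGNO + 2) maps to DWARF register 88 via the CR case above,
   and the first AltiVec register maps to 1124.  */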

/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}

/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}

/* Target hook for invalid_arg_for_unprototyped_fn. */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}

void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = locator_location (INSN_LOCATOR (insn));

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
        return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}

/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;             /* option name */
  int mask;                     /* mask to set */
  bool invert;                  /* invert sense of mask */
  bool valid_target;            /* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",          MASK_ALTIVEC,           false, true  },
  { "cmpb",             MASK_CMPB,              false, true  },
  { "dlmzb",            MASK_DLMZB,             false, true  },
  { "fprnd",            MASK_FPRND,             false, true  },
  { "hard-dfp",         MASK_DFP,               false, true  },
  { "isel",             MASK_ISEL,              false, true  },
  { "mfcrf",            MASK_MFCRF,             false, true  },
  { "mfpgpr",           MASK_MFPGPR,            false, true  },
  { "mulhw",            MASK_MULHW,             false, true  },
  { "multiple",         MASK_MULTIPLE,          false, true  },
  { "update",           MASK_NO_UPDATE,         true , true  },
  { "popcntb",          MASK_POPCNTB,           false, true  },
  { "popcntd",          MASK_POPCNTD,           false, true  },
  { "powerpc-gfxopt",   MASK_PPC_GFXOPT,        false, true  },
  { "powerpc-gpopt",    MASK_PPC_GPOPT,         false, true  },
  { "recip-precision",  MASK_RECIP_PRECISION,   false, true  },
  { "string",           MASK_STRING,            false, true  },
  { "vsx",              MASK_VSX,               false, true  },
#ifdef MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",            MASK_64BIT,             false, false },
  { "aix32",            MASK_64BIT,             true,  false },
#else
  { "64",               MASK_64BIT,             false, false },
  { "32",               MASK_64BIT,             true,  false },
#endif
#endif
#ifdef MASK_EABI
  { "eabi",             MASK_EABI,              false, false },
#endif
#ifdef MASK_LITTLE_ENDIAN
  { "little",           MASK_LITTLE_ENDIAN,     false, false },
  { "big",              MASK_LITTLE_ENDIAN,     true,  false },
#endif
#ifdef MASK_RELOCATABLE
  { "relocatable",      MASK_RELOCATABLE,       false, false },
#endif
#ifdef MASK_STRICT_ALIGN
  { "strict-align",     MASK_STRICT_ALIGN,      false, false },
#endif
  { "powerpc",          MASK_POWERPC,           false, false },
  { "soft-float",       MASK_SOFT_FLOAT,        false, false },
  { "string",           MASK_STRING,            false, false },
};
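
/* A usage sketch (user source, not part of this file): the names above are
   what attribute((target)) and the target pragma accept, optionally with a
   "no-" prefix, e.g.

     #pragma GCC target ("altivec,no-vsx")
     __attribute__((target ("popcntd"))) int g (int);

   Entries with valid_target == false are only used when printing flags;
   rs6000_inner_target_options rejects them with an "is not allowed"
   error.  */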

/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",          RS6000_BTM_ALTIVEC,     false, false },
  { "vsx",              RS6000_BTM_VSX,         false, false },
  { "spe",              RS6000_BTM_SPE,         false, false },
  { "paired",           RS6000_BTM_PAIRED,      false, false },
  { "fre",              RS6000_BTM_FRE,         false, false },
  { "fres",             RS6000_BTM_FRES,        false, false },
  { "frsqrte",          RS6000_BTM_FRSQRTE,     false, false },
  { "frsqrtes",         RS6000_BTM_FRSQRTES,    false, false },
  { "popcntd",          RS6000_BTM_POPCNTD,     false, false },
  { "powerpc",          RS6000_BTM_POWERPC,     false, false },
  { "cell",             RS6000_BTM_CELL,        false, false },
};

/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;             /* option name */
  size_t global_offset;         /* offset of the option in global_options.  */
  size_t target_offset;         /* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};

/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    int mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        target_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec.  */
                        if (mask == MASK_VSX && !invert)
                          mask |= MASK_ALTIVEC;

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          target_flags &= ~mask;
                        else
                          target_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}
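
/* For example, parsing the string "cpu=power7,no-vsx" (assuming power7 is a
   known cpu name) sets rs6000_cpu_index via rs6000_cpu_name_lookup, then
   strips the "no-" prefix, finds "vsx" in rs6000_opt_masks and clears
   MASK_VSX in target_flags.  An unknown token such as "foo" instead
   produces '#pragma GCC target "foo" is invalid' (or the attribute
   spelling when ATTR_P).  */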

/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return;
}

/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize = build_optimization_node ();
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  old_optimize = build_optimization_node ();
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node ();
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node ();

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}

/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node ();
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  unsigned prev_bumask, cur_bumask, diff_bumask;
  int prev_flags, cur_flags, diff_flags;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node ()) == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_target_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_target_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}

/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  tree old_tree = (rs6000_previous_fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
                   : NULL_TREE);

  tree new_tree = (fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
                   : NULL_TREE);

  if (TARGET_DEBUG_TARGET)
    {
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
          print_final = true;
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
          print_final = true;
        }

      if (print_final)
        fprintf (stderr, "--------------------\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
    {
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)
        ;

      else if (new_tree)
        {
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          target_reinit ();
        }

      else if (old_tree)
        {
          struct cl_target_option *def
            = TREE_TARGET_OPTION (target_option_current_node);

          cl_target_option_restore (&global_options, def);
          target_reinit ();
        }
    }
}

/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr)
{
  ptr->rs6000_target_flags_explicit = target_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct cl_target_option *ptr)
{
  target_flags_explicit = ptr->rs6000_target_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  size_t i;
  int flags = ptr->x_target_flags;
  unsigned bu_mask = ptr->x_rs6000_builtin_mask;

  /* Print the various mask options.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
    if ((flags & rs6000_opt_masks[i].mask) != 0)
      {
        flags &= ~ rs6000_opt_masks[i].mask;
        fprintf (file, "%*s-m%s%s\n", indent, "",
                 rs6000_opt_masks[i].invert ? "no-" : "",
                 rs6000_opt_masks[i].name);
      }

  /* Print the various options that are variables.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
    {
      size_t j = rs6000_opt_vars[i].target_offset;
      if (((signed char *) ptr)[j])
        fprintf (file, "%*s-m%s\n", indent, "",
                 rs6000_opt_vars[i].name);
    }

  /* Print the various builtin flags.  */
  fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
  for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
    if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
      fprintf (file, "%*s%s builtins supported\n", indent, "",
               rs6000_builtin_mask_names[i].name);
}

/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
         function can inline an altivec function but a non-vsx function
         can't inline a vsx function.  */
      if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
          == callee_opts->x_target_flags)
        ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
             (DECL_NAME (caller)
              ? IDENTIFIER_POINTER (DECL_NAME (caller))
              : "<unknown>"),
             (DECL_NAME (callee)
              ? IDENTIFIER_POINTER (DECL_NAME (callee))
              : "<unknown>"),
             (ret ? "can" : "cannot"));

  return ret;
}
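
/* For example, a caller whose target flags include both MASK_VSX and
   MASK_ALTIVEC may inline a callee that requires only MASK_ALTIVEC, since
   ANDing the caller's flags with the callee's reproduces the callee's
   flags; the reverse direction fails the subset test above.  */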

/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}

/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}

/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
          && !legitimate_indirect_address_p (addr, strict_p))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
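
/* For example, (mem:V4SI (plus:SI (reg r3) (const_int 32))) is neither
   indirect nor indexed, so the sum is first copied into a register and the
   result is rewritten as (mem:V4SI (and (reg) (const_int -16))), the
   16-byte-aligned form the AltiVec load/store patterns expect.  */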

/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}

/* A function pointer under AIX is a pointer to a data area whose first word
   contains the actual address of the function, whose second word contains a
   pointer to its TOC, and whose third word contains a value to place in the
   static chain register (r11).  Note that if we load the static chain, our
   "trampoline" need not have any executable code.  */

void
rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
{
  rtx func_addr;
  rtx toc_reg;
  rtx sc_reg;
  rtx stack_ptr;
  rtx stack_toc_offset;
  rtx stack_toc_mem;
  rtx func_toc_offset;
  rtx func_toc_mem;
  rtx func_sc_offset;
  rtx func_sc_mem;
  rtx insn;
  rtx (*call_func) (rtx, rtx, rtx, rtx);
  rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);

  stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);

  /* Load up address of the actual function.  */
  func_desc = force_reg (Pmode, func_desc);
  func_addr = gen_reg_rtx (Pmode);
  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

  if (TARGET_32BIT)
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix32bit;
          call_value_func = gen_call_value_indirect_aix32bit;
        }
      else
        {
          call_func = gen_call_indirect_aix32bit_nor11;
          call_value_func = gen_call_value_indirect_aix32bit_nor11;
        }
    }
  else
    {
      stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
      func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
      func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
      if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
        {
          call_func = gen_call_indirect_aix64bit;
          call_value_func = gen_call_value_indirect_aix64bit;
        }
      else
        {
          call_func = gen_call_indirect_aix64bit_nor11;
          call_value_func = gen_call_value_indirect_aix64bit_nor11;
        }
    }

  /* Reserved spot to store the TOC.  */
  stack_toc_mem = gen_frame_mem (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_ptr,
                                               stack_toc_offset));

  gcc_assert (cfun);
  gcc_assert (cfun->machine);

  /* Can we optimize saving the TOC in the prologue or do we need to do it at
     every call?  */
  if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
    cfun->machine->save_toc_in_prologue = true;

  else
    {
      MEM_VOLATILE_P (stack_toc_mem) = 1;
      emit_move_insn (stack_toc_mem, toc_reg);
    }

  /* Calculate the address to load the TOC of the called function.  We don't
     actually load this until the split after reload.  */
  func_toc_mem = gen_rtx_MEM (Pmode,
                              gen_rtx_PLUS (Pmode,
                                            func_desc,
                                            func_toc_offset));

  /* If we have a static chain, load it up.  */
  if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
    {
      func_sc_mem = gen_rtx_MEM (Pmode,
                                 gen_rtx_PLUS (Pmode,
                                               func_desc,
                                               func_sc_offset));

      sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
      emit_move_insn (sc_reg, func_sc_mem);
    }

  /* Create the call.  */
  if (value)
    insn = call_value_func (value, func_addr, flag, func_toc_mem,
                            stack_toc_mem);
  else
    insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);

  emit_call_insn (insn);
}
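
/* Sketch of the descriptor layout assumed above: the code address is
   loaded from offset 0 of the descriptor, the callee's TOC pointer from
   AIX_FUNC_DESC_TOC_{32,64}BIT, and the static chain word from
   AIX_FUNC_DESC_SC_{32,64}BIT, while the caller's TOC is saved at
   TOC_SAVE_OFFSET_{32,64}BIT from the stack pointer.  */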

/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}

/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE)
    {
      DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}

/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"