/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
25 #include "coretypes.h"
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-attr.h"
43 #include "basic-block.h"
44 #include "integrate.h"
50 #include "target-def.h"
51 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "sched-int.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
59 #include "tm-constrs.h"
61 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
64 #include "gstab.h" /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
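/* Note that, as ordinary function-style macros, these evaluate an
   argument twice: min (x++, y) would increment x twice whenever x is
   the smaller value, so operands with side effects must be avoided.  */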
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
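/* Illustrative use of the structure above (a sketch, not code from this
   excerpt): the prologue/epilogue emitters obtain one of these via
   rs6000_stack_info (), declared below, and then consult its fields, e.g.

     rs6000_stack_t *info = rs6000_stack_info ();
     if (info->lr_save_p)
       ... save the link register at sp + info->lr_save_offset ...  */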
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct machine_function GTY(())
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
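/* Illustrative access pattern (a sketch): once rs6000_init_machine_status,
   declared below, has allocated this structure, it hangs off the current
   function, so code elsewhere reads fields as, e.g.,

     if (cfun->machine->ra_needs_full_frame)
       ... the full frame must be laid out ...  */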
/* Target cpu type */

enum processor_type rs6000_cpu;
struct rs6000_cpu_select rs6000_select[3] =
{
  /* switch		name,			tune	arch */
  { (const char *)0,	"--with-cpu=",		1,	1 },
  { (const char *)0,	"-mcpu=",		1,	1 },
  { (const char *)0,	"-mtune=",		1,	0 },
};
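/* Reading the table above: the two trailing columns are the set_tune_p
   and set_arch_p flags, so -mcpu= chooses both the scheduling model and
   the architecture/feature flags, while -mtune= only changes scheduling;
   e.g. "-mcpu=750 -mtune=7450" generates 750-compatible code scheduled
   for a 7450 pipeline.  */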
static GTY(()) bool rs6000_cell_dont_microcode;

/* Always emit branch hint bits.  */
static GTY(()) bool rs6000_always_hint;

/* Schedule instructions for group formation.  */
static GTY(()) bool rs6000_sched_groups;

/* Align branch targets.  */
static GTY(()) bool rs6000_align_branch_targets;

/* Support for -msched-costly-dep option.  */
const char *rs6000_sched_costly_dep_str;
enum rs6000_dependence_cost rs6000_sched_costly_dep;

/* Support for -minsert-sched-nops option.  */
const char *rs6000_sched_insert_nops_str;
enum rs6000_nop_insertion rs6000_sched_insert_nops;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;
/* Size of long double.  */
int rs6000_long_double_type_size;

/* IEEE quad extended precision long double.  */
int rs6000_ieeequad;

/* Nonzero to use AltiVec ABI.  */
int rs6000_altivec_abi;

/* Nonzero if we want SPE SIMD instructions.  */
int rs6000_spe;

/* Nonzero if we want SPE ABI extensions.  */
int rs6000_spe_abi;

/* Nonzero to use isel instructions.  */
int rs6000_isel;

/* Nonzero if floating point operations are done in the GPRs.  */
int rs6000_float_gprs = 0;

/* Nonzero if we want Darwin's struct-by-value-in-regs ABI.  */
int rs6000_darwin64_abi;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Save information from a "cmpxx" operation until the branch or scc is
   emitted.  */
rtx rs6000_compare_op0, rs6000_compare_op1;
int rs6000_compare_fp_p;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
int rs6000_pic_labelno;

/* Which abi to adhere to */
const char *rs6000_abi_name;

/* Semantics of the small data area */
enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;

/* Which small data model to use */
const char *rs6000_sdata_name = (char *)0;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Bit size of immediate TLS offsets and string from which it is decoded.  */
int rs6000_tls_size = 32;
const char *rs6000_tls_size_string;

/* ABI enumeration available for subtarget to use.  */
enum rs6000_abi rs6000_current_abi;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Debug flags */
const char *rs6000_debug_name;
int rs6000_debug_stack;		/* debug stack applications */
int rs6000_debug_arg;		/* debug argument handling */

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
/* Built in types.  */

tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

const char *rs6000_traceback_name;
static enum {
  traceback_default = 0,
  traceback_none,
  traceback_part,
  traceback_full
} rs6000_traceback;

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
/* Control alignment for fields within structures.  */
/* String from -malign-XXXXX.  */
int rs6000_alignment_flags;

/* True for any options that were explicitly set.  */
static struct {
  bool aix_struct_ret;		/* True if -maix-struct-ret was used.  */
  bool alignment;		/* True if -malign- was used.  */
  bool spe_abi;			/* True if -mabi=spe/no-spe was used.  */
  bool altivec_abi;		/* True if -mabi=altivec/no-altivec used.  */
  bool spe;			/* True if -mspe= was used.  */
  bool float_gprs;		/* True if -mfloat-gprs= was used.  */
  bool isel;			/* True if -misel was used.  */
  bool long_double;		/* True if -mlong-double- was used.  */
  bool ieee;			/* True if -mabi=ieee/ibmlongdouble used.  */
  bool vrsave;			/* True if -mvrsave was used.  */
} rs6000_explicit_options;
struct builtin_description
{
  /* mask is not const because we're going to alter it below.  This
     nonsense will go away when we rewrite the -march infrastructure
     to give us more target flag bits.  */
  unsigned int mask;

  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add) */
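/* For scale in the tables below: COSTS_N_INSNS (N) expands to N times
   the cost of a single fast instruction (N * 4 in rtl.h), so an entry
   such as COSTS_N_INSNS (19) for divsi models an SImode divide as
   costing roughly nineteen adds.  */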
/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction costs on RIOS1 processors.  */
struct processor_costs rios1_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (19),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on RIOS2 processors.  */
struct processor_costs rios2_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (13),   /* divsi */
  COSTS_N_INSNS (13),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  256,			/* cache line size */
};
/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,			/* prefetch streams */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  16,			/* prefetch streams */
};
static bool rs6000_function_ok_for_sibcall (tree, tree);
static const char *rs6000_invalid_within_doloop (const_rtx);
static rtx rs6000_generate_compare (enum rtx_code);
static void rs6000_emit_stack_tie (void);
static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
static bool spe_func_has_64bit_regs_p (void);
static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
			     int, int);
static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int);
static unsigned rs6000_hash_constant (rtx);
static unsigned toc_hash_function (const void *);
static int toc_hash_eq (const void *, const void *);
static int constant_pool_expr_1 (rtx, int *, int *);
static bool constant_pool_expr_p (rtx);
static bool legitimate_small_data_p (enum machine_mode, rtx);
static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
static bool rs6000_assemble_integer (rtx, unsigned int, int);
static bool no_global_regs_above (int);
#ifdef HAVE_GAS_HIDDEN
static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static bool rs6000_ms_bitfield_layout_p (const_tree);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
static const char *rs6000_mangle_type (const_tree);
extern const struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
static bool rs6000_reg_live_or_pic_offset_p (int);
static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				    tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static bool rs6000_return_in_memory (const_tree, const_tree);
static void rs6000_file_start (void);
static int rs6000_elf_reloc_rw_mask (void);
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
static void rs6000_elf_asm_init_sections (void);
static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
					       unsigned HOST_WIDE_INT);
static void rs6000_elf_encode_section_info (tree, rtx, int)
     ATTRIBUTE_UNUSED;
static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
static void rs6000_alloc_sdmode_stack_slot (void);
static void rs6000_instantiate_decls (void);
static void rs6000_xcoff_asm_output_anchor (rtx);
static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
static void rs6000_xcoff_asm_init_sections (void);
static int rs6000_xcoff_reloc_rw_mask (void);
static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
static section *rs6000_xcoff_select_section (tree, int,
					     unsigned HOST_WIDE_INT);
static void rs6000_xcoff_unique_section (tree, int);
static section *rs6000_xcoff_select_rtx_section
  (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
static const char * rs6000_xcoff_strip_name_encoding (const char *);
static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
static void rs6000_xcoff_file_start (void);
static void rs6000_xcoff_file_end (void);
static int rs6000_variable_issue (FILE *, int, rtx, int);
static bool rs6000_rtx_costs (rtx, int, int, int *);
static int rs6000_adjust_cost (rtx, rtx, rtx, int);
static void rs6000_sched_init (FILE *, int, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_branch_slot_insn (rtx);
static bool is_load_insn (rtx);
static rtx get_store_dest (rtx pat);
static bool is_store_insn (rtx);
static bool set_to_load_agen (rtx,rtx);
static bool adjacent_mem_locations (rtx,rtx);
static int rs6000_adjust_priority (rtx, int);
static int rs6000_issue_rate (void);
static bool rs6000_is_costly_dependence (dep_t, int, int);
static rtx get_next_active_insn (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static bool is_costly_group (rtx *, rtx);
static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
static int redefine_groups (FILE *, int, rtx, rtx);
static int pad_groups (FILE *, int, rtx, rtx);
static void rs6000_sched_finish (FILE *, int);
static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int rs6000_use_sched_lookahead (void);
static int rs6000_use_sched_lookahead_guard (rtx);
static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
static tree rs6000_builtin_mask_for_load (void);
static tree rs6000_builtin_mul_widen_even (tree);
static tree rs6000_builtin_mul_widen_odd (tree);
static tree rs6000_builtin_conversion (enum tree_code, tree);

static void def_builtin (int, const char *, tree, int);
static bool rs6000_vector_alignment_reachable (const_tree, bool);
static void rs6000_init_builtins (void);
static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void altivec_init_builtins (void);
static void rs6000_common_init_builtins (void);
static void rs6000_init_libfuncs (void);

static void paired_init_builtins (void);
static rtx paired_expand_builtin (tree, rtx, bool *);
static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx paired_expand_stv_builtin (enum insn_code, tree);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);

static void enable_mask_for_builtins (struct builtin_description *, int,
				      enum rs6000_builtins,
				      enum rs6000_builtins);
static tree build_opaque_vector_type (tree, int);
static void spe_init_builtins (void);
static rtx spe_expand_builtin (tree, rtx, bool *);
static rtx spe_expand_stv_builtin (enum insn_code, tree);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void debug_stack_info (rs6000_stack_t *);
static rtx altivec_expand_builtin (tree, rtx, bool *);
static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_predicate_builtin (enum insn_code,
					     const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
static rtx altivec_expand_vec_set_builtin (tree);
static rtx altivec_expand_vec_ext_builtin (tree, rtx);
static int get_element_number (tree, tree);
static bool rs6000_handle_option (size_t, const char *, int);
static void rs6000_parse_tls_size_option (void);
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
static void compute_save_world_info (rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
static bool rs6000_is_opaque_type (const_tree);
static rtx rs6000_dwarf_register_span (rtx);
static void rs6000_init_dwarf_reg_sizes_extra (tree);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static rtx rs6000_tls_get_addr (void);
static rtx rs6000_got_sym (void);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static const char *rs6000_get_some_local_dynamic_name (void);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
				    enum machine_mode, tree);
static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
						      HOST_WIDE_INT);
static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
							tree, HOST_WIDE_INT);
static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
					      HOST_WIDE_INT, rtx[], int *);
static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
						const_tree, HOST_WIDE_INT,
						rtx[], int *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
				    enum machine_mode, tree,
				    int *, int);
static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				      const_tree, bool);
static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				     tree, bool);
static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
#if TARGET_MACHO
static void macho_branch_islands (void);
static int no_previous_def (tree function_name);
static tree get_prev_label (tree function_name);
static void rs6000_darwin_file_start (void);
#endif

static tree rs6000_build_builtin_va_list (void);
static void rs6000_va_start (tree, rtx);
static tree rs6000_gimplify_va_arg (tree, tree, tree *, tree *);
static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
static bool rs6000_scalar_mode_supported_p (enum machine_mode);
static bool rs6000_vector_mode_supported_p (enum machine_mode);
static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
			     enum machine_mode);
static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
				       enum machine_mode);
static int get_vsel_insn (enum machine_mode);
static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
static tree rs6000_stack_protect_fail (void);

const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
/* Hash table stuff for keeping track of TOC entries.  */

struct toc_hash_struct GTY(())
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "xer",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "xer",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
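/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) yields 0x80000000
   (the %v0 bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) yields
   0x00000001 (the %v31 bit), matching the bit layout of the VRSAVE
   register.  */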
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#ifdef HAVE_GAS_HIDDEN
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion

#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_VECTOR_OPAQUE_P
#define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION rs6000_handle_option

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT)

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,anchor
	lwz	dest,low(tmp)

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

struct gcc_target targetm = TARGET_INITIALIZER;
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);

  /* The float registers can only hold floating modes and DImode.
     This excludes the 32-bit decimal float mode for now.  */
  if (FP_REGNO_P (regno))
    return
      ((SCALAR_FLOAT_MODE_P (mode)
	&& (mode != TDmode || (regno % 2) == 0)
	&& FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
       || (GET_MODE_CLASS (mode) == MODE_INT
	   && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
       || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	   && PAIRED_VECTOR_MODE (mode)));

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (XER_REGNO_P (regno))
    return mode == PSImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return ALTIVEC_VECTOR_MODE (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be
     able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
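/* As a concrete case of the GPR check above: in 32-bit mode DImode
   occupies two GPRs, so regno 30 is accepted (the value lives in
   r30/r31) while regno 31 is rejected, since the value would extend
   past R31.  */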
/* Initialize rs6000_hard_regno_mode_ok_p table.  */
static void
rs6000_init_hard_regno_mode_ok (void)
{
  int r, m;

  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;
}
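/* With the table filled in once at start-up, a register/mode query
   elsewhere reduces to a constant-time array lookup,

     rs6000_hard_regno_mode_ok_p[mode][regno]

   instead of re-running the predicate above for every query.  */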
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (MACHO_DYNAMIC_NO_PIC_P)
	{
	  if (flag_pic)
	    warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
	  flag_pic = 0;
	}
      else if (flag_pic == 1)
	flag_pic = 2;
    }

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! rs6000_select[1].string)
    target_flags |= MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */
#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Override command line options.  Mostly we process the processor
   type and sometimes adjust other TARGET_ options.  */
void
rs6000_override_options (const char *default_cpu)
{
  size_t i, j;
  struct rs6000_cpu_select *ptr;
  int set_masks;

  /* Simplifications for entries below.  */

  enum {
    POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
    POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
  };
  /* This table occasionally claims that a processor does not support
     a particular feature even though it does, but the feature is slower
     than the alternative.  Thus, it shouldn't be relied on as a
     complete description of the processor's support.

     Please keep this list in order, and don't forget to update the
     documentation in invoke.texi when adding a new processor or
     flag.  */
  static struct ptt
    {
      const char *const name;		/* Canonical processor name.  */
      const enum processor_type processor; /* Processor type enum value.  */
      const int target_enable;	/* Target flags to enable.  */
    } const processor_target_table[]
1414 = {{"401", PROCESSOR_PPC403
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1415 {"403", PROCESSOR_PPC403
,
1416 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_STRICT_ALIGN
},
1417 {"405", PROCESSOR_PPC405
,
1418 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1419 {"405fp", PROCESSOR_PPC405
,
1420 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1421 {"440", PROCESSOR_PPC440
,
1422 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1423 {"440fp", PROCESSOR_PPC440
,
1424 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1425 {"464", PROCESSOR_PPC440
,
1426 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1427 {"464fp", PROCESSOR_PPC440
,
1428 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1429 {"505", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
},
1430 {"601", PROCESSOR_PPC601
,
1431 MASK_POWER
| POWERPC_BASE_MASK
| MASK_MULTIPLE
| MASK_STRING
},
1432 {"602", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1433 {"603", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1434 {"603e", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1435 {"604", PROCESSOR_PPC604
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1436 {"604e", PROCESSOR_PPC604e
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1437 {"620", PROCESSOR_PPC620
,
1438 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1439 {"630", PROCESSOR_PPC630
,
1440 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1441 {"740", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1442 {"7400", PROCESSOR_PPC7400
, POWERPC_7400_MASK
},
1443 {"7450", PROCESSOR_PPC7450
, POWERPC_7400_MASK
},
1444 {"750", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1445 {"801", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1446 {"821", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1447 {"823", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1448 {"8540", PROCESSOR_PPC8540
, POWERPC_BASE_MASK
| MASK_STRICT_ALIGN
},
1449 /* 8548 has a dummy entry for now. */
1450 {"8548", PROCESSOR_PPC8540
, POWERPC_BASE_MASK
| MASK_STRICT_ALIGN
},
1451 {"e300c2", PROCESSOR_PPCE300C2
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1452 {"e300c3", PROCESSOR_PPCE300C3
, POWERPC_BASE_MASK
},
1453 {"860", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1454 {"970", PROCESSOR_POWER4
,
1455 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1456 {"cell", PROCESSOR_CELL
,
1457 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1458 {"common", PROCESSOR_COMMON
, MASK_NEW_MNEMONICS
},
1459 {"ec603e", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1460 {"G3", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1461 {"G4", PROCESSOR_PPC7450
, POWERPC_7400_MASK
},
1462 {"G5", PROCESSOR_POWER4
,
1463 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1464 {"power", PROCESSOR_POWER
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1465 {"power2", PROCESSOR_POWER
,
1466 MASK_POWER
| MASK_POWER2
| MASK_MULTIPLE
| MASK_STRING
},
1467 {"power3", PROCESSOR_PPC630
,
1468 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1469 {"power4", PROCESSOR_POWER4
,
1470 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1471 {"power5", PROCESSOR_POWER5
,
1472 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GFXOPT
1473 | MASK_MFCRF
| MASK_POPCNTB
},
1474 {"power5+", PROCESSOR_POWER5
,
1475 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GFXOPT
1476 | MASK_MFCRF
| MASK_POPCNTB
| MASK_FPRND
},
1477 {"power6", PROCESSOR_POWER6
,
1478 POWERPC_7400_MASK
| MASK_POWERPC64
| MASK_MFCRF
| MASK_POPCNTB
1479 | MASK_FPRND
| MASK_CMPB
| MASK_DFP
},
1480 {"power6x", PROCESSOR_POWER6
,
1481 POWERPC_7400_MASK
| MASK_POWERPC64
| MASK_MFCRF
| MASK_POPCNTB
1482 | MASK_FPRND
| MASK_CMPB
| MASK_MFPGPR
| MASK_DFP
},
1483 {"powerpc", PROCESSOR_POWERPC
, POWERPC_BASE_MASK
},
1484 {"powerpc64", PROCESSOR_POWERPC64
,
1485 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1486 {"rios", PROCESSOR_RIOS1
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1487 {"rios1", PROCESSOR_RIOS1
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1488 {"rios2", PROCESSOR_RIOS2
,
1489 MASK_POWER
| MASK_POWER2
| MASK_MULTIPLE
| MASK_STRING
},
1490 {"rsc", PROCESSOR_PPC601
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1491 {"rsc1", PROCESSOR_PPC601
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1492 {"rs64", PROCESSOR_RS64A
,
1493 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
}
1496 const size_t ptt_size
= ARRAY_SIZE (processor_target_table
);
  /* Some OSs don't support saving the high part of 64-bit registers on
     context switch.  Other OSs don't support saving Altivec registers.
     On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
     settings; if the user wants either, the user must explicitly specify
     them and we won't interfere with the user's specification.  */

  enum {
    POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
    POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		     | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		     | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		     | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
  };
  rs6000_init_hard_regno_mode_ok ();

  set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;
  /* Identify the processor type.  */
  rs6000_select[0].string = default_cpu;
  rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;

  for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
    {
      ptr = &rs6000_select[i];
      if (ptr->string != (char *)0 && ptr->string[0] != '\0')
	{
	  for (j = 0; j < ptt_size; j++)
	    if (! strcmp (ptr->string, processor_target_table[j].name))
	      {
		if (ptr->set_tune_p)
		  rs6000_cpu = processor_target_table[j].processor;

		if (ptr->set_arch_p)
		  {
		    target_flags &= ~set_masks;
		    target_flags |= (processor_target_table[j].target_enable
				     & set_masks);
		  }
		break;
	      }

	  if (j == ptt_size)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("Spe not supported in this target");
    }
  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  target_flags &= ~MASK_MULTIPLE;
	  if ((target_flags_explicit & MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  target_flags &= ~MASK_STRING;
	  if ((target_flags_explicit & MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }
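  /* Concretely, MASK_MULTIPLE enables the lmw/stmw instructions (one
     insn to load or store a run of consecutive GPRs) and MASK_STRING
     enables lswi/stswi; both shrink register save/restore sequences,
     which is why -Os turns them on for big-endian targets above.  */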
  /* Set debug flags */
  if (rs6000_debug_name)
    {
      if (! strcmp (rs6000_debug_name, "all"))
	rs6000_debug_stack = rs6000_debug_arg = 1;
      else if (! strcmp (rs6000_debug_name, "stack"))
	rs6000_debug_stack = 1;
      else if (! strcmp (rs6000_debug_name, "arg"))
	rs6000_debug_arg = 1;
      else
	error ("unknown -mdebug-%s switch", rs6000_debug_name);
    }
  if (rs6000_traceback_name)
    {
      if (! strncmp (rs6000_traceback_name, "full", 4))
	rs6000_traceback = traceback_full;
      else if (! strncmp (rs6000_traceback_name, "part", 4))
	rs6000_traceback = traceback_part;
      else if (! strncmp (rs6000_traceback_name, "no", 2))
	rs6000_traceback = traceback_none;
      else
	error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
	       rs6000_traceback_name);
    }
  if (!rs6000_explicit_options.long_double)
    rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;

#ifndef POWERPC_LINUX
  if (!rs6000_explicit_options.ieee)
    rs6000_ieeequad = 1;
#endif
  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && TARGET_ALTIVEC)
    rs6000_altivec_abi = 1;

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!rs6000_explicit_options.altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC))
	rs6000_altivec_abi = 1;

      /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden.  */
      if (!rs6000_explicit_options.vrsave)
	TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.  */
  if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
    {
      rs6000_darwin64_abi = 1;
      darwin_one_byte_bool = 1;
      /* Default to natural alignment, for better performance.  */
      rs6000_alignment_flags = MASK_ALIGN_NATURAL;
    }
  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;

  /* Handle -mtls-size option.  */
  rs6000_parse_tls_size_option ();

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif
  if (TARGET_E500)
    {
      /* The e500 does not have string instructions, and we set
	 MASK_STRING above when optimizing for size.  */
      if ((target_flags & MASK_STRING) != 0)
	target_flags = target_flags & ~MASK_STRING;
    }
  else if (rs6000_select[1].string != NULL)
    {
      /* For the powerpc-eabispe configuration, we set all these by
	 default, so let's unset them if we manually set another
	 CPU that is not the E500.  */
      if (!rs6000_explicit_options.spe_abi)
	rs6000_spe_abi = 0;
      if (!rs6000_explicit_options.spe)
	rs6000_spe = 0;
      if (!rs6000_explicit_options.float_gprs)
	rs6000_float_gprs = 0;
      if (!rs6000_explicit_options.isel)
	rs6000_isel = 0;
    }
  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_CELL);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6);
  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!rs6000_explicit_options.aix_struct_ret)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  if (TARGET_TOC)
    ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  */
      if (rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    align_loops = 16;
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI != ABI_AIX)
    targetm.calls.split_complex_arg = NULL;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RIOS1:
	rs6000_cost = &rios1_cost;
	break;

      case PROCESSOR_RIOS2:
	rs6000_cost = &rios2_cost;
	break;

      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      default:
	gcc_unreachable ();
      }
  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     rs6000_cost->simultaneous_prefetches);
  if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
    set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
  if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
    set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);

  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;
}
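
/* Illustrative note (editor's sketch, not from the original source):
   the flag bookkeeping above can be traced for a concrete command line
   such as "-mcpu=power5 -mno-altivec":

     set_masks &= ~target_flags_explicit;   // MASK_ALTIVEC drops out
     target_flags &= ~set_masks;            // cpu-settable bits cleared
     target_flags |= (processor_target_table[j].target_enable
		      & set_masks);         // power5 defaults, minus AltiVec

   so an explicitly given -mno-altivec can never be turned back on by
   the -mcpu default, which is the point of the "Don't override by the
   processor default if given explicitly" comment.  */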
/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}

/* Implement targetm.vectorize.builtin_conversion.  */
static tree
rs6000_builtin_conversion (enum tree_code code, tree type)
{
  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (code)
    {
    case FLOAT_EXPR:
      switch (TYPE_MODE (type))
	{
	case V4SImode:
	  return TYPE_UNSIGNED (type) ?
	    rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX] :
	    rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
	default:
	  return NULL_TREE;
	}
    default:
      return NULL_TREE;
    }
}
/* Implement targetm.vectorize.builtin_mul_widen_even.  */
static tree
rs6000_builtin_mul_widen_even (tree type)
{
  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (TYPE_MODE (type))
    {
    case V8HImode:
      return TYPE_UNSIGNED (type) ?
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH] :
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];

    case V16QImode:
      return TYPE_UNSIGNED (type) ?
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB] :
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];

    default:
      return NULL_TREE;
    }
}
/* Implement targetm.vectorize.builtin_mul_widen_odd.  */
static tree
rs6000_builtin_mul_widen_odd (tree type)
{
  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (TYPE_MODE (type))
    {
    case V8HImode:
      return TYPE_UNSIGNED (type) ?
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH] :
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];

    case V16QImode:
      return TYPE_UNSIGNED (type) ?
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB] :
	rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];

    default:
      return NULL_TREE;
    }
}
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N iterations.  This routine does not determine
   how many iterations are required to reach the desired alignment.  */
2006 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED
, bool is_packed
)
2013 if (rs6000_alignment_flags
== MASK_ALIGN_NATURAL
)
2016 if (rs6000_alignment_flags
== MASK_ALIGN_POWER
)
2026 /* Assuming that all other types are naturally aligned. CHECKME! */
/* Handle generic options of the form -mfoo=yes/no.
   NAME is the option name.
   VALUE is the option value.
   FLAG is the pointer to the flag where to store a 1 or 0, depending on
   whether the option value is 'yes' or 'no' respectively.  */

static void
rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
{
  if (value == 0)
    return;
  else if (!strcmp (value, "yes"))
    *flag = 1;
  else if (!strcmp (value, "no"))
    *flag = 0;
  else
    error ("unknown -m%s= option specified: '%s'", name, value);
}
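
/* Usage sketch: the -mvrsave= handler later in this file calls

     rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));

   so "-mvrsave=yes" stores 1, "-mvrsave=no" stores 0, and any other
   value is diagnosed as an unknown -mvrsave= option.  */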
/* Validate and record the size specified with the -mtls-size option.  */

static void
rs6000_parse_tls_size_option (void)
{
  if (rs6000_tls_size_string == 0)
    return;
  else if (strcmp (rs6000_tls_size_string, "16") == 0)
    rs6000_tls_size = 16;
  else if (strcmp (rs6000_tls_size_string, "32") == 0)
    rs6000_tls_size = 32;
  else if (strcmp (rs6000_tls_size_string, "64") == 0)
    rs6000_tls_size = 64;
  else
    error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
}
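
/* For example, "-mtls-size=16" keeps the single-instruction @tprel
   form generated by rs6000_legitimize_tls_address below, while
   "-mtls-size=32" selects the two-instruction @tprel@ha/@tprel@l
   pair; any other value is rejected with the error above.  */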
void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_DARWIN)
    /* The Darwin libraries never set errno, so we might as well
       avoid calling them when that's the only reason we would.  */
    flag_errno_math = 0;

  /* Double growth factor to counter reduced min jump length.  */
  set_param_value ("max-grow-copy-bb-insns", 16);

  /* Enable section anchors by default.
     Skip section anchors for Objective C and Objective C++
     until the front ends are fixed.  */
  if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
    flag_section_anchors = 1;
}
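
/* Editor's note: lang_hooks.name has the form "GNU C", "GNU C++",
   "GNU Objective-C", and so on, so the name[4] != 'O' test above is a
   cheap way of excluding the two Objective-C front ends from section
   anchoring, as the comment says.  */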
2084 /* Implement TARGET_HANDLE_OPTION. */
2087 rs6000_handle_option (size_t code
, const char *arg
, int value
)
2092 target_flags
&= ~(MASK_POWER
| MASK_POWER2
2093 | MASK_MULTIPLE
| MASK_STRING
);
2094 target_flags_explicit
|= (MASK_POWER
| MASK_POWER2
2095 | MASK_MULTIPLE
| MASK_STRING
);
2097 case OPT_mno_powerpc
:
2098 target_flags
&= ~(MASK_POWERPC
| MASK_PPC_GPOPT
2099 | MASK_PPC_GFXOPT
| MASK_POWERPC64
);
2100 target_flags_explicit
|= (MASK_POWERPC
| MASK_PPC_GPOPT
2101 | MASK_PPC_GFXOPT
| MASK_POWERPC64
);
2104 target_flags
&= ~MASK_MINIMAL_TOC
;
2105 TARGET_NO_FP_IN_TOC
= 0;
2106 TARGET_NO_SUM_IN_TOC
= 0;
2107 target_flags_explicit
|= MASK_MINIMAL_TOC
;
2108 #ifdef TARGET_USES_SYSV4_OPT
2109 /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be
2110 just the same as -mminimal-toc. */
2111 target_flags
|= MASK_MINIMAL_TOC
;
2112 target_flags_explicit
|= MASK_MINIMAL_TOC
;
2116 #ifdef TARGET_USES_SYSV4_OPT
2118 /* Make -mtoc behave like -mminimal-toc. */
2119 target_flags
|= MASK_MINIMAL_TOC
;
2120 target_flags_explicit
|= MASK_MINIMAL_TOC
;
2124 #ifdef TARGET_USES_AIX64_OPT
2129 target_flags
|= MASK_POWERPC64
| MASK_POWERPC
;
2130 target_flags
|= ~target_flags_explicit
& MASK_PPC_GFXOPT
;
2131 target_flags_explicit
|= MASK_POWERPC64
| MASK_POWERPC
;
2134 #ifdef TARGET_USES_AIX64_OPT
2139 target_flags
&= ~MASK_POWERPC64
;
2140 target_flags_explicit
|= MASK_POWERPC64
;
2143 case OPT_minsert_sched_nops_
:
2144 rs6000_sched_insert_nops_str
= arg
;
2147 case OPT_mminimal_toc
:
2150 TARGET_NO_FP_IN_TOC
= 0;
2151 TARGET_NO_SUM_IN_TOC
= 0;
2158 target_flags
|= (MASK_MULTIPLE
| MASK_STRING
);
2159 target_flags_explicit
|= (MASK_MULTIPLE
| MASK_STRING
);
2166 target_flags
|= (MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
);
2167 target_flags_explicit
|= (MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
);
2171 case OPT_mpowerpc_gpopt
:
2172 case OPT_mpowerpc_gfxopt
:
2175 target_flags
|= MASK_POWERPC
;
2176 target_flags_explicit
|= MASK_POWERPC
;
2180 case OPT_maix_struct_return
:
2181 case OPT_msvr4_struct_return
:
2182 rs6000_explicit_options
.aix_struct_ret
= true;
2186 rs6000_explicit_options
.vrsave
= true;
2187 rs6000_parse_yes_no_option ("vrsave", arg
, &(TARGET_ALTIVEC_VRSAVE
));
2191 rs6000_explicit_options
.isel
= true;
2192 rs6000_isel
= value
;
2196 rs6000_explicit_options
.isel
= true;
2197 rs6000_parse_yes_no_option ("isel", arg
, &(rs6000_isel
));
2201 rs6000_explicit_options
.spe
= true;
2206 rs6000_explicit_options
.spe
= true;
2207 rs6000_parse_yes_no_option ("spe", arg
, &(rs6000_spe
));
2211 rs6000_debug_name
= arg
;
2214 #ifdef TARGET_USES_SYSV4_OPT
2216 rs6000_abi_name
= arg
;
2220 rs6000_sdata_name
= arg
;
2223 case OPT_mtls_size_
:
2224 rs6000_tls_size_string
= arg
;
2227 case OPT_mrelocatable
:
2230 target_flags
|= MASK_MINIMAL_TOC
;
2231 target_flags_explicit
|= MASK_MINIMAL_TOC
;
2232 TARGET_NO_FP_IN_TOC
= 1;
2236 case OPT_mrelocatable_lib
:
2239 target_flags
|= MASK_RELOCATABLE
| MASK_MINIMAL_TOC
;
2240 target_flags_explicit
|= MASK_RELOCATABLE
| MASK_MINIMAL_TOC
;
2241 TARGET_NO_FP_IN_TOC
= 1;
2245 target_flags
&= ~MASK_RELOCATABLE
;
2246 target_flags_explicit
|= MASK_RELOCATABLE
;
2252 if (!strcmp (arg
, "altivec"))
2254 rs6000_explicit_options
.altivec_abi
= true;
2255 rs6000_altivec_abi
= 1;
2257 /* Enabling the AltiVec ABI turns off the SPE ABI. */
2260 else if (! strcmp (arg
, "no-altivec"))
2262 rs6000_explicit_options
.altivec_abi
= true;
2263 rs6000_altivec_abi
= 0;
2265 else if (! strcmp (arg
, "spe"))
2267 rs6000_explicit_options
.spe_abi
= true;
2269 rs6000_altivec_abi
= 0;
2270 if (!TARGET_SPE_ABI
)
2271 error ("not configured for ABI: '%s'", arg
);
2273 else if (! strcmp (arg
, "no-spe"))
2275 rs6000_explicit_options
.spe_abi
= true;
2279 /* These are here for testing during development only, do not
2280 document in the manual please. */
2281 else if (! strcmp (arg
, "d64"))
2283 rs6000_darwin64_abi
= 1;
2284 warning (0, "Using darwin64 ABI");
2286 else if (! strcmp (arg
, "d32"))
2288 rs6000_darwin64_abi
= 0;
2289 warning (0, "Using old darwin ABI");
2292 else if (! strcmp (arg
, "ibmlongdouble"))
2294 rs6000_explicit_options
.ieee
= true;
2295 rs6000_ieeequad
= 0;
2296 warning (0, "Using IBM extended precision long double");
2298 else if (! strcmp (arg
, "ieeelongdouble"))
2300 rs6000_explicit_options
.ieee
= true;
2301 rs6000_ieeequad
= 1;
2302 warning (0, "Using IEEE extended precision long double");
2307 error ("unknown ABI specified: '%s'", arg
);
2313 rs6000_select
[1].string
= arg
;
2317 rs6000_select
[2].string
= arg
;
2320 case OPT_mtraceback_
:
2321 rs6000_traceback_name
= arg
;
2324 case OPT_mfloat_gprs_
:
2325 rs6000_explicit_options
.float_gprs
= true;
2326 if (! strcmp (arg
, "yes") || ! strcmp (arg
, "single"))
2327 rs6000_float_gprs
= 1;
2328 else if (! strcmp (arg
, "double"))
2329 rs6000_float_gprs
= 2;
2330 else if (! strcmp (arg
, "no"))
2331 rs6000_float_gprs
= 0;
2334 error ("invalid option for -mfloat-gprs: '%s'", arg
);
2339 case OPT_mlong_double_
:
2340 rs6000_explicit_options
.long_double
= true;
2341 rs6000_long_double_type_size
= RS6000_DEFAULT_LONG_DOUBLE_SIZE
;
2342 if (value
!= 64 && value
!= 128)
2344 error ("Unknown switch -mlong-double-%s", arg
);
2345 rs6000_long_double_type_size
= RS6000_DEFAULT_LONG_DOUBLE_SIZE
;
2349 rs6000_long_double_type_size
= value
;
2352 case OPT_msched_costly_dep_
:
2353 rs6000_sched_costly_dep_str
= arg
;
2357 rs6000_explicit_options
.alignment
= true;
2358 if (! strcmp (arg
, "power"))
2360 /* On 64-bit Darwin, power alignment is ABI-incompatible with
2361 some C library functions, so warn about it. The flag may be
2362 useful for performance studies from time to time though, so
2363 don't disable it entirely. */
2364 if (DEFAULT_ABI
== ABI_DARWIN
&& TARGET_64BIT
)
2365 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2366 " it is incompatible with the installed C and C++ libraries");
2367 rs6000_alignment_flags
= MASK_ALIGN_POWER
;
2369 else if (! strcmp (arg
, "natural"))
2370 rs6000_alignment_flags
= MASK_ALIGN_NATURAL
;
2373 error ("unknown -malign-XXXXX option specified: '%s'", arg
);
2381 /* Do anything needed at the start of the asm file. */
2384 rs6000_file_start (void)
2388 const char *start
= buffer
;
2389 struct rs6000_cpu_select
*ptr
;
2390 const char *default_cpu
= TARGET_CPU_DEFAULT
;
2391 FILE *file
= asm_out_file
;
2393 default_file_start ();
2395 #ifdef TARGET_BI_ARCH
2396 if ((TARGET_DEFAULT
^ target_flags
) & MASK_64BIT
)
2400 if (flag_verbose_asm
)
2402 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
2403 rs6000_select
[0].string
= default_cpu
;
2405 for (i
= 0; i
< ARRAY_SIZE (rs6000_select
); i
++)
2407 ptr
= &rs6000_select
[i
];
2408 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
2410 fprintf (file
, "%s %s%s", start
, ptr
->name
, ptr
->string
);
2415 if (PPC405_ERRATUM77
)
2417 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
2421 #ifdef USING_ELFOS_H
2422 switch (rs6000_sdata
)
2424 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
2425 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
2426 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
2427 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
2430 if (rs6000_sdata
&& g_switch_value
)
2432 fprintf (file
, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED
, start
,
2442 #ifdef HAVE_AS_GNU_ATTRIBUTE
2443 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
2445 fprintf (file
, "\t.gnu_attribute 4, %d\n",
2446 (TARGET_HARD_FLOAT
&& TARGET_FPRS
) ? 1 : 2);
2447 fprintf (file
, "\t.gnu_attribute 8, %d\n",
2448 (TARGET_ALTIVEC_ABI
? 2
2449 : TARGET_SPE_ABI
? 3
2454 if (DEFAULT_ABI
== ABI_AIX
|| (TARGET_ELF
&& flag_pic
== 2))
2456 switch_to_section (toc_section
);
2457 switch_to_section (text_section
);
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with {cal|addi} */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with {cau|addis} */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif

  else
    return 2;
}
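
/* Worked examples (illustrative): 0x7fff passes the addi test, so it
   costs one insn; 0x12340000 has a clear low halfword and sign-extends
   from bit 31, so a single addis suffices; 0x12345678 needs addis+ori,
   i.e. two.  A full 64-bit constant recurses on its high and low
   32-bit halves and adds one insn to combine them.  */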
2523 num_insns_constant (rtx op
, enum machine_mode mode
)
2525 HOST_WIDE_INT low
, high
;
2527 switch (GET_CODE (op
))
2530 #if HOST_BITS_PER_WIDE_INT == 64
2531 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
2532 && mask64_operand (op
, mode
))
2536 return num_insns_constant_wide (INTVAL (op
));
2539 if (mode
== SFmode
|| mode
== SDmode
)
2544 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
2545 if (DECIMAL_FLOAT_MODE_P (mode
))
2546 REAL_VALUE_TO_TARGET_DECIMAL32 (rv
, l
);
2548 REAL_VALUE_TO_TARGET_SINGLE (rv
, l
);
2549 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
2552 if (mode
== VOIDmode
|| mode
== DImode
)
2554 high
= CONST_DOUBLE_HIGH (op
);
2555 low
= CONST_DOUBLE_LOW (op
);
2562 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
2563 if (DECIMAL_FLOAT_MODE_P (mode
))
2564 REAL_VALUE_TO_TARGET_DECIMAL64 (rv
, l
);
2566 REAL_VALUE_TO_TARGET_DOUBLE (rv
, l
);
2567 high
= l
[WORDS_BIG_ENDIAN
== 0];
2568 low
= l
[WORDS_BIG_ENDIAN
!= 0];
2572 return (num_insns_constant_wide (low
)
2573 + num_insns_constant_wide (high
));
2576 if ((high
== 0 && low
>= 0)
2577 || (high
== -1 && low
< 0))
2578 return num_insns_constant_wide (low
);
2580 else if (mask64_operand (op
, mode
))
2584 return num_insns_constant_wide (high
) + 1;
2587 return (num_insns_constant_wide (high
)
2588 + num_insns_constant_wide (low
) + 1);
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits = GET_MODE_NUNITS (mode);
  unsigned bitsize = GET_MODE_BITSIZE (inner);
  unsigned mask = GET_MODE_MASK (inner);

  HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
  HOST_WIDE_INT splat_val = val;
  HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
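
/* Example (illustrative): the V8HImode constant {5,5,5,5,5,5,5,5} is
   accepted with step == 1, copies == 1 (a plain "vspltish 5"), and
   {0x0505,0x0505,...} is accepted with copies == 2, since splat_val
   folds from 0x0505 down to the QImode value 5 (a vspltisb).  */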
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     patterns of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
2811 /* Initialize TARGET of vector PAIRED to VALS. */
2814 paired_expand_vector_init (rtx target
, rtx vals
)
2816 enum machine_mode mode
= GET_MODE (target
);
2817 int n_elts
= GET_MODE_NUNITS (mode
);
2819 rtx x
, new, tmp
, constant_op
, op1
, op2
;
2822 for (i
= 0; i
< n_elts
; ++i
)
2824 x
= XVECEXP (vals
, 0, i
);
2825 if (!CONSTANT_P (x
))
2830 /* Load from constant pool. */
2831 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
2837 /* The vector is initialized only with non-constants. */
2838 new = gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
2839 XVECEXP (vals
, 0, 1));
2841 emit_move_insn (target
, new);
2845 /* One field is non-constant and the other one is a constant. Load the
2846 constant from the constant pool and use ps_merge instruction to
2847 construct the whole vector. */
2848 op1
= XVECEXP (vals
, 0, 0);
2849 op2
= XVECEXP (vals
, 0, 1);
2851 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
2853 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
2854 emit_move_insn (tmp
, constant_op
);
2856 if (CONSTANT_P (op1
))
2857 new = gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
2859 new = gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
2861 emit_move_insn (target
, new);
2865 paired_expand_vector_move (rtx operands
[])
2867 rtx op0
= operands
[0], op1
= operands
[1];
2869 emit_move_insn (op0
, op1
);
2872 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
2873 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
2874 operands for the relation operation COND. This is a recursive
2878 paired_emit_vector_compare (enum rtx_code rcode
,
2879 rtx dest
, rtx op0
, rtx op1
,
2880 rtx cc_op0
, rtx cc_op1
)
2882 rtx tmp
= gen_reg_rtx (V2SFmode
);
2883 rtx tmp1
, max
, min
, equal_zero
;
2885 gcc_assert (TARGET_PAIRED_FLOAT
);
2886 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
2892 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2896 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
2897 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
2901 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
2904 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2907 tmp1
= gen_reg_rtx (V2SFmode
);
2908 max
= gen_reg_rtx (V2SFmode
);
2909 min
= gen_reg_rtx (V2SFmode
);
2910 equal_zero
= gen_reg_rtx (V2SFmode
);
2912 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
2913 emit_insn (gen_selv2sf4
2914 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
2915 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
2916 emit_insn (gen_selv2sf4
2917 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
2918 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
2919 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
2922 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2925 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2928 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2931 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2934 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
2943 /* Emit vector conditional expression.
2944 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
2945 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
2948 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
2949 rtx cond
, rtx cc_op0
, rtx cc_op1
)
2951 enum rtx_code rcode
= GET_CODE (cond
);
2953 if (!TARGET_PAIRED_FLOAT
)
2956 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
2961 /* Initialize vector TARGET to VALS. */
2964 rs6000_expand_vector_init (rtx target
, rtx vals
)
2966 enum machine_mode mode
= GET_MODE (target
);
2967 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
2968 int n_elts
= GET_MODE_NUNITS (mode
);
2969 int n_var
= 0, one_var
= -1;
2970 bool all_same
= true, all_const_zero
= true;
2974 for (i
= 0; i
< n_elts
; ++i
)
2976 x
= XVECEXP (vals
, 0, i
);
2977 if (!CONSTANT_P (x
))
2978 ++n_var
, one_var
= i
;
2979 else if (x
!= CONST0_RTX (inner_mode
))
2980 all_const_zero
= false;
2982 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
2988 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
2989 if (mode
!= V4SFmode
&& all_const_zero
)
2991 /* Zero register. */
2992 emit_insn (gen_rtx_SET (VOIDmode
, target
,
2993 gen_rtx_XOR (mode
, target
, target
)));
2996 else if (mode
!= V4SFmode
&& easy_vector_constant (const_vec
, mode
))
2998 /* Splat immediate. */
2999 emit_insn (gen_rtx_SET (VOIDmode
, target
, const_vec
));
3003 ; /* Splat vector element. */
3006 /* Load from constant pool. */
3007 emit_move_insn (target
, const_vec
);
3012 /* Store value to stack temp. Load vector element. Splat. */
3015 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
), 0);
3016 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
3017 XVECEXP (vals
, 0, 0));
3018 x
= gen_rtx_UNSPEC (VOIDmode
,
3019 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
3020 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
3022 gen_rtx_SET (VOIDmode
,
3025 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
3026 gen_rtx_PARALLEL (VOIDmode
,
3027 gen_rtvec (1, const0_rtx
)));
3028 emit_insn (gen_rtx_SET (VOIDmode
, target
,
3029 gen_rtx_VEC_DUPLICATE (mode
, x
)));
3033 /* One field is non-constant. Load constant then overwrite
3037 rtx copy
= copy_rtx (vals
);
3039 /* Load constant part of vector, substitute neighboring value for
3041 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
3042 rs6000_expand_vector_init (target
, copy
);
3044 /* Insert variable. */
3045 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
3049 /* Construct the vector in memory one field at a time
3050 and load the whole vector. */
3051 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
), 0);
3052 for (i
= 0; i
< n_elts
; i
++)
3053 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
3054 i
* GET_MODE_SIZE (inner_mode
)),
3055 XVECEXP (vals
, 0, i
));
3056 emit_move_insn (target
, mem
);
3059 /* Set field ELT of TARGET to VAL. */
3062 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
3064 enum machine_mode mode
= GET_MODE (target
);
3065 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
3066 rtx reg
= gen_reg_rtx (mode
);
3068 int width
= GET_MODE_SIZE (inner_mode
);
3071 /* Load single variable value. */
3072 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
), 0);
3073 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
3074 x
= gen_rtx_UNSPEC (VOIDmode
,
3075 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
3076 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
3078 gen_rtx_SET (VOIDmode
,
3082 /* Linear sequence. */
3083 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
3084 for (i
= 0; i
< 16; ++i
)
3085 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
3087 /* Set permute mask to insert element into target. */
3088 for (i
= 0; i
< width
; ++i
)
3089 XVECEXP (mask
, 0, elt
*width
+ i
)
3090 = GEN_INT (i
+ 0x10);
3091 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
3092 x
= gen_rtx_UNSPEC (mode
,
3093 gen_rtvec (3, target
, reg
,
3094 force_reg (V16QImode
, x
)),
3096 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
3099 /* Extract field ELT from VEC into TARGET. */
3102 rs6000_expand_vector_extract (rtx target
, rtx vec
, int elt
)
3104 enum machine_mode mode
= GET_MODE (vec
);
3105 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
3108 /* Allocate mode-sized buffer. */
3109 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
), 0);
3111 /* Add offset to field within buffer matching vector element. */
3112 mem
= adjust_address_nv (mem
, mode
, elt
* GET_MODE_SIZE (inner_mode
));
3114 /* Store single field into mode-sized buffer. */
3115 x
= gen_rtx_UNSPEC (VOIDmode
,
3116 gen_rtvec (1, const0_rtx
), UNSPEC_STVE
);
3117 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
3119 gen_rtx_SET (VOIDmode
,
3122 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS    ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void)in;
  (void)out;
  gcc_unreachable ();
#endif
}
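
/* Usage sketch (values taken from the worked comments above): for
   IN == 0x00fff000000fffff the outputs are out[0] == 20 (rotate),
   out[1] == 0x000000ffffffffff (first rldicl mask), out[2] == 44
   (rotate back) and out[3] == 0x00ffffffffffffff, so the AND is done
   with two rotate-and-mask insns instead of materializing the mask.  */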
3194 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
3197 invalid_e500_subreg (rtx op
, enum machine_mode mode
)
3199 if (TARGET_E500_DOUBLE
)
3201 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
3202 subreg:TI and reg:TF. Decimal float modes are like integer
3203 modes (only low part of each register used) for this
3205 if (GET_CODE (op
) == SUBREG
3206 && (mode
== SImode
|| mode
== DImode
|| mode
== TImode
3207 || mode
== DDmode
|| mode
== TDmode
)
3208 && REG_P (SUBREG_REG (op
))
3209 && (GET_MODE (SUBREG_REG (op
)) == DFmode
3210 || GET_MODE (SUBREG_REG (op
)) == TFmode
))
3213 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
3215 if (GET_CODE (op
) == SUBREG
3216 && (mode
== DFmode
|| mode
== TFmode
)
3217 && REG_P (SUBREG_REG (op
))
3218 && (GET_MODE (SUBREG_REG (op
)) == DImode
3219 || GET_MODE (SUBREG_REG (op
)) == TImode
3220 || GET_MODE (SUBREG_REG (op
)) == DDmode
3221 || GET_MODE (SUBREG_REG (op
)) == TDmode
))
3226 && GET_CODE (op
) == SUBREG
3228 && REG_P (SUBREG_REG (op
))
3229 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op
))))
3235 /* AIX increases natural record alignment to doubleword if the first
3236 field is an FP double while the FP fields remain word aligned. */
3239 rs6000_special_round_type_align (tree type
, unsigned int computed
,
3240 unsigned int specified
)
3242 unsigned int align
= MAX (computed
, specified
);
3243 tree field
= TYPE_FIELDS (type
);
3245 /* Skip all non field decls */
3246 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
3247 field
= TREE_CHAIN (field
);
3249 if (field
!= NULL
&& field
!= type
)
3251 type
= TREE_TYPE (field
);
3252 while (TREE_CODE (type
) == ARRAY_TYPE
)
3253 type
= TREE_TYPE (type
);
3255 if (type
!= error_mark_node
&& TYPE_MODE (type
) == DFmode
)
3256 align
= MAX (align
, 64);
3262 /* Darwin increases record alignment to the natural alignment of
3266 darwin_rs6000_special_round_type_align (tree type
, unsigned int computed
,
3267 unsigned int specified
)
3269 unsigned int align
= MAX (computed
, specified
);
3271 if (TYPE_PACKED (type
))
3274 /* Find the first field, looking down into aggregates. */
3276 tree field
= TYPE_FIELDS (type
);
3277 /* Skip all non field decls */
3278 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
3279 field
= TREE_CHAIN (field
);
3282 type
= TREE_TYPE (field
);
3283 while (TREE_CODE (type
) == ARRAY_TYPE
)
3284 type
= TREE_TYPE (type
);
3285 } while (AGGREGATE_TYPE_P (type
));
3287 if (! AGGREGATE_TYPE_P (type
) && type
!= error_mark_node
)
3288 align
= MAX (align
, TYPE_ALIGN (type
));
3293 /* Return 1 for an operand in small memory on V.4/eabi. */
3296 small_data_operand (rtx op ATTRIBUTE_UNUSED
,
3297 enum machine_mode mode ATTRIBUTE_UNUSED
)
3302 if (rs6000_sdata
== SDATA_NONE
|| rs6000_sdata
== SDATA_DATA
)
3305 if (DEFAULT_ABI
!= ABI_V4
)
3308 /* Vector and float memory instructions have a limited offset on the
3309 SPE, so using a vector or float variable directly as an operand is
3312 && (SPE_VECTOR_MODE (mode
) || FLOAT_MODE_P (mode
)))
3315 if (GET_CODE (op
) == SYMBOL_REF
)
3318 else if (GET_CODE (op
) != CONST
3319 || GET_CODE (XEXP (op
, 0)) != PLUS
3320 || GET_CODE (XEXP (XEXP (op
, 0), 0)) != SYMBOL_REF
3321 || GET_CODE (XEXP (XEXP (op
, 0), 1)) != CONST_INT
)
3326 rtx sum
= XEXP (op
, 0);
3327 HOST_WIDE_INT summand
;
3329 /* We have to be careful here, because it is the referenced address
3330 that must be 32k from _SDA_BASE_, not just the symbol. */
3331 summand
= INTVAL (XEXP (sum
, 1));
3332 if (summand
< 0 || (unsigned HOST_WIDE_INT
) summand
> g_switch_value
)
3335 sym_ref
= XEXP (sum
, 0);
3338 return SYMBOL_REF_SMALL_P (sym_ref
);
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
3354 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
3357 constant_pool_expr_1 (rtx op
, int *have_sym
, int *have_toc
)
3359 switch (GET_CODE (op
))
3362 if (RS6000_SYMBOL_REF_TLS_P (op
))
3364 else if (CONSTANT_POOL_ADDRESS_P (op
))
3366 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (op
), Pmode
))
3374 else if (! strcmp (XSTR (op
, 0), toc_label_name
))
3383 return (constant_pool_expr_1 (XEXP (op
, 0), have_sym
, have_toc
)
3384 && constant_pool_expr_1 (XEXP (op
, 1), have_sym
, have_toc
));
3386 return constant_pool_expr_1 (XEXP (op
, 0), have_sym
, have_toc
);
3395 constant_pool_expr_p (rtx op
)
3399 return constant_pool_expr_1 (op
, &have_sym
, &have_toc
) && have_sym
;
3403 toc_relative_expr_p (rtx op
)
3407 return constant_pool_expr_1 (op
, &have_sym
, &have_toc
) && have_toc
;
3411 legitimate_constant_pool_address_p (rtx x
)
3414 && GET_CODE (x
) == PLUS
3415 && GET_CODE (XEXP (x
, 0)) == REG
3416 && (TARGET_MINIMAL_TOC
|| REGNO (XEXP (x
, 0)) == TOC_REGISTER
)
3417 && constant_pool_expr_p (XEXP (x
, 1)));
bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
3429 /* SPE offset addressing is limited to 5-bits worth of double words. */
3430 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
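
/* That is, SPE reg+offset addressing accepts only offsets that are
   multiples of 8 in [0, 248]: SPE_CONST_OFFSET_OK (0xf8) holds, while
   0x100 (too large) and 0x0c (not a multiple of 8) both fail the
   mask test.  */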
3433 rs6000_legitimate_offset_address_p (enum machine_mode mode
, rtx x
, int strict
)
3435 unsigned HOST_WIDE_INT offset
, extra
;
3437 if (GET_CODE (x
) != PLUS
)
3439 if (GET_CODE (XEXP (x
, 0)) != REG
)
3441 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
3443 if (legitimate_constant_pool_address_p (x
))
3445 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
3448 offset
= INTVAL (XEXP (x
, 1));
3456 /* AltiVec vector modes. Only reg+reg addressing is valid and
3457 constant offset zero should not occur due to canonicalization. */
3464 /* Paired vector modes. Only reg+reg addressing is valid and
3465 constant offset zero should not occur due to canonicalization. */
3466 if (TARGET_PAIRED_FLOAT
)
3468 /* SPE vector modes. */
3469 return SPE_CONST_OFFSET_OK (offset
);
3472 if (TARGET_E500_DOUBLE
)
3473 return SPE_CONST_OFFSET_OK (offset
);
3477 /* On e500v2, we may have:
3479 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
3481 Which gets addressed with evldd instructions. */
3482 if (TARGET_E500_DOUBLE
)
3483 return SPE_CONST_OFFSET_OK (offset
);
3485 if (mode
== DFmode
|| mode
== DDmode
|| !TARGET_POWERPC64
)
3487 else if (offset
& 3)
3492 if (TARGET_E500_DOUBLE
)
3493 return (SPE_CONST_OFFSET_OK (offset
)
3494 && SPE_CONST_OFFSET_OK (offset
+ 8));
3498 if (mode
== TFmode
|| mode
== TDmode
|| !TARGET_POWERPC64
)
3500 else if (offset
& 3)
3511 return (offset
< 0x10000) && (offset
+ extra
< 0x10000);
3515 legitimate_indexed_address_p (rtx x
, int strict
)
3519 if (GET_CODE (x
) != PLUS
)
3525 /* Recognize the rtl generated by reload which we know will later be
3526 replaced with proper base and index regs. */
3528 && reload_in_progress
3529 && (REG_P (op0
) || GET_CODE (op0
) == PLUS
)
3533 return (REG_P (op0
) && REG_P (op1
)
3534 && ((INT_REG_OK_FOR_BASE_P (op0
, strict
)
3535 && INT_REG_OK_FOR_INDEX_P (op1
, strict
))
3536 || (INT_REG_OK_FOR_BASE_P (op1
, strict
)
3537 && INT_REG_OK_FOR_INDEX_P (op0
, strict
))));
3541 legitimate_indirect_address_p (rtx x
, int strict
)
3543 return GET_CODE (x
) == REG
&& INT_REG_OK_FOR_BASE_P (x
, strict
);
3547 macho_lo_sum_memory_operand (rtx x
, enum machine_mode mode
)
3549 if (!TARGET_MACHO
|| !flag_pic
3550 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
3554 if (GET_CODE (x
) != LO_SUM
)
3556 if (GET_CODE (XEXP (x
, 0)) != REG
)
3558 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
3562 return CONSTANT_P (x
);
3566 legitimate_lo_sum_address_p (enum machine_mode mode
, rtx x
, int strict
)
3568 if (GET_CODE (x
) != LO_SUM
)
3570 if (GET_CODE (XEXP (x
, 0)) != REG
)
3572 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
3574 /* Restrict addressing for DI because of our SUBREG hackery. */
3575 if (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
3576 || mode
== DDmode
|| mode
== TDmode
3581 if (TARGET_ELF
|| TARGET_MACHO
)
3583 if (DEFAULT_ABI
!= ABI_AIX
&& DEFAULT_ABI
!= ABI_DARWIN
&& flag_pic
)
3587 if (GET_MODE_NUNITS (mode
) != 1)
3589 if (GET_MODE_BITSIZE (mode
) > 64
3590 || (GET_MODE_BITSIZE (mode
) > 32 && !TARGET_POWERPC64
3591 && !(TARGET_HARD_FLOAT
&& TARGET_FPRS
3592 && (mode
== DFmode
|| mode
== DDmode
))))
3595 return CONSTANT_P (x
);
3602 /* Try machine-dependent ways of modifying an illegitimate address
3603 to be legitimate. If we find one, return the new, valid address.
3604 This is used from only one place: `memory_address' in explow.c.
3606 OLDX is the address as it was before break_out_memory_refs was
3607 called. In some cases it is useful to look at this to decide what
3610 MODE is passed so that this function can use GO_IF_LEGITIMATE_ADDRESS.
3612 It is always safe for this function to do nothing. It exists to
3613 recognize opportunities to optimize the output.
3615 On RS/6000, first check for the sum of a register with a constant
3616 integer that is out of range. If so, generate code to add the
3617 constant with the low-order 16 bits masked to the register and force
3618 this result into another register (this can be done with `cau').
3619 Then generate an address of REG+(CONST&0xffff), allowing for the
3620 possibility of bit 16 being a one.
3622 Then check for the sum of a register and something not constant, try to
3623 load the other things into a register and return the sum. */
3626 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
3627 enum machine_mode mode
)
3629 if (GET_CODE (x
) == SYMBOL_REF
)
3631 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
3633 return rs6000_legitimize_tls_address (x
, model
);
3636 if (GET_CODE (x
) == PLUS
3637 && GET_CODE (XEXP (x
, 0)) == REG
3638 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3639 && (unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000) >= 0x10000
3640 && !(SPE_VECTOR_MODE (mode
)
3641 || ALTIVEC_VECTOR_MODE (mode
)
3642 || (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
3643 || mode
== DImode
|| mode
== DDmode
3644 || mode
== TDmode
))))
3646 HOST_WIDE_INT high_int
, low_int
;
3648 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
3649 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
3650 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
3651 GEN_INT (high_int
)), 0);
3652 return gen_rtx_PLUS (Pmode
, sum
, GEN_INT (low_int
));
3654 else if (GET_CODE (x
) == PLUS
3655 && GET_CODE (XEXP (x
, 0)) == REG
3656 && GET_CODE (XEXP (x
, 1)) != CONST_INT
3657 && GET_MODE_NUNITS (mode
) == 1
3658 && ((TARGET_HARD_FLOAT
&& TARGET_FPRS
)
3660 || ((mode
!= DImode
&& mode
!= DFmode
&& mode
!= DDmode
)
3661 || (TARGET_E500_DOUBLE
&& mode
!= DDmode
)))
3662 && (TARGET_POWERPC64
|| mode
!= DImode
)
3667 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
3668 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
3670 else if (ALTIVEC_VECTOR_MODE (mode
))
3674 /* Make sure both operands are registers. */
3675 if (GET_CODE (x
) == PLUS
)
3676 return gen_rtx_PLUS (Pmode
, force_reg (Pmode
, XEXP (x
, 0)),
3677 force_reg (Pmode
, XEXP (x
, 1)));
3679 reg
= force_reg (Pmode
, x
);
3682 else if (SPE_VECTOR_MODE (mode
)
3683 || (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
3684 || mode
== DDmode
|| mode
== TDmode
3685 || mode
== DImode
)))
3689 /* We accept [reg + reg] and [reg + OFFSET]. */
3691 if (GET_CODE (x
) == PLUS
)
3693 rtx op1
= XEXP (x
, 0);
3694 rtx op2
= XEXP (x
, 1);
3697 op1
= force_reg (Pmode
, op1
);
3699 if (GET_CODE (op2
) != REG
3700 && (GET_CODE (op2
) != CONST_INT
3701 || !SPE_CONST_OFFSET_OK (INTVAL (op2
))
3702 || (GET_MODE_SIZE (mode
) > 8
3703 && !SPE_CONST_OFFSET_OK (INTVAL (op2
) + 8))))
3704 op2
= force_reg (Pmode
, op2
);
3706 /* We can't always do [reg + reg] for these, because [reg +
3707 reg + offset] is not a legitimate addressing mode. */
3708 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
3710 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
3711 return force_reg (Pmode
, y
);
3716 return force_reg (Pmode
, x
);
3722 && GET_CODE (x
) != CONST_INT
3723 && GET_CODE (x
) != CONST_DOUBLE
3725 && GET_MODE_NUNITS (mode
) == 1
3726 && (GET_MODE_BITSIZE (mode
) <= 32
3727 || ((TARGET_HARD_FLOAT
&& TARGET_FPRS
)
3728 && (mode
== DFmode
|| mode
== DDmode
))))
3730 rtx reg
= gen_reg_rtx (Pmode
);
3731 emit_insn (gen_elf_high (reg
, x
));
3732 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
3734 else if (TARGET_MACHO
&& TARGET_32BIT
&& TARGET_NO_TOC
3737 && ! MACHO_DYNAMIC_NO_PIC_P
3739 && GET_CODE (x
) != CONST_INT
3740 && GET_CODE (x
) != CONST_DOUBLE
3742 && ((TARGET_HARD_FLOAT
&& TARGET_FPRS
)
3743 || (mode
!= DFmode
&& mode
!= DDmode
))
3747 rtx reg
= gen_reg_rtx (Pmode
);
3748 emit_insn (gen_macho_high (reg
, x
));
3749 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
3752 && GET_CODE (x
) == SYMBOL_REF
3753 && constant_pool_expr_p (x
)
3754 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
3756 return create_TOC_reference (x
);
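
/* Worked example (illustrative): given (plus (reg) (const_int 0x12345)),
   the large-offset case above computes low_int == 0x2345 and
   high_int == 0x10000; the high part is added into a temporary
   register (an addis of 1) and the returned address is
   (plus (reg tmp) (const_int 0x2345)), a valid D-form offset.  */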
3762 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
3763 We need to emit DTP-relative relocations. */
3766 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
3771 fputs ("\t.long\t", file
);
3774 fputs (DOUBLE_INT_ASM_OP
, file
);
3779 output_addr_const (file
, x
);
3780 fputs ("@dtprel+0x8000", file
);
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
3811 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3812 this (thread-local) address. */
3815 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
3819 dest
= gen_reg_rtx (Pmode
);
3820 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
3826 tlsreg
= gen_rtx_REG (Pmode
, 13);
3827 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
3831 tlsreg
= gen_rtx_REG (Pmode
, 2);
3832 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
3836 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
3840 tmp
= gen_reg_rtx (Pmode
);
3843 tlsreg
= gen_rtx_REG (Pmode
, 13);
3844 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
3848 tlsreg
= gen_rtx_REG (Pmode
, 2);
3849 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
3853 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
3855 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
3860 rtx r3
, got
, tga
, tmp1
, tmp2
, eqv
;
3862 /* We currently use relocations like @got@tlsgd for tls, which
3863 means the linker will handle allocation of tls entries, placing
3864 them in the .got section. So use a pointer to the .got section,
3865 not one to secondary TOC sections used by 64-bit -mminimal-toc,
3866 or to secondary GOT sections used by 32-bit -fPIC. */
3868 got
= gen_rtx_REG (Pmode
, 2);
3872 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
3875 rtx gsym
= rs6000_got_sym ();
3876 got
= gen_reg_rtx (Pmode
);
3878 rs6000_emit_move (got
, gsym
, Pmode
);
3884 tmp1
= gen_reg_rtx (Pmode
);
3885 tmp2
= gen_reg_rtx (Pmode
);
3886 tmp3
= gen_reg_rtx (Pmode
);
3887 mem
= gen_const_mem (Pmode
, tmp1
);
3889 first
= emit_insn (gen_load_toc_v4_PIC_1b (gsym
));
3890 emit_move_insn (tmp1
,
3891 gen_rtx_REG (Pmode
, LR_REGNO
));
3892 emit_move_insn (tmp2
, mem
);
3893 emit_insn (gen_addsi3 (tmp3
, tmp1
, tmp2
));
3894 last
= emit_move_insn (got
, tmp3
);
3895 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
3900 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
3902 r3
= gen_rtx_REG (Pmode
, 3);
3903 tga
= rs6000_tls_get_addr ();
3905 if (DEFAULT_ABI
== ABI_AIX
&& TARGET_64BIT
)
3906 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
3907 else if (DEFAULT_ABI
== ABI_AIX
&& !TARGET_64BIT
)
3908 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
3909 else if (DEFAULT_ABI
== ABI_V4
)
3910 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
3915 insn
= emit_call_insn (insn
);
3916 RTL_CONST_CALL_P (insn
) = 1;
3917 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r3
);
3918 insn
= get_insns ();
3920 emit_libcall_block (insn
, dest
, r3
, addr
);
3922 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
3924 r3
= gen_rtx_REG (Pmode
, 3);
3925 tga
= rs6000_tls_get_addr ();
3927 if (DEFAULT_ABI
== ABI_AIX
&& TARGET_64BIT
)
3928 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
3929 else if (DEFAULT_ABI
== ABI_AIX
&& !TARGET_64BIT
)
3930 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
3931 else if (DEFAULT_ABI
== ABI_V4
)
3932 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
3937 insn
= emit_call_insn (insn
);
3938 RTL_CONST_CALL_P (insn
) = 1;
3939 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), r3
);
3940 insn
= get_insns ();
3942 tmp1
= gen_reg_rtx (Pmode
);
3943 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
3945 emit_libcall_block (insn
, tmp1
, r3
, eqv
);
3946 if (rs6000_tls_size
== 16)
3949 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
3951 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
3953 else if (rs6000_tls_size
== 32)
3955 tmp2
= gen_reg_rtx (Pmode
);
3957 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
3959 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
3962 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
3964 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
3968 tmp2
= gen_reg_rtx (Pmode
);
3970 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
3972 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
3974 insn
= gen_rtx_SET (Pmode
, dest
,
3975 gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
3981 /* IE, or 64-bit offset LE. */
3982 tmp2
= gen_reg_rtx (Pmode
);
3984 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
3986 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
3989 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
3991 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
/* Return 1 if X contains a thread-local symbol.  */

bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}

/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* The convention appears to be to define this wherever it is used.
   With legitimize_reload_address now defined here, REG_MODE_OK_FOR_BASE_P
   is now used here.  */
#ifndef REG_MODE_OK_FOR_BASE_P
#define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
#endif
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addi/addis pair and the
   mem insn.  This cuts the number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then is
   machopic_function_base_name() defined.  */
4040 rs6000_legitimize_reload_address (rtx x
, enum machine_mode mode
,
4041 int opnum
, int type
,
4042 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
4044 /* We must recognize output that we have already generated ourselves. */
4045 if (GET_CODE (x
) == PLUS
4046 && GET_CODE (XEXP (x
, 0)) == PLUS
4047 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
4048 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
4049 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4051 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4052 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
4053 opnum
, (enum reload_type
)type
);
4059 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
4060 && GET_CODE (x
) == LO_SUM
4061 && GET_CODE (XEXP (x
, 0)) == PLUS
4062 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
4063 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
4064 && GET_CODE (XEXP (XEXP (XEXP (x
, 0), 1), 0)) == CONST
4065 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
4066 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == MINUS
4067 && GET_CODE (XEXP (XEXP (XEXP (x
, 1), 0), 0)) == SYMBOL_REF
4068 && GET_CODE (XEXP (XEXP (XEXP (x
, 1), 0), 1)) == SYMBOL_REF
)
4070 /* Result of previous invocation of this function on Darwin
4071 floating point constant. */
4072 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4073 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
4074 opnum
, (enum reload_type
)type
);
4080 /* Force ld/std non-word aligned offset into base register by wrapping
4082 if (GET_CODE (x
) == PLUS
4083 && GET_CODE (XEXP (x
, 0)) == REG
4084 && REGNO (XEXP (x
, 0)) < 32
4085 && REG_MODE_OK_FOR_BASE_P (XEXP (x
, 0), mode
)
4086 && GET_CODE (XEXP (x
, 1)) == CONST_INT
4087 && (INTVAL (XEXP (x
, 1)) & 3) != 0
4088 && !ALTIVEC_VECTOR_MODE (mode
)
4089 && GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
4090 && TARGET_POWERPC64
)
4092 x
= gen_rtx_PLUS (GET_MODE (x
), x
, GEN_INT (0));
4093 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4094 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
4095 opnum
, (enum reload_type
) type
);
4100 if (GET_CODE (x
) == PLUS
4101 && GET_CODE (XEXP (x
, 0)) == REG
4102 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
4103 && REG_MODE_OK_FOR_BASE_P (XEXP (x
, 0), mode
)
4104 && GET_CODE (XEXP (x
, 1)) == CONST_INT
4105 && !SPE_VECTOR_MODE (mode
)
4106 && !(TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
4107 || mode
== DDmode
|| mode
== TDmode
4109 && !ALTIVEC_VECTOR_MODE (mode
))
4111 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
4112 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
4114 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
4116 /* Check for 32-bit overflow. */
4117 if (high
+ low
!= val
)
4123 /* Reload the high part into a base reg; leave the low part
4124 in the mem directly. */
4126 x
= gen_rtx_PLUS (GET_MODE (x
),
4127 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
4131 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
4132 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
4133 opnum
, (enum reload_type
)type
);
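
  /* Worked example (editor's note, not in the original source) of the
     split above: for val = 0x12345678, low = 0x5678 and
     high = 0x12340000, so high + low == val and the reload succeeds.
     For val = 0x00018000, low sign-extends to -0x8000 and high becomes
     0x00020000, which still sums back to val.  For val = 0x7fffffff the
     32-bit wrap makes high + low != val, so the overflow check above
     rejects the split.  */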
  if (GET_CODE (x) == SYMBOL_REF
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
	 The same goes for DImode without 64-bit gprs and DFmode and DDmode
	 without fprs.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || (TARGET_FPRS && TARGET_HARD_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = gen_rtx_CONST (Pmode,
			 gen_rtx_MINUS (Pmode, x,
					machopic_function_base_sym ()));
	  x = gen_rtx_LO_SUM (GET_MODE (x),
		gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
			      gen_rtx_HIGH (Pmode, offset)), offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
	      gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  */
  if (TARGET_ALTIVEC
      && ALTIVEC_VECTOR_MODE (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && GET_CODE (x) == SYMBOL_REF
      && constant_pool_expr_p (x)
      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
    {
      x = create_TOC_reference (x);
      *win = 1;
      return x;
    }
  *win = 0;
  return x;
}
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
int
rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
{
  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (TARGET_ALTIVEC
      && ALTIVEC_VECTOR_MODE (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (legitimate_small_data_p (mode, x))
    return 1;
  if (legitimate_constant_pool_address_p (x))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
    return 1;
  if (mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS)
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode)
	  || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS)
	  || TARGET_POWERPC64
	  || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
	  || legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
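
/* Editor's examples (not in the original source) of the address forms
   accepted above, as they appear in assembly:

	lwz  r3,0(r9)		# register indirect
	lwz  r3,8(r9)		# register + 16-bit signed offset
	lwzx r3,r9,r10		# register + register (indexed)
	lwzu r3,4(r9)		# pre-increment via an update form

   TLS SYMBOL_REFs are rejected here; they go through the TLS
   legitimizer instead.  */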
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

bool
rs6000_mode_dependent_address (rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 12 + 0x8000 >= 0x10000;
	}
      break;

    case LO_SUM:
      return true;

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    default:
      break;
    }

  return false;
}
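
/* Editor's note (not in the original source): the PLUS check above asks
   whether OFFSET, OFFSET+4, OFFSET+8 and OFFSET+12 all still encode as
   a signed 16-bit displacement, i.e. whether the largest sub-word
   access of a 16-byte operand fits.  E.g. val = 32756 gives
   32756 + 12 + 0x8000 == 0x10000, so that address is mode-dependent.  */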
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

bool
rs6000_offsettable_memref_p (rtx op)
{
  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address.  */
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

int
rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
{
  if (FP_REGNO_P (regno))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;

  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;

  if (ALTIVEC_REGNO_P (regno))
    return
      (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
      && !DECIMAL_FLOAT_MODE_P (mode))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;

  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
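
/* Editor's examples (not in the original source): with the usual
   settings, a DFmode value (8 bytes) needs two 32-bit GPRs but a
   single FPR, since UNITS_PER_FP_WORD is 8; a V4SImode vector
   (16 bytes) fits one AltiVec register because
   UNITS_PER_ALTIVEC_WORD is 16.  */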
/* Change register usage conditional on target flags.  */
void
rs6000_conditional_register_usage (void)
{
  int i;

  /* Set MQ register fixed (already call_used) if not POWER
     architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
     be allocated.  */
  if (! TARGET_POWER)
    fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
	 registers in prologues and epilogues.  We no longer use r14
	 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
	 pool for link-compatibility with older versions of GCC.  Once
	 "old" code has died out, we can return r14 to the allocation
	 pool.  */
      fixed_regs[14]
	= call_used_regs[14]
	= call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
		       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
	dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
			      GEN_INT (INTVAL (source)
				       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			      gen_rtx_IOR (SImode, copy_rtx (result),
					   GEN_INT (INTVAL (source) & 0xffff))));
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
	{
	case CONST_INT:
	  c0 = INTVAL (source);
	  c1 = -(c0 < 0);
	  break;

	case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = -(c0 < 0);
#else
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = CONST_DOUBLE_HIGH (source);
#endif
	  break;

	default:
	  gcc_unreachable ();
	}

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
					DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
					DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
	  || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
	{
	  if (ud1 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
	  else
	    emit_move_insn (dest, GEN_INT (ud1));
	}
      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	       || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
	{
	  if (ud2 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud2 << 16));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
	       || (ud4 == 0 && ! (ud3 & 0x8000)))
	{
	  if (ud3 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud3 << 16));

	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else
	{
	  if (ud4 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud4 << 16));

	  if (ud3 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud3)));

	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (32)));
	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2 << 16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
	}
    }
  return dest;
}
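
/* Editor's worked example (not in the original source): for
   c1 = 0x123456789abcdef0 the halfwords are ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0, so the final arm emits roughly the
   familiar five-instruction sequence

	lis  rD,0x1234
	ori  rD,rD,0x5678
	sldi rD,rD,32
	oris rD,rD,0x9abc
	ori  rD,rD,0xdef0

   while constants whose upper halfwords are all zeros or all ones fall
   into the cheaper arms above.  */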
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
      && ! reload_in_progress)
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
      && ! reload_in_progress)
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
	 to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
	      || FLOAT_MODE_P (mode)
	      || ((CONST_DOUBLE_HIGH (operands[1]) != 0
		   || CONST_DOUBLE_LOW (operands[1]) < 0)
		  && (CONST_DOUBLE_HIGH (operands[1]) != -1
		      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
	  || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
					    ? 32 : MEM_ALIGN (operands[0])))
	    || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
					       ? 32
					       : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  if (mode == SFmode && ! TARGET_POWERPC
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && GET_CODE (operands[0]) == MEM)
    {
      int regnum;

      if (reload_in_progress || reload_completed)
	regnum = true_regnum (operands[1]);
      else if (GET_CODE (operands[1]) == REG)
	regnum = REGNO (operands[1]);
      else
	regnum = -1;

      /* If operands[1] is a register, on POWER it may have
	 double-precision data in it, so truncate it to single
	 precision.  */
      if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
	{
	  rtx newreg;
	  newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
		    : gen_reg_rtx (mode));
	  emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
	  operands[1] = newreg;
	}
    }
  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
	  || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
	 know how to get a DFmode SUBREG of a TFmode.  */
      enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
      rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
			simplify_gen_subreg (imode, operands[1], mode, 0),
			imode);
      rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
					     GET_MODE_SIZE (imode)),
			simplify_gen_subreg (imode, operands[1], mode,
					     GET_MODE_SIZE (imode)),
			imode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);

  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_store (mem, operands[1]));
	}
      else if (INT_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (mem, operands[1]));
	}
      else
	gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_load (operands[0], mem));
	}
      else if (INT_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (operands[0], mem));
	}
      else
	gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (VOIDmode,
					    operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && constant_pool_expr_p (operands[1])
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
					      get_pool_mode (operands[1])))
	{
	  operands[1] = create_TOC_reference (operands[1]);
	}
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && num_insns_constant (operands[1], mode) > 2)
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && GET_CODE (operands[1]) != HIGH
	       && ! legitimate_constant_pool_address_p (operands[1])
	       && ! toc_relative_expr_p (operands[1]))
	{
	  /* Emit a USE operation so that the constant isn't deleted if
	     expensive optimizations are turned on because nobody
	     references it.  This should only be done for operands that
	     contain SYMBOL_REFs with CONSTANT_POOL_ADDRESS_P set.
	     This should not be done for operands that contain LABEL_REFs.
	     For now, we just handle the obvious case.  */
	  if (GET_CODE (operands[1]) != LABEL_REF)
	    emit_use (operands[1]);

#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	      return;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  Don't do
	     this if reload is in progress.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      if (mode == SImode)
		emit_insn (gen_addsi3 (operands[0], sym, other));
	      else
		emit_insn (gen_adddi3 (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && constant_pool_expr_p (XEXP (operands[1], 0))
	      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
			get_pool_constant (XEXP (operands[1], 0)),
			get_pool_mode (XEXP (operands[1], 0))))
	    {
	      operands[1]
		= gen_const_mem (mode,
				 create_TOC_reference (XEXP (operands[1], 0)));
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case TImode:
      rs6000_eliminate_indexed_memrefs (operands);

      if (TARGET_POWER)
	{
	  emit_insn (gen_rtx_PARALLEL (VOIDmode,
		       gen_rtvec (2,
				  gen_rtx_SET (VOIDmode,
					       operands[0], operands[1]),
				  gen_rtx_CLOBBER (VOIDmode,
						   gen_rtx_SCRATCH (SImode)))));
	  return;
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_VECTOR_MODE (MODE)				\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))

/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.  The SVR4 ABI
   specifies that structures <= 8 bytes are returned in r3/r4, but a
   draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_override_options for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* In the darwin64 abi, try to use registers for larger structs
     if possible.  */
  if (rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (0, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = fntype
    && (TYPE_ARG_TYPES (fntype) != 0
	&& (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
	    != void_type_node));

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   tree_code_name[ (int)TREE_CODE (ret_type) ]);
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use -maltivec"
	     " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec vectors.
   Quadword align large synthetic vector types.   */

int
function_arg_boundary (enum machine_mode mode, tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && TARGET_FPRS
	      && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;
  else if (rs6000_darwin64_abi && mode == BLKmode
	   && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
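
/* Editor's examples (not in the original source): under the V.4 rules
   above a double or long long gets 64-bit alignment, an AltiVec
   V4SImode argument gets 128, and a plain int falls through to
   PARM_BOUNDARY (32 bits on 32-bit targets).  */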
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
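
/* Editor's worked example (not in the original source): on V.4
   (parm_offset 2, i.e. the save area sits 8 bytes into the frame) a
   doubleword-aligned arg gives align = 1; with nwords = 1 already
   used, -(2 + 1) & 1 == 1, so the arg starts at word 2 -- one padding
   word is skipped to keep the slot 8-byte aligned.  */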
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
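
/* Editor's example (not in the original source): a 10-byte BLKmode
   struct occupies (10 + 3) >> 2 == 3 words on a 32-bit target and
   (10 + 7) >> 3 == 2 doublewords on a 64-bit one.  */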
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	}
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	    cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

void
function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      tree type, int named, int depth)
{
  int size;

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_VECTOR_MODE (mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
	{
	  cum->vregno++;
	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use -maltivec"
		   " to enable them");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  This places
	     them at 2 mod 4 in terms of words in 32-bit mode, since
	     the parameter save area starts at offset 24 from the
	     stack.  In 64-bit mode, they just have to start on an
	     even word, since the parameter save area is 16-byte
	     aligned.  Space for GPRs is reserved even if the argument
	     will be passed in memory.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
	   && !cum->stdarg
	   && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (rs6000_darwin64_abi
	   && mode == BLKmode
	   && TREE_CODE (type) == RECORD_TYPE
	   && (size = int_size_in_bytes (type)) > 0)
    {
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct. */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT);
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && (mode == SFmode || mode == DFmode
	      || mode == SDmode || mode == DDmode || mode == TDmode
	      || (mode == TFmode && !TARGET_IEEEQUAD)))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || mode == TFmode
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long and SPE vectors are aligned on the stack.
		 So are other 2 word items such as complex int due to
		 a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past when we've started
	     spilling to the stack indicates the fact that we've started
	     spilling to the stack to expand_builtin_saveregs.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     doubles are passed and returned in a pair of GPRs for ABI
     compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
			     || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
	gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
	gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
	{
	  rtx r1, r2;
	  enum machine_mode m = SImode;

	  r1 = gen_rtx_REG (m, gregno);
	  r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
	  r2 = gen_rtx_REG (m, gregno + 1);
	  r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
	}
      else
	return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
	return gen_rtx_REG (mode, gregno);
      else
	return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
#if 0
	    switch (mode)
	      {
	      case SCmode: mode = SFmode; break;
	      case DCmode: mode = DFmode; break;
	      case TCmode: mode = TFmode; break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (mode == TFmode || mode == TDmode)
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    int named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
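
/* Editor's example (not in the original source): for a darwin64 struct
   { double d; int i; } passed by value, the PARALLEL built above pairs
   an FP register with offset 0 for d and a word-mode GPR with offset 8
   for the int chunk, so callers can reassemble the block exactly as it
   is laid out in memory.  */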
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
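
/* Editor's example (not in the original source): a DFmode arg starting
   at align_words = 7 has n_units = 2 here, so only the last GPR is
   available; the loop emits a single (reg:SI r10, offset 0) pair after
   the NULL_RTX marker, and the caller stores the whole double to
   memory as well.  */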
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
	      tree type, int named)
{
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie);
    }

  if (rs6000_darwin64_abi && mode == BLKmode
      && TREE_CODE (type) == RECORD_TYPE)
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
	/* Vector parameters get passed in vector register
	   and also in GPRs or memory, in absence of prototype.  */
	int align_words;
	rtx slot;
	align_words = (cum->words + 1) & ~1;

	if (align_words >= GP_ARG_NUM_REG)
	  slot = NULL_RTX;
	else
	  slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	return gen_rtx_PARALLEL (mode,
		 gen_rtvec (2,
			    gen_rtx_EXPR_LIST (VOIDmode,
					       slot, const0_rtx),
			    gen_rtx_EXPR_LIST (VOIDmode,
					       gen_rtx_REG (mode, cum->vregno),
					       const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  This places them at
	     2 mod 4 in terms of words in 32-bit mode, since the parameter
	     save area starts at offset 24 from the stack.  In 64-bit mode,
	     they just have to start on an even word, since the parameter
	     save area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
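/* Worked example of the alignment arithmetic above, added for
   illustration: in 32-bit mode word W of the parameter save area sits
   at sp + 24 + 4*W, which is 16-byte aligned exactly when W == 2 mod 4.
   With cum->words == 3, align = (2 - 3) & 3 == 3 and align_words == 6,
   the next word that is 2 mod 4.  In 64-bit mode the save area itself
   is 16-byte aligned, so only the evenness of the word offset matters.  */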
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DDmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TDmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && (mode == SFmode || mode == DFmode
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
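      /* Example, added for illustration: under V.4 the FP argument
	 registers are f1..f8 (FP_ARG_V4_MAX_REG), and TFmode/TDmode
	 values need two of them, which is why the test above adds one
	 to cum->fregno: if the next free register is already f8, the
	 pair f8/f9 would overrun the set, so the value goes to memory
	 (NULL_RTX) instead.  */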
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);
      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
	{
	  rtx rvec[GP_ARG_NUM_REG + 1];
	  rtx r;
	  int k;
	  bool needs_psave;
	  enum machine_mode fmode = mode;
	  unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	  if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	    {
	      /* Currently, we only ever need one reg here because complex
		 doubles are split.  */
	      gcc_assert (cum->fregno == FP_ARG_MAX_REG
			  && (fmode == TFmode || fmode == TDmode));

	      /* Long double or _Decimal128 split over regs and memory.  */
	      fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
	    }

	  /* Do we also need to pass this arg in the parameter save
	     area?  */
	  needs_psave = (type
			 && (cum->nargs_prototype <= 0
			     || (DEFAULT_ABI == ABI_AIX
				 && TARGET_XL_COMPAT
				 && align_words >= GP_ARG_NUM_REG)));

	  if (!needs_psave && mode == fmode)
	    return gen_rtx_REG (fmode, cum->fregno);
	  k = 0;

	  /* Describe the part that goes in gprs or the stack.
	     This piece must come first, before the fprs.  */
	  if (align_words < GP_ARG_NUM_REG)
	    {
	      unsigned long n_words = rs6000_arg_size (mode, type);

	      if (align_words + n_words > GP_ARG_NUM_REG
		  || (TARGET_32BIT && TARGET_POWERPC64))
		{
		  /* If this is partially on the stack, then we only
		     include the portion actually in registers here.  */
		  enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
		  rtx off;
		  int i = 0;
		  if (align_words + n_words > GP_ARG_NUM_REG)
		    /* Not all of the arg fits in gprs.  Say that it
		       goes in memory too, using a magic NULL_RTX
		       component.  Also see comment in
		       rs6000_mixed_function_arg for why the normal
		       function_arg_partial_nregs scheme doesn't work
		       in this case.  */
		    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
						   const0_rtx);
		  do
		    {
		      r = gen_rtx_REG (rmode,
				       GP_ARG_MIN_REG + align_words);
		      off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
		      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		    }
		  while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
		}
	      else
		{
		  /* The whole arg fits in gprs.  */
		  r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
		}
	    }
	  else
	    /* It's entirely in memory.  */
	    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

	  /* Describe where this piece goes in the fprs.  */
	  r = gen_rtx_REG (fmode, cum->fregno);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  if (mode == BLKmode)
	    mode = Pmode;

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
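/* Illustrative sketch, not from the original source: for an
   unprototyped double, the PARALLEL returned above carries the value
   twice, e.g.

       (parallel:DF [(expr_list (reg:DF 5) (const_int 0))
                     (expr_list (reg:DF 33) (const_int 0))])

   once in the fixed gpr/stack position and once in f1, so both a
   prototype-aware and a prototype-unaware callee can find it.  The
   hard register numbers shown are hypothetical.  */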
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			  tree type, bool named)
{
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (rs6000_darwin64_abi && mode == BLKmode
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as fprs, then this function should
	 return the number of partial bytes passed in the parameter
	 save area rather than partial bytes passed in fprs.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || (DEFAULT_ABI == ABI_AIX
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
	       > FP_ARG_MAX_REG + 1)
	ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
	return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
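/* Worked example, added for illustration: on a 32-bit target a
   two-word integer argument (e.g. long long) whose first word lands in
   the last gpr has align_words == 7 and rs6000_arg_size == 2, so
   ret = (8 - 7) * 4 == 4: four bytes travel in r10 and the remaining
   four go to memory.  */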
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
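/* Example, added for illustration: rs6000_move_block_from_reg (5, mem, 2)
   on a 32-bit target emits two SImode stores, mem+0 <- r5 and
   mem+4 <- r6, legitimizing each address as above when it is not
   strictly valid after reload.  */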
/* Perform any needed actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *cum;
  function_arg_advance (&next_cum, mode, type, 1, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	{
	  /* V4 va_list_gpr_size counts number of registers needed.  */
	  if (nregs > cfun->va_list_gpr_size)
	    nregs = cfun->va_list_gpr_size;
	}
      else
	{
	  /* char * va_list instead counts number of bytes needed.  */
	  if (nregs > cfun->va_list_gpr_size / reg_size)
	    nregs = cfun->va_list_gpr_size / reg_size;
	}

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (DFmode));
	  emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
	}

      emit_label (lab);
    }
}
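/* Worked numeric example for the V.4 save-area sizing above, added for
   illustration: 32-bit, first unnamed gpr r6 (first_reg_offset == 3),
   no fprs live, and cfun->va_list_gpr_size large enough not to clamp:
   offset = -((3 * 4) & ~7) = -8, gpr_reg_num = 8 - 3 = 5, and
   gpr_size = (5 * 4 + 7) & ~7 = 24 bytes, doubleword aligned as the
   ABI requires of ap.reg_save_area.  */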
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (FIELD_DECL, get_identifier ("reserved"),
		      short_unsigned_type_node);
  f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  TREE_CHAIN (f_gpr) = f_fpr;
  TREE_CHAIN (f_fpr) = f_res;
  TREE_CHAIN (f_res) = f_ovf;
  TREE_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
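/* The record built above is the user-visible V.4 va_list; in C terms
   (added for illustration, field names matching the build_decl calls):

       typedef struct __va_list_tag {
         unsigned char gpr;            index of next saved gpr (0..8)
         unsigned char fpr;            index of next saved fpr (0..8)
         unsigned short reserved;      the named padding
         void *overflow_arg_area;      args beyond the register set
         void *reg_save_area;          block spilled by the prologue
       } va_list[1];

   The array-of-one-element trick makes va_list decay to a pointer when
   passed between functions.  */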
/* Implement va_start.  */

void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = TREE_CHAIN (f_gpr);
  f_res = TREE_CHAIN (f_fpr);
  f_ovf = TREE_CHAIN (f_res);
  f_sav = TREE_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
		size_int (words * UNITS_PER_WORD));
  t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
		size_int (cfun->machine->varargs_save_offset));
  t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
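/* Example of the state va_start leaves behind, added for illustration:
   for a V.4 callee "int f (int fmt, ...)" one named integer word has
   been consumed, so gpr is set to 1 and fpr to 0; overflow_arg_area
   points words*4 bytes past the incoming args, and reg_save_area at
   the block spilled by setup_incoming_varargs, adjusted by
   varargs_save_offset.  */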
/* Implement va_arg.  */

tree
rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type (type);
  int regalign = 0;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      tree post = NULL_TREE;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      append_to_statement_list (post, pre_p);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = TREE_CHAIN (f_gpr);
  f_res = TREE_CHAIN (f_fpr);
  f_ovf = TREE_CHAIN (f_res);
  f_sav = TREE_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && (TYPE_MODE (type) == SFmode
	  || TYPE_MODE (type) == DFmode
	  || TYPE_MODE (type) == TFmode
	  || TYPE_MODE (type) == SDmode
	  || TYPE_MODE (type) == DDmode
	  || TYPE_MODE (type) == TDmode))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = 8*4;
      sav_scale = 8;
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");
  DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();

  /*  AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label ();
      lab_over = create_artificial_label ();

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), reg,
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  regalign = 1;
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), reg,
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, reg, t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg,
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));

      t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
      gimplify_and_add (t, pre_p);

      t = build1 (GOTO_EXPR, void_type_node, lab_over);
      gimplify_and_add (t, pre_p);

      t = build1 (LABEL_EXPR, void_type_node, lab_false);
      append_to_statement_list (t, pre_p);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has taken care of for special cases.  */
	  t = build_gimple_modify_stmt (reg,
					build_int_cst (TREE_TYPE (reg), 8));
	  gimplify_and_add (t, pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
      t = fold_convert (sizetype, t);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  size_int (-align));
      t = fold_convert (TREE_TYPE (ovf), t);
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  u = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
  gimplify_and_add (u, pre_p);

  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
  t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
  gimplify_and_add (t, pre_p);

  if (lab_over)
    {
      t = build1 (LABEL_EXPR, void_type_node, lab_over);
      append_to_statement_list (t, pre_p);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
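/* Worked example of the register-fit test above, added for
   illustration: the gpr/fpr counters run 0..8 and the branch to
   lab_false is taken when the counter >= 8 - n_reg + 1.  For a
   two-register item (n_reg == 2) that is counter >= 7: with only one
   register left the pair no longer fits, so the value is fetched from
   the overflow area instead.  */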
static void
def_builtin (int mask, const char *name, tree type, int code)
{
  if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
    {
      if (rs6000_builtin_decls[code])
	abort ();

      rs6000_builtin_decls[code] =
	add_builtin_function (name, type, code, BUILT_IN_MD,
			      NULL, NULL_TREE);
    }
}
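/* Typical use, added for illustration and matching the tables below
   (the function-type variable is shorthand in the style used elsewhere
   in this file):

       def_builtin (MASK_ALTIVEC, "__builtin_altivec_vmaddfp",
		    v4sf_ftype_v4sf_v4sf_v4sf, ALTIVEC_BUILTIN_VMADDFP);

   The mask is checked against target_flags, so the builtin is only
   registered when the corresponding ISA extension is enabled.  */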
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

static const struct builtin_description bdesc_3arg[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },

  { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
  { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
  { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
  { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
  { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
  { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
  { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
  { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
  { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
};
/* DST operations: void foo (void *, const int, const char).  */

static const struct builtin_description bdesc_dst[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
  { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
  { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
  { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
};
7045 /* Simple binary operations: VECc = foo (VECa, VECb). */
7047 static struct builtin_description bdesc_2arg
[] =
7049 { MASK_ALTIVEC
, CODE_FOR_addv16qi3
, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM
},
7050 { MASK_ALTIVEC
, CODE_FOR_addv8hi3
, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM
},
7051 { MASK_ALTIVEC
, CODE_FOR_addv4si3
, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM
},
7052 { MASK_ALTIVEC
, CODE_FOR_addv4sf3
, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP
},
7053 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddcuw
, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW
},
7054 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddubs
, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS
},
7055 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddsbs
, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS
},
7056 { MASK_ALTIVEC
, CODE_FOR_altivec_vadduhs
, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS
},
7057 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddshs
, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS
},
7058 { MASK_ALTIVEC
, CODE_FOR_altivec_vadduws
, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS
},
7059 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddsws
, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS
},
7060 { MASK_ALTIVEC
, CODE_FOR_andv4si3
, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND
},
7061 { MASK_ALTIVEC
, CODE_FOR_andcv4si3
, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC
},
7062 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgub
, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB
},
7063 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsb
, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB
},
7064 { MASK_ALTIVEC
, CODE_FOR_altivec_vavguh
, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH
},
7065 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsh
, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH
},
7066 { MASK_ALTIVEC
, CODE_FOR_altivec_vavguw
, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW
},
7067 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsw
, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW
},
7068 { MASK_ALTIVEC
, CODE_FOR_altivec_vcfux
, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX
},
7069 { MASK_ALTIVEC
, CODE_FOR_altivec_vcfsx
, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX
},
7070 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpbfp
, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP
},
7071 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequb
, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB
},
7072 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequh
, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH
},
7073 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequw
, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW
},
7074 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpeqfp
, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP
},
7075 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgefp
, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP
},
7076 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtub
, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB
},
7077 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsb
, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB
},
7078 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtuh
, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH
},
7079 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsh
, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH
},
7080 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtuw
, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW
},
7081 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsw
, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW
},
7082 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtfp
, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP
},
7083 { MASK_ALTIVEC
, CODE_FOR_altivec_vctsxs
, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS
},
7084 { MASK_ALTIVEC
, CODE_FOR_altivec_vctuxs
, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS
},
7085 { MASK_ALTIVEC
, CODE_FOR_umaxv16qi3
, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB
},
7086 { MASK_ALTIVEC
, CODE_FOR_smaxv16qi3
, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB
},
7087 { MASK_ALTIVEC
, CODE_FOR_umaxv8hi3
, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH
},
7088 { MASK_ALTIVEC
, CODE_FOR_smaxv8hi3
, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH
},
7089 { MASK_ALTIVEC
, CODE_FOR_umaxv4si3
, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW
},
7090 { MASK_ALTIVEC
, CODE_FOR_smaxv4si3
, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW
},
7091 { MASK_ALTIVEC
, CODE_FOR_smaxv4sf3
, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP
},
7092 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghb
, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB
},
7093 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghh
, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH
},
7094 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghw
, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW
},
7095 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglb
, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB
},
7096 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglh
, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH
},
7097 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglw
, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW
},
7098 { MASK_ALTIVEC
, CODE_FOR_uminv16qi3
, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB
},
7099 { MASK_ALTIVEC
, CODE_FOR_sminv16qi3
, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB
},
7100 { MASK_ALTIVEC
, CODE_FOR_uminv8hi3
, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH
},
7101 { MASK_ALTIVEC
, CODE_FOR_sminv8hi3
, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH
},
7102 { MASK_ALTIVEC
, CODE_FOR_uminv4si3
, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW
},
7103 { MASK_ALTIVEC
, CODE_FOR_sminv4si3
, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW
},
7104 { MASK_ALTIVEC
, CODE_FOR_sminv4sf3
, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP
},
7105 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuleub
, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB
},
7106 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulesb
, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB
},
7107 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuleuh
, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH
},
7108 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulesh
, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH
},
7109 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuloub
, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB
},
7110 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulosb
, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB
},
7111 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulouh
, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH
},
7112 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulosh
, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH
},
7113 { MASK_ALTIVEC
, CODE_FOR_altivec_norv4si3
, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR
},
7114 { MASK_ALTIVEC
, CODE_FOR_iorv4si3
, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR
},
7115 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum
, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM
},
7116 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum
, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM
},
7117 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkpx
, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX
},
7118 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkshss
, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS
},
7119 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkswss
, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS
},
7120 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhus
, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS
},
7121 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkshus
, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS
},
7122 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwus
, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS
},
7123 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkswus
, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS
},
7124 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlb
, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB
},
7125 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlh
, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH
},
7126 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlw
, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW
},
7127 { MASK_ALTIVEC
, CODE_FOR_vashlv16qi3
, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB
},
7128 { MASK_ALTIVEC
, CODE_FOR_vashlv8hi3
, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH
},
7129 { MASK_ALTIVEC
, CODE_FOR_vashlv4si3
, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW
},
7130 { MASK_ALTIVEC
, CODE_FOR_altivec_vsl
, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL
},
7131 { MASK_ALTIVEC
, CODE_FOR_altivec_vslo
, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO
},
7132 { MASK_ALTIVEC
, CODE_FOR_altivec_vspltb
, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB
},
7133 { MASK_ALTIVEC
, CODE_FOR_altivec_vsplth
, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH
},
7134 { MASK_ALTIVEC
, CODE_FOR_altivec_vspltw
, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW
},
7135 { MASK_ALTIVEC
, CODE_FOR_vlshrv16qi3
, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB
},
7136 { MASK_ALTIVEC
, CODE_FOR_vlshrv8hi3
, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH
},
7137 { MASK_ALTIVEC
, CODE_FOR_vlshrv4si3
, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW
},
7138 { MASK_ALTIVEC
, CODE_FOR_vashrv16qi3
, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB
},
7139 { MASK_ALTIVEC
, CODE_FOR_vashrv8hi3
, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH
},
7140 { MASK_ALTIVEC
, CODE_FOR_vashrv4si3
, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW
},
7141 { MASK_ALTIVEC
, CODE_FOR_altivec_vsr
, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR
},
7142 { MASK_ALTIVEC
, CODE_FOR_altivec_vsro
, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO
},
7143 { MASK_ALTIVEC
, CODE_FOR_subv16qi3
, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM
},
7144 { MASK_ALTIVEC
, CODE_FOR_subv8hi3
, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM
},
7145 { MASK_ALTIVEC
, CODE_FOR_subv4si3
, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM
},
7146 { MASK_ALTIVEC
, CODE_FOR_subv4sf3
, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP
},
7147 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubcuw
, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW
},
7148 { MASK_ALTIVEC
, CODE_FOR_altivec_vsububs
, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS
},
7149 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubsbs
, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS
},
7150 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubuhs
, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS
},
7151 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubshs
, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS
},
7152 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubuws
, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS
},
7153 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubsws
, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS
},
7154 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4ubs
, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS
},
7155 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4sbs
, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS
},
7156 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4shs
, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS
},
7157 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum2sws
, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS
},
7158 { MASK_ALTIVEC
, CODE_FOR_altivec_vsumsws
, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS
},
7159 { MASK_ALTIVEC
, CODE_FOR_xorv4si3
, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR
},
7161 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD
},
7162 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP
},
7163 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM
},
7164 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM
},
7165 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM
},
7166 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC
},
7167 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS
},
7168 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS
},
7169 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS
},
7170 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS
},
7171 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS
},
7172 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS
},
7173 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS
},
7174 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND
},
7175 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC
},
7176 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG
},
7177 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW
},
7178 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW
},
7179 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH
},
7180 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH
},
7181 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB
},
7182 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB
},
7183 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB
},
7184 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ
},
7185 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP
},
7186 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW
},
7187 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH
},
7188 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB
},
7189 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE
},
7190 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT
},
7191 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP
},
7192 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW
},
7193 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW
},
7194 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH
},
7195 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH
},
7196 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB
},
7197 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB
},
7198 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE
},
7199 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT
},
7200 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX
},
7201 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP
},
7202 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW
},
7203 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW
},
7204 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH
},
7205 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH
},
7206 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB
},
7207 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB
},
7208 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH
},
7209 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW
},
7210 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH
},
7211 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB
},
7212 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL
},
7213 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW
},
7214 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH
},
7215 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB
},
7216 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN
},
7217 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP
},
7218 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW
},
7219 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW
},
7220 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH
},
7221 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH
},
7222 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB
},
7223 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB
},
7224 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE
},
7225 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB
},
7226 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB
},
7227 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH
},
7228 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH
},
7229 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO
},
7230 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH
},
7231 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH
},
7232 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB
},
7233 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB
},
7234 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR
},
7235 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR
},
7236 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK
},
7237 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM
},
7238 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM
},
7239 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX
},
7240 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS
},
7241 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS
},
7242 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS
},
7243 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS
},
7244 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS
},
7245 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU
},
7246 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS
},
7247 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS
},
7248 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL
},
7249 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW
},
7250 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH
},
7251 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB
},
7252 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL
},
7253 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW
},
7254 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH
},
7255 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB
},
7256 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL
},
7257 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO
},
7258 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR
},
7259 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW
},
7260 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH
},
7261 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB
},
7262 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA
},
7263 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW
},
7264 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH
},
7265 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB
},
7266 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL
},
7267 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO
},
7268 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB
},
7269 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP
},
7270 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM
},
7271 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM
},
7272 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM
},
7273 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC
},
7274 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS
},
7275 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS
},
7276 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS
},
7277 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS
},
7278 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS
},
7279 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS
},
7280 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS
},
7281 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S
},
7282 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS
},
7283 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS
},
7284 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS
},
7285 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S
},
7286 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS
},
7287 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR
},

  { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
  { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
  { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
  { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
  { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
  { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
  { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
  { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
  { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
  { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },

  /* Place holder, leave as first spe builtin.  */
  { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
  { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
  { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
  { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
  { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
  { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
  { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
  { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
  { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
  { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
  { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
  { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
  { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
  { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
  { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
  { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
  { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
  { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
  { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
  { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
  { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
  { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
  { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
  { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
  { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
  { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
  { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
  { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
  { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
  { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
  { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
  { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
  { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
  { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
  { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
  { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
  { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
  { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
  { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
  { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
  { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
  { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
  { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
  { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
  { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
  { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
  { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
  { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
  { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
  { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
  { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
  { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
  { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
  { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
  { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
  { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
  { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
  { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
  { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
  { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
  { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
  { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
  { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
  { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
  { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
  { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
  { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
  { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
  { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
  { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
  { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
  { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
  { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
  { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
  { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
  { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
  { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
  { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
  { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
  { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
  { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
  { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
  { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
  { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
  { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
  { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
  { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
  { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
  { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
  { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
  { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
  { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
  { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
  { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
  { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
  { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
  { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
  { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
  { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
  { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
  { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
  { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
  { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
  { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
  { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
  { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
  { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
  { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
  { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },

  /* SPE binary operations expecting a 5-bit unsigned literal.  */
  { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },

  { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
  { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
  { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
  { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
  { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
  { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
  { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
  { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
  { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
  { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
  { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
  { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
  { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
  { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
  { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
  { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
  { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
  { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
  { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
  { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
  { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
  { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
  { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
  { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
  { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
  { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },

  /* Place-holder.  Leave as last binary SPE builtin.  */
  { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
};
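
/* Illustrative sketch (not part of the table machinery): entries above
   whose icode is CODE_FOR_nothing describe overloaded builtins such as
   __builtin_vec_add.  Assuming the <altivec.h> wrappers, user code like

       vector signed int f (vector signed int a, vector signed int b)
       {
         return vec_add (a, b);
       }

   reaches the C front end as __builtin_vec_add, which overload
   resolution replaces with a type-specific builtin (here,
   __builtin_altivec_vadduwm for 32-bit integer elements) before any of
   the expanders in this file run.  */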

/* AltiVec predicates.  */

struct builtin_description_predicates
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *opcode;
  const char *const name;
  const enum rs6000_builtins code;
};

static const struct builtin_description_predicates bdesc_altivec_preds[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },

  { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
  { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
  { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
};
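
/* Usage sketch (assuming the vec_all_... and vec_any_... macros in
   <altivec.h>): each predicate builtin takes a leading CR6 selector
   followed by the two vectors, roughly

       vec_all_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_LT, a, b)
       vec_any_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a, b)

   where the selector chooses which CR6 bit
   altivec_expand_predicate_builtin (below) tests after the compare.  */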

/* SPE predicates.  */
static struct builtin_description bdesc_spe_predicates[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
  { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
  { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
  { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
  { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
  { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
  { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
  { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
  { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
  { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
};

/* SPE evsel predicates.  */
static struct builtin_description bdesc_spe_evsel[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
  { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
  { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
  { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
  { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
  { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
  { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
  { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
  { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
  { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
};

/* PAIRED predicates.  */
static const struct builtin_description bdesc_paired_preds[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
};

/* ABS* operations.  */

static const struct builtin_description bdesc_abs[] =
{
  { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
  { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
  { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
  { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

static struct builtin_description bdesc_1arg[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
  { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },

  /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
     end with SPE_BUILTIN_EVSUBFUSIAAW.  */
  { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
  { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
  { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
  { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
  { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
  { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
  { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
  { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
  { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
  { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
  { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
  { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
  { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
  { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
  { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
  { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
  { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
  { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
  { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
  { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
  { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
  { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
  { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
  { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
  { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
  { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
  { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
  { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },

  /* Place-holder.  Leave as last unary SPE builtin.  */
  { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },

  { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
  { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
  { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
  { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
  { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
};

static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
          || INTVAL (op0) > 15
          || INTVAL (op0) < -16)
        {
          error ("argument 1 must be a 5-bit signed literal");
          return const0_rtx;
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
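
/* Example of the 5-bit signed literal constraint enforced above
   (a sketch, assuming the <altivec.h> vec_splat_s8 wrapper):

       vector signed char ok  = vec_splat_s8 (-16);   accepted: -16..15
       vector signed char bad = vec_splat_s8 (16);    rejected with the
                                                      error above       */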

static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}

static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
          || TREE_INT_CST_LOW (arg1) & ~0x1f)
        {
          error ("argument 2 must be a 5-bit unsigned literal");
          return const0_rtx;
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
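
/* Example of the 5-bit unsigned literal constraint enforced above
   (a sketch): for the listed icodes the second argument must fold to
   a constant in 0..31, so

       b = vec_splat (a, 3);      accepted
       b = vec_splat (a, 32);     rejected with the error above  */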

static rtx
altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
                                  tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1,
                         gen_rtx_SYMBOL_REF (Pmode, opcode));
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
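
/* Rough shape of what the expansion above produces for a
   vec_all_eq-style query on V4SI operands (an illustrative sketch;
   the exact CR6 extraction sequence comes from the cr6_test patterns):

       vcmpequw. vS,vA,vB       recording compare, setting CR6
       mfcr      rT             copy the condition register
       rlwinm    rT,rT,25,1     isolate the selected CR6 bit

   leaving 0 or 1 in the SImode target.  */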

static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);

  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);

  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
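
/* Addressing sketch for the two branches above (assuming the
   <altivec.h> vec_ld wrapper): a zero displacement uses the base
   register directly, while a nonzero one forms a reg+reg sum first:

       v = vec_ld (0, p);     MEM (rP)        ->  lvx v,0,rP
       v = vec_ld (16, p);    MEM (rO + rP)   ->  lvx v,rO,rP  */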

static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);

  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}

static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);

  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}

static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);

  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}

static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
          || TREE_INT_CST_LOW (arg2) & ~0xf)
        {
          error ("argument 3 must be a 4-bit unsigned literal");
          return const0_rtx;
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
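
/* Example of the 4-bit literal constraint on vsldoi enforced above
   (a sketch, assuming the <altivec.h> vec_sld wrapper): the shift
   count must fold to 0..15, so

       r = vec_sld (a, b, 4);     accepted
       r = vec_sld (a, b, 16);    rejected with the error above  */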

/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_altivec_lvx_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_altivec_lvx_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_altivec_lvx_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_altivec_lvx_v4sf;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                           bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_altivec_stvx_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_altivec_stvx_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_altivec_stvx_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_altivec_stvx_v4sf;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}

/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                            bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1, mode2;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
        arg0 = CALL_EXPR_ARG (exp, 0);
        arg1 = CALL_EXPR_ARG (exp, 1);
        arg2 = CALL_EXPR_ARG (exp, 2);
        op0 = expand_normal (arg0);
        op1 = expand_normal (arg1);
        op2 = expand_normal (arg2);
        mode0 = insn_data[d->icode].operand[0].mode;
        mode1 = insn_data[d->icode].operand[1].mode;
        mode2 = insn_data[d->icode].operand[2].mode;

        /* Invalid arguments, bail out before generating bad rtl.  */
        if (arg0 == error_mark_node
            || arg1 == error_mark_node
            || arg2 == error_mark_node)
          return const0_rtx;

        *expandedp = true;
        STRIP_NOPS (arg2);
        if (TREE_CODE (arg2) != INTEGER_CST
            || TREE_INT_CST_LOW (arg2) & ~0x3)
          {
            error ("argument to %qs must be a 2-bit unsigned literal",
                   d->name);
            return const0_rtx;
          }

        if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
          op0 = copy_to_mode_reg (Pmode, op0);
        if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
          op1 = copy_to_mode_reg (mode1, op1);

        pat = GEN_FCN (d->icode) (op0, op1, op2);
        if (pat != 0)
          emit_insn (pat);

        return NULL_RTX;
      }

  return NULL_RTX;
}
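
/* Example of the 2-bit literal constraint checked above (a sketch,
   assuming the <altivec.h> vec_dst wrapper): the last argument picks
   one of the four data-stream tags, so

       vec_dst (p, ctl, 0);     accepted: tags are 0..3
       vec_dst (p, ctl, 4);     rejected with the error above  */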

/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}

/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
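
/* Example (a sketch): for a V4SI vector, TYPE_VECTOR_SUBPARTS is 4,
   so the only accepted selectors are

       __builtin_vec_ext_v4si (v, 0) ... __builtin_vec_ext_v4si (v, 3)

   and anything else triggers the range error above.  */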

/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
  op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}

/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  const struct builtin_description_predicates *dp;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
      && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);
      return const0_rtx;
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
        return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
        emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      STRIP_NOPS (arg0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
        return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
          || TREE_INT_CST_LOW (arg0) & ~0x3)
        {
          error ("argument to dss must be a 2-bit unsigned literal");
          return const0_rtx;
        }

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  dp = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
    if (dp->code == fcode)
      return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
                                               exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl, exp, target);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr, exp, target);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx, exp, target);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx, exp, target);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx, exp, target);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl, exp, target);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
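
/* Caller protocol sketch: the target-wide expander
   (rs6000_expand_builtin, later in this file) tries each family in
   turn and uses *expandedp to decide whether to keep looking, along
   the lines of:

       ret = altivec_expand_builtin (exp, target, &success);
       if (success)
         return ret;

   so returning with *expandedp clear simply passes the builtin on to
   the next family.  */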

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}

/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static struct builtin_description bdesc_2arg_spe[] =
{
  { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
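
/* Usage sketch for the table above: these SPE load forms take a
   pointer and an offset and go through the generic binop expander,
   e.g.

       d = __builtin_spe_evldd (p, 8);

   where the non-indexed forms (evldd, evldw, ...) additionally require
   the offset to fold to a 5-bit literal, checked in
   rs6000_expand_binop_builtin above.  */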

/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
          || TREE_INT_CST_LOW (arg1) & ~0x1f)
        {
          error ("argument 2 must be a 5-bit unsigned literal");
          return const0_rtx;
        }
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
                                         exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
                                         exp, target);
    default:
      break;
    }

  d = (struct builtin_description *) bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = (struct builtin_description *) bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = (struct builtin_description *) bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
        return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
        emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
8766 paired_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
8768 rtx pat
, scratch
, tmp
;
8769 tree form
= CALL_EXPR_ARG (exp
, 0);
8770 tree arg0
= CALL_EXPR_ARG (exp
, 1);
8771 tree arg1
= CALL_EXPR_ARG (exp
, 2);
8772 rtx op0
= expand_normal (arg0
);
8773 rtx op1
= expand_normal (arg1
);
8774 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
8775 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
8779 if (TREE_CODE (form
) != INTEGER_CST
)
8781 error ("argument 1 of __builtin_paired_predicate must be a constant");
8785 form_int
= TREE_INT_CST_LOW (form
);
8787 gcc_assert (mode0
== mode1
);
8789 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
8793 || GET_MODE (target
) != SImode
8794 || !(*insn_data
[icode
].operand
[0].predicate
) (target
, SImode
))
8795 target
= gen_reg_rtx (SImode
);
8796 if (!(*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
8797 op0
= copy_to_mode_reg (mode0
, op0
);
8798 if (!(*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
8799 op1
= copy_to_mode_reg (mode1
, op1
);
8801 scratch
= gen_reg_rtx (CCFPmode
);
8803 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
8825 emit_insn (gen_move_from_CR_ov_bit (target
, scratch
));
8828 error ("argument 1 of __builtin_paired_predicate is out of range");
8832 tmp
= gen_rtx_fmt_ee (code
, SImode
, scratch
, const0_rtx
);
8833 emit_move_insn (target
, tmp
);
8838 spe_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
8840 rtx pat
, scratch
, tmp
;
8841 tree form
= CALL_EXPR_ARG (exp
, 0);
8842 tree arg0
= CALL_EXPR_ARG (exp
, 1);
8843 tree arg1
= CALL_EXPR_ARG (exp
, 2);
8844 rtx op0
= expand_normal (arg0
);
8845 rtx op1
= expand_normal (arg1
);
8846 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
8847 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
8851 if (TREE_CODE (form
) != INTEGER_CST
)
8853 error ("argument 1 of __builtin_spe_predicate must be a constant");
8857 form_int
= TREE_INT_CST_LOW (form
);
8859 gcc_assert (mode0
== mode1
);
8861 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
8865 || GET_MODE (target
) != SImode
8866 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, SImode
))
8867 target
= gen_reg_rtx (SImode
);
8869 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
8870 op0
= copy_to_mode_reg (mode0
, op0
);
8871 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
8872 op1
= copy_to_mode_reg (mode1
, op1
);
8874 scratch
= gen_reg_rtx (CCmode
);
8876 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
8881 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
8882 _lower_. We use one compare, but look in different bits of the
8883 CR for each variant.
8885 There are 2 elements in each SPE simd type (upper/lower). The CR
8886 bits are set as follows:
8888 BIT0 | BIT 1 | BIT 2 | BIT 3
8889 U | L | (U | L) | (U & L)
8891 So, for an "all" relationship, BIT 3 would be set.
8892 For an "any" relationship, BIT 2 would be set. Etc.
8894 Following traditional nomenclature, these bits map to:
8896 BIT0 | BIT 1 | BIT 2 | BIT 3
8899 Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.
8904 /* All variant. OV bit. */
8906 /* We need to get to the OV bit, which is the ORDERED bit. We
8907 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
8908 that's ugly and will make validate_condition_mode die.
8909 So let's just use another pattern. */
8910 emit_insn (gen_move_from_CR_ov_bit (target
, scratch
));
8912 /* Any variant. EQ bit. */
8916 /* Upper variant. LT bit. */
8920 /* Lower variant. GT bit. */
8925 error ("argument 1 of __builtin_spe_predicate is out of range");
8929 tmp
= gen_rtx_fmt_ee (code
, SImode
, scratch
, const0_rtx
);
8930 emit_move_insn (target
, tmp
);
8935 /* The evsel builtins look like this:
8937 e = __builtin_spe_evsel_OP (a, b, c, d);
8941 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
8942 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
8946 spe_expand_evsel_builtin (enum insn_code icode
, tree exp
, rtx target
)
8949 tree arg0
= CALL_EXPR_ARG (exp
, 0);
8950 tree arg1
= CALL_EXPR_ARG (exp
, 1);
8951 tree arg2
= CALL_EXPR_ARG (exp
, 2);
8952 tree arg3
= CALL_EXPR_ARG (exp
, 3);
8953 rtx op0
= expand_normal (arg0
);
8954 rtx op1
= expand_normal (arg1
);
8955 rtx op2
= expand_normal (arg2
);
8956 rtx op3
= expand_normal (arg3
);
8957 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
8958 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
8960 gcc_assert (mode0
== mode1
);
8962 if (arg0
== error_mark_node
|| arg1
== error_mark_node
8963 || arg2
== error_mark_node
|| arg3
== error_mark_node
)
8967 || GET_MODE (target
) != mode0
8968 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode0
))
8969 target
= gen_reg_rtx (mode0
);
8971 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
8972 op0
= copy_to_mode_reg (mode0
, op0
);
8973 if (! (*insn_data
[icode
].operand
[1].predicate
) (op1
, mode1
))
8974 op1
= copy_to_mode_reg (mode0
, op1
);
8975 if (! (*insn_data
[icode
].operand
[1].predicate
) (op2
, mode1
))
8976 op2
= copy_to_mode_reg (mode0
, op2
);
8977 if (! (*insn_data
[icode
].operand
[1].predicate
) (op3
, mode1
))
8978 op3
= copy_to_mode_reg (mode0
, op3
);
8980 /* Generate the compare. */
8981 scratch
= gen_reg_rtx (CCmode
);
8982 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
8987 if (mode0
== V2SImode
)
8988 emit_insn (gen_spe_evsel (target
, op2
, op3
, scratch
));
8990 emit_insn (gen_spe_evsel_fs (target
, op2
, op3
, scratch
));
8995 /* Expand an expression EXP that calls a built-in function,
8996 with result going to TARGET if that's convenient
8997 (and in mode MODE if that's convenient).
8998 SUBTARGET may be used as the target for computing one of EXP's operands.
8999 IGNORE is nonzero if the value is to be ignored. */
9002 rs6000_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
9003 enum machine_mode mode ATTRIBUTE_UNUSED
,
9004 int ignore ATTRIBUTE_UNUSED
)
9006 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
9007 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
9008 const struct builtin_description
*d
;
9013 if (fcode
== RS6000_BUILTIN_RECIP
)
9014 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3
, exp
, target
);
9016 if (fcode
== RS6000_BUILTIN_RECIPF
)
9017 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3
, exp
, target
);
9019 if (fcode
== RS6000_BUILTIN_RSQRTF
)
9020 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2
, exp
, target
);
9022 if (fcode
== ALTIVEC_BUILTIN_MASK_FOR_LOAD
9023 || fcode
== ALTIVEC_BUILTIN_MASK_FOR_STORE
)
9025 int icode
= (int) CODE_FOR_altivec_lvsr
;
9026 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
9027 enum machine_mode mode
= insn_data
[icode
].operand
[1].mode
;
9031 gcc_assert (TARGET_ALTIVEC
);
9033 arg
= CALL_EXPR_ARG (exp
, 0);
9034 gcc_assert (TREE_CODE (TREE_TYPE (arg
)) == POINTER_TYPE
);
9035 op
= expand_expr (arg
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
9036 addr
= memory_address (mode
, op
);
9037 if (fcode
== ALTIVEC_BUILTIN_MASK_FOR_STORE
)
9041 /* For the load case need to negate the address. */
9042 op
= gen_reg_rtx (GET_MODE (addr
));
9043 emit_insn (gen_rtx_SET (VOIDmode
, op
,
9044 gen_rtx_NEG (GET_MODE (addr
), addr
)));
9046 op
= gen_rtx_MEM (mode
, op
);
9049 || GET_MODE (target
) != tmode
9050 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
9051 target
= gen_reg_rtx (tmode
);
9053 /*pat = gen_altivec_lvsr (target, op);*/
9054 pat
= GEN_FCN (icode
) (target
, op
);
9062 /* FIXME: There's got to be a nicer way to handle this case than
9063 constructing a new CALL_EXPR. */
9064 if (fcode
== ALTIVEC_BUILTIN_VCFUX
9065 || fcode
== ALTIVEC_BUILTIN_VCFSX
)
9067 if (call_expr_nargs (exp
) == 1)
9068 exp
= build_call_nary (TREE_TYPE (exp
), CALL_EXPR_FN (exp
),
9069 2, CALL_EXPR_ARG (exp
, 0), integer_zero_node
);
9074 ret
= altivec_expand_builtin (exp
, target
, &success
);
9081 ret
= spe_expand_builtin (exp
, target
, &success
);
9086 if (TARGET_PAIRED_FLOAT
)
9088 ret
= paired_expand_builtin (exp
, target
, &success
);
9094 gcc_assert (TARGET_ALTIVEC
|| TARGET_SPE
|| TARGET_PAIRED_FLOAT
);
9096 /* Handle simple unary operations. */
9097 d
= (struct builtin_description
*) bdesc_1arg
;
9098 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
9099 if (d
->code
== fcode
)
9100 return rs6000_expand_unop_builtin (d
->icode
, exp
, target
);
9102 /* Handle simple binary operations. */
9103 d
= (struct builtin_description
*) bdesc_2arg
;
9104 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
9105 if (d
->code
== fcode
)
9106 return rs6000_expand_binop_builtin (d
->icode
, exp
, target
);
9108 /* Handle simple ternary operations. */
9110 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
9111 if (d
->code
== fcode
)
9112 return rs6000_expand_ternop_builtin (d
->icode
, exp
, target
);
9118 build_opaque_vector_type (tree node
, int nunits
)
9120 node
= copy_node (node
);
9121 TYPE_MAIN_VARIANT (node
) = node
;
9122 return build_vector_type (node
, nunits
);
9126 rs6000_init_builtins (void)
9128 V2SI_type_node
= build_vector_type (intSI_type_node
, 2);
9129 V2SF_type_node
= build_vector_type (float_type_node
, 2);
9130 V4HI_type_node
= build_vector_type (intHI_type_node
, 4);
9131 V4SI_type_node
= build_vector_type (intSI_type_node
, 4);
9132 V4SF_type_node
= build_vector_type (float_type_node
, 4);
9133 V8HI_type_node
= build_vector_type (intHI_type_node
, 8);
9134 V16QI_type_node
= build_vector_type (intQI_type_node
, 16);
9136 unsigned_V16QI_type_node
= build_vector_type (unsigned_intQI_type_node
, 16);
9137 unsigned_V8HI_type_node
= build_vector_type (unsigned_intHI_type_node
, 8);
9138 unsigned_V4SI_type_node
= build_vector_type (unsigned_intSI_type_node
, 4);
9140 opaque_V2SF_type_node
= build_opaque_vector_type (float_type_node
, 2);
9141 opaque_V2SI_type_node
= build_opaque_vector_type (intSI_type_node
, 2);
9142 opaque_p_V2SI_type_node
= build_pointer_type (opaque_V2SI_type_node
);
9143 opaque_V4SI_type_node
= copy_node (V4SI_type_node
);
9145 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
9146 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
9147 'vector unsigned short'. */
9149 bool_char_type_node
= build_distinct_type_copy (unsigned_intQI_type_node
);
9150 bool_short_type_node
= build_distinct_type_copy (unsigned_intHI_type_node
);
9151 bool_int_type_node
= build_distinct_type_copy (unsigned_intSI_type_node
);
9152 pixel_type_node
= build_distinct_type_copy (unsigned_intHI_type_node
);
9154 long_integer_type_internal_node
= long_integer_type_node
;
9155 long_unsigned_type_internal_node
= long_unsigned_type_node
;
9156 intQI_type_internal_node
= intQI_type_node
;
9157 uintQI_type_internal_node
= unsigned_intQI_type_node
;
9158 intHI_type_internal_node
= intHI_type_node
;
9159 uintHI_type_internal_node
= unsigned_intHI_type_node
;
9160 intSI_type_internal_node
= intSI_type_node
;
9161 uintSI_type_internal_node
= unsigned_intSI_type_node
;
9162 float_type_internal_node
= float_type_node
;
9163 void_type_internal_node
= void_type_node
;
9165 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9166 get_identifier ("__bool char"),
9167 bool_char_type_node
));
9168 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9169 get_identifier ("__bool short"),
9170 bool_short_type_node
));
9171 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9172 get_identifier ("__bool int"),
9173 bool_int_type_node
));
9174 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9175 get_identifier ("__pixel"),
9178 bool_V16QI_type_node
= build_vector_type (bool_char_type_node
, 16);
9179 bool_V8HI_type_node
= build_vector_type (bool_short_type_node
, 8);
9180 bool_V4SI_type_node
= build_vector_type (bool_int_type_node
, 4);
9181 pixel_V8HI_type_node
= build_vector_type (pixel_type_node
, 8);
9183 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9184 get_identifier ("__vector unsigned char"),
9185 unsigned_V16QI_type_node
));
9186 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9187 get_identifier ("__vector signed char"),
9189 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9190 get_identifier ("__vector __bool char"),
9191 bool_V16QI_type_node
));
9193 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9194 get_identifier ("__vector unsigned short"),
9195 unsigned_V8HI_type_node
));
9196 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9197 get_identifier ("__vector signed short"),
9199 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9200 get_identifier ("__vector __bool short"),
9201 bool_V8HI_type_node
));
9203 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9204 get_identifier ("__vector unsigned int"),
9205 unsigned_V4SI_type_node
));
9206 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9207 get_identifier ("__vector signed int"),
9209 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9210 get_identifier ("__vector __bool int"),
9211 bool_V4SI_type_node
));
9213 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9214 get_identifier ("__vector float"),
9216 (*lang_hooks
.decls
.pushdecl
) (build_decl (TYPE_DECL
,
9217 get_identifier ("__vector __pixel"),
9218 pixel_V8HI_type_node
));
9220 if (TARGET_PAIRED_FLOAT
)
9221 paired_init_builtins ();
9223 spe_init_builtins ();
9225 altivec_init_builtins ();
9226 if (TARGET_ALTIVEC
|| TARGET_SPE
|| TARGET_PAIRED_FLOAT
)
9227 rs6000_common_init_builtins ();
9228 if (TARGET_PPC_GFXOPT
)
9230 tree ftype
= build_function_type_list (float_type_node
,
9234 def_builtin (MASK_PPC_GFXOPT
, "__builtin_recipdivf", ftype
,
9235 RS6000_BUILTIN_RECIPF
);
9237 ftype
= build_function_type_list (float_type_node
,
9240 def_builtin (MASK_PPC_GFXOPT
, "__builtin_rsqrtf", ftype
,
9241 RS6000_BUILTIN_RSQRTF
);
9245 tree ftype
= build_function_type_list (double_type_node
,
9249 def_builtin (MASK_POPCNTB
, "__builtin_recipdiv", ftype
,
9250 RS6000_BUILTIN_RECIP
);
9255 /* AIX libm provides clog as __clog. */
9256 if (built_in_decls
[BUILT_IN_CLOG
])
9257 set_user_assembler_name (built_in_decls
[BUILT_IN_CLOG
], "__clog");
9260 #ifdef SUBTARGET_INIT_BUILTINS
9261 SUBTARGET_INIT_BUILTINS
;
9265 /* Search through a set of builtins and enable the mask bits.
9266 DESC is an array of builtins.
9267 SIZE is the total number of builtins.
9268 START is the builtin enum at which to start.
9269 END is the builtin enum at which to end. */
9271 enable_mask_for_builtins (struct builtin_description
*desc
, int size
,
9272 enum rs6000_builtins start
,
9273 enum rs6000_builtins end
)
9277 for (i
= 0; i
< size
; ++i
)
9278 if (desc
[i
].code
== start
)
9284 for (; i
< size
; ++i
)
9286 /* Flip all the bits on. */
9287 desc
[i
].mask
= target_flags
;
9288 if (desc
[i
].code
== end
)
9294 spe_init_builtins (void)
9296 tree endlink
= void_list_node
;
9297 tree puint_type_node
= build_pointer_type (unsigned_type_node
);
9298 tree pushort_type_node
= build_pointer_type (short_unsigned_type_node
);
9299 struct builtin_description
*d
;
9302 tree v2si_ftype_4_v2si
9303 = build_function_type
9304 (opaque_V2SI_type_node
,
9305 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9306 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9307 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9308 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9311 tree v2sf_ftype_4_v2sf
9312 = build_function_type
9313 (opaque_V2SF_type_node
,
9314 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9315 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9316 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9317 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9320 tree int_ftype_int_v2si_v2si
9321 = build_function_type
9323 tree_cons (NULL_TREE
, integer_type_node
,
9324 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9325 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9328 tree int_ftype_int_v2sf_v2sf
9329 = build_function_type
9331 tree_cons (NULL_TREE
, integer_type_node
,
9332 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9333 tree_cons (NULL_TREE
, opaque_V2SF_type_node
,
9336 tree void_ftype_v2si_puint_int
9337 = build_function_type (void_type_node
,
9338 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9339 tree_cons (NULL_TREE
, puint_type_node
,
9340 tree_cons (NULL_TREE
,
9344 tree void_ftype_v2si_puint_char
9345 = build_function_type (void_type_node
,
9346 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9347 tree_cons (NULL_TREE
, puint_type_node
,
9348 tree_cons (NULL_TREE
,
9352 tree void_ftype_v2si_pv2si_int
9353 = build_function_type (void_type_node
,
9354 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9355 tree_cons (NULL_TREE
, opaque_p_V2SI_type_node
,
9356 tree_cons (NULL_TREE
,
9360 tree void_ftype_v2si_pv2si_char
9361 = build_function_type (void_type_node
,
9362 tree_cons (NULL_TREE
, opaque_V2SI_type_node
,
9363 tree_cons (NULL_TREE
, opaque_p_V2SI_type_node
,
9364 tree_cons (NULL_TREE
,
9369 = build_function_type (void_type_node
,
9370 tree_cons (NULL_TREE
, integer_type_node
, endlink
));
9373 = build_function_type (integer_type_node
, endlink
);
9375 tree v2si_ftype_pv2si_int
9376 = build_function_type (opaque_V2SI_type_node
,
9377 tree_cons (NULL_TREE
, opaque_p_V2SI_type_node
,
9378 tree_cons (NULL_TREE
, integer_type_node
,
9381 tree v2si_ftype_puint_int
9382 = build_function_type (opaque_V2SI_type_node
,
9383 tree_cons (NULL_TREE
, puint_type_node
,
9384 tree_cons (NULL_TREE
, integer_type_node
,
9387 tree v2si_ftype_pushort_int
9388 = build_function_type (opaque_V2SI_type_node
,
9389 tree_cons (NULL_TREE
, pushort_type_node
,
9390 tree_cons (NULL_TREE
, integer_type_node
,
9393 tree v2si_ftype_signed_char
9394 = build_function_type (opaque_V2SI_type_node
,
9395 tree_cons (NULL_TREE
, signed_char_type_node
,
9398 /* The initialization of the simple binary and unary builtins is
9399 done in rs6000_common_init_builtins, but we have to enable the
9400 mask bits here manually because we have run out of `target_flags'
9401 bits. We really need to redesign this mask business. */
9403 enable_mask_for_builtins ((struct builtin_description
*) bdesc_2arg
,
9404 ARRAY_SIZE (bdesc_2arg
),
9407 enable_mask_for_builtins ((struct builtin_description
*) bdesc_1arg
,
9408 ARRAY_SIZE (bdesc_1arg
),
9410 SPE_BUILTIN_EVSUBFUSIAAW
);
9411 enable_mask_for_builtins ((struct builtin_description
*) bdesc_spe_predicates
,
9412 ARRAY_SIZE (bdesc_spe_predicates
),
9413 SPE_BUILTIN_EVCMPEQ
,
9414 SPE_BUILTIN_EVFSTSTLT
);
9415 enable_mask_for_builtins ((struct builtin_description
*) bdesc_spe_evsel
,
9416 ARRAY_SIZE (bdesc_spe_evsel
),
9417 SPE_BUILTIN_EVSEL_CMPGTS
,
9418 SPE_BUILTIN_EVSEL_FSTSTEQ
);
9420 (*lang_hooks
.decls
.pushdecl
)
9421 (build_decl (TYPE_DECL
, get_identifier ("__ev64_opaque__"),
9422 opaque_V2SI_type_node
));
9424 /* Initialize irregular SPE builtins. */
9426 def_builtin (target_flags
, "__builtin_spe_mtspefscr", void_ftype_int
, SPE_BUILTIN_MTSPEFSCR
);
9427 def_builtin (target_flags
, "__builtin_spe_mfspefscr", int_ftype_void
, SPE_BUILTIN_MFSPEFSCR
);
9428 def_builtin (target_flags
, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDDX
);
9429 def_builtin (target_flags
, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDHX
);
9430 def_builtin (target_flags
, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDWX
);
9431 def_builtin (target_flags
, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWHEX
);
9432 def_builtin (target_flags
, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWHOX
);
9433 def_builtin (target_flags
, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWWEX
);
9434 def_builtin (target_flags
, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWWOX
);
9435 def_builtin (target_flags
, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDD
);
9436 def_builtin (target_flags
, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDH
);
9437 def_builtin (target_flags
, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDW
);
9438 def_builtin (target_flags
, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWHE
);
9439 def_builtin (target_flags
, "__builtin_spe_evstwho", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWHO
);
9440 def_builtin (target_flags
, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWWE
);
9441 def_builtin (target_flags
, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWWO
);
9442 def_builtin (target_flags
, "__builtin_spe_evsplatfi", v2si_ftype_signed_char
, SPE_BUILTIN_EVSPLATFI
);
9443 def_builtin (target_flags
, "__builtin_spe_evsplati", v2si_ftype_signed_char
, SPE_BUILTIN_EVSPLATI
);
9446 def_builtin (target_flags
, "__builtin_spe_evlddx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDDX
);
9447 def_builtin (target_flags
, "__builtin_spe_evldwx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDWX
);
9448 def_builtin (target_flags
, "__builtin_spe_evldhx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDHX
);
9449 def_builtin (target_flags
, "__builtin_spe_evlwhex", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHEX
);
9450 def_builtin (target_flags
, "__builtin_spe_evlwhoux", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOUX
);
9451 def_builtin (target_flags
, "__builtin_spe_evlwhosx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOSX
);
9452 def_builtin (target_flags
, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWWSPLATX
);
9453 def_builtin (target_flags
, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHSPLATX
);
9454 def_builtin (target_flags
, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHESPLATX
);
9455 def_builtin (target_flags
, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOUSPLATX
);
9456 def_builtin (target_flags
, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOSSPLATX
);
9457 def_builtin (target_flags
, "__builtin_spe_evldd", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDD
);
9458 def_builtin (target_flags
, "__builtin_spe_evldw", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDW
);
9459 def_builtin (target_flags
, "__builtin_spe_evldh", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDH
);
9460 def_builtin (target_flags
, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHESPLAT
);
9461 def_builtin (target_flags
, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOSSPLAT
);
9462 def_builtin (target_flags
, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOUSPLAT
);
9463 def_builtin (target_flags
, "__builtin_spe_evlwhe", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHE
);
9464 def_builtin (target_flags
, "__builtin_spe_evlwhos", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOS
);
9465 def_builtin (target_flags
, "__builtin_spe_evlwhou", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOU
);
9466 def_builtin (target_flags
, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHSPLAT
);
9467 def_builtin (target_flags
, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWWSPLAT
);
9470 d
= (struct builtin_description
*) bdesc_spe_predicates
;
9471 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_predicates
); ++i
, d
++)
9475 switch (insn_data
[d
->icode
].operand
[1].mode
)
9478 type
= int_ftype_int_v2si_v2si
;
9481 type
= int_ftype_int_v2sf_v2sf
;
9487 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
9490 /* Evsel predicates. */
9491 d
= (struct builtin_description
*) bdesc_spe_evsel
;
9492 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_evsel
); ++i
, d
++)
9496 switch (insn_data
[d
->icode
].operand
[1].mode
)
9499 type
= v2si_ftype_4_v2si
;
9502 type
= v2sf_ftype_4_v2sf
;
9508 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
9513 paired_init_builtins (void)
9515 const struct builtin_description
*d
;
9517 tree endlink
= void_list_node
;
9519 tree int_ftype_int_v2sf_v2sf
9520 = build_function_type
9522 tree_cons (NULL_TREE
, integer_type_node
,
9523 tree_cons (NULL_TREE
, V2SF_type_node
,
9524 tree_cons (NULL_TREE
, V2SF_type_node
,
9526 tree pcfloat_type_node
=
9527 build_pointer_type (build_qualified_type
9528 (float_type_node
, TYPE_QUAL_CONST
));
9530 tree v2sf_ftype_long_pcfloat
= build_function_type_list (V2SF_type_node
,
9531 long_integer_type_node
,
9534 tree void_ftype_v2sf_long_pcfloat
=
9535 build_function_type_list (void_type_node
,
9537 long_integer_type_node
,
9542 def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat
,
9546 def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat
,
9547 PAIRED_BUILTIN_STX
);
9550 d
= bdesc_paired_preds
;
9551 for (i
= 0; i
< ARRAY_SIZE (bdesc_paired_preds
); ++i
, d
++)
9555 switch (insn_data
[d
->icode
].operand
[1].mode
)
9558 type
= int_ftype_int_v2sf_v2sf
;
9564 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
9569 altivec_init_builtins (void)
9571 const struct builtin_description
*d
;
9572 const struct builtin_description_predicates
*dp
;
9576 tree pfloat_type_node
= build_pointer_type (float_type_node
);
9577 tree pint_type_node
= build_pointer_type (integer_type_node
);
9578 tree pshort_type_node
= build_pointer_type (short_integer_type_node
);
9579 tree pchar_type_node
= build_pointer_type (char_type_node
);
9581 tree pvoid_type_node
= build_pointer_type (void_type_node
);
9583 tree pcfloat_type_node
= build_pointer_type (build_qualified_type (float_type_node
, TYPE_QUAL_CONST
));
9584 tree pcint_type_node
= build_pointer_type (build_qualified_type (integer_type_node
, TYPE_QUAL_CONST
));
9585 tree pcshort_type_node
= build_pointer_type (build_qualified_type (short_integer_type_node
, TYPE_QUAL_CONST
));
9586 tree pcchar_type_node
= build_pointer_type (build_qualified_type (char_type_node
, TYPE_QUAL_CONST
));
9588 tree pcvoid_type_node
= build_pointer_type (build_qualified_type (void_type_node
, TYPE_QUAL_CONST
));
9590 tree int_ftype_opaque
9591 = build_function_type_list (integer_type_node
,
9592 opaque_V4SI_type_node
, NULL_TREE
);
9594 tree opaque_ftype_opaque_int
9595 = build_function_type_list (opaque_V4SI_type_node
,
9596 opaque_V4SI_type_node
, integer_type_node
, NULL_TREE
);
9597 tree opaque_ftype_opaque_opaque_int
9598 = build_function_type_list (opaque_V4SI_type_node
,
9599 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
9600 integer_type_node
, NULL_TREE
);
9601 tree int_ftype_int_opaque_opaque
9602 = build_function_type_list (integer_type_node
,
9603 integer_type_node
, opaque_V4SI_type_node
,
9604 opaque_V4SI_type_node
, NULL_TREE
);
9605 tree int_ftype_int_v4si_v4si
9606 = build_function_type_list (integer_type_node
,
9607 integer_type_node
, V4SI_type_node
,
9608 V4SI_type_node
, NULL_TREE
);
9609 tree v4sf_ftype_pcfloat
9610 = build_function_type_list (V4SF_type_node
, pcfloat_type_node
, NULL_TREE
);
9611 tree void_ftype_pfloat_v4sf
9612 = build_function_type_list (void_type_node
,
9613 pfloat_type_node
, V4SF_type_node
, NULL_TREE
);
9614 tree v4si_ftype_pcint
9615 = build_function_type_list (V4SI_type_node
, pcint_type_node
, NULL_TREE
);
9616 tree void_ftype_pint_v4si
9617 = build_function_type_list (void_type_node
,
9618 pint_type_node
, V4SI_type_node
, NULL_TREE
);
9619 tree v8hi_ftype_pcshort
9620 = build_function_type_list (V8HI_type_node
, pcshort_type_node
, NULL_TREE
);
9621 tree void_ftype_pshort_v8hi
9622 = build_function_type_list (void_type_node
,
9623 pshort_type_node
, V8HI_type_node
, NULL_TREE
);
9624 tree v16qi_ftype_pcchar
9625 = build_function_type_list (V16QI_type_node
, pcchar_type_node
, NULL_TREE
);
9626 tree void_ftype_pchar_v16qi
9627 = build_function_type_list (void_type_node
,
9628 pchar_type_node
, V16QI_type_node
, NULL_TREE
);
9629 tree void_ftype_v4si
9630 = build_function_type_list (void_type_node
, V4SI_type_node
, NULL_TREE
);
9631 tree v8hi_ftype_void
9632 = build_function_type (V8HI_type_node
, void_list_node
);
9633 tree void_ftype_void
9634 = build_function_type (void_type_node
, void_list_node
);
9636 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
9638 tree opaque_ftype_long_pcvoid
9639 = build_function_type_list (opaque_V4SI_type_node
,
9640 long_integer_type_node
, pcvoid_type_node
, NULL_TREE
);
9641 tree v16qi_ftype_long_pcvoid
9642 = build_function_type_list (V16QI_type_node
,
9643 long_integer_type_node
, pcvoid_type_node
, NULL_TREE
);
9644 tree v8hi_ftype_long_pcvoid
9645 = build_function_type_list (V8HI_type_node
,
9646 long_integer_type_node
, pcvoid_type_node
, NULL_TREE
);
9647 tree v4si_ftype_long_pcvoid
9648 = build_function_type_list (V4SI_type_node
,
9649 long_integer_type_node
, pcvoid_type_node
, NULL_TREE
);
9651 tree void_ftype_opaque_long_pvoid
9652 = build_function_type_list (void_type_node
,
9653 opaque_V4SI_type_node
, long_integer_type_node
,
9654 pvoid_type_node
, NULL_TREE
);
9655 tree void_ftype_v4si_long_pvoid
9656 = build_function_type_list (void_type_node
,
9657 V4SI_type_node
, long_integer_type_node
,
9658 pvoid_type_node
, NULL_TREE
);
9659 tree void_ftype_v16qi_long_pvoid
9660 = build_function_type_list (void_type_node
,
9661 V16QI_type_node
, long_integer_type_node
,
9662 pvoid_type_node
, NULL_TREE
);
9663 tree void_ftype_v8hi_long_pvoid
9664 = build_function_type_list (void_type_node
,
9665 V8HI_type_node
, long_integer_type_node
,
9666 pvoid_type_node
, NULL_TREE
);
9667 tree int_ftype_int_v8hi_v8hi
9668 = build_function_type_list (integer_type_node
,
9669 integer_type_node
, V8HI_type_node
,
9670 V8HI_type_node
, NULL_TREE
);
9671 tree int_ftype_int_v16qi_v16qi
9672 = build_function_type_list (integer_type_node
,
9673 integer_type_node
, V16QI_type_node
,
9674 V16QI_type_node
, NULL_TREE
);
9675 tree int_ftype_int_v4sf_v4sf
9676 = build_function_type_list (integer_type_node
,
9677 integer_type_node
, V4SF_type_node
,
9678 V4SF_type_node
, NULL_TREE
);
9679 tree v4si_ftype_v4si
9680 = build_function_type_list (V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
9681 tree v8hi_ftype_v8hi
9682 = build_function_type_list (V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
9683 tree v16qi_ftype_v16qi
9684 = build_function_type_list (V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
9685 tree v4sf_ftype_v4sf
9686 = build_function_type_list (V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
9687 tree void_ftype_pcvoid_int_int
9688 = build_function_type_list (void_type_node
,
9689 pcvoid_type_node
, integer_type_node
,
9690 integer_type_node
, NULL_TREE
);
9692 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat
,
9693 ALTIVEC_BUILTIN_LD_INTERNAL_4sf
);
9694 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf
,
9695 ALTIVEC_BUILTIN_ST_INTERNAL_4sf
);
9696 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint
,
9697 ALTIVEC_BUILTIN_LD_INTERNAL_4si
);
9698 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si
,
9699 ALTIVEC_BUILTIN_ST_INTERNAL_4si
);
9700 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort
,
9701 ALTIVEC_BUILTIN_LD_INTERNAL_8hi
);
9702 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi
,
9703 ALTIVEC_BUILTIN_ST_INTERNAL_8hi
);
9704 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar
,
9705 ALTIVEC_BUILTIN_LD_INTERNAL_16qi
);
9706 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi
,
9707 ALTIVEC_BUILTIN_ST_INTERNAL_16qi
);
9708 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_mtvscr", void_ftype_v4si
, ALTIVEC_BUILTIN_MTVSCR
);
9709 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_mfvscr", v8hi_ftype_void
, ALTIVEC_BUILTIN_MFVSCR
);
9710 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_dssall", void_ftype_void
, ALTIVEC_BUILTIN_DSSALL
);
9711 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_dss", void_ftype_int
, ALTIVEC_BUILTIN_DSS
);
9712 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSL
);
9713 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSR
);
9714 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEBX
);
9715 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEHX
);
9716 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEWX
);
9717 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVXL
);
9718 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVX
);
9719 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVX
);
9720 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVEWX
);
9721 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVXL
);
9722 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVEBX
);
9723 def_builtin (MASK_ALTIVEC
, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid
, ALTIVEC_BUILTIN_STVEHX
);
9724 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ld", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LD
);
9725 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lde", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDE
);
9726 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ldl", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDL
);
9727 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSL
);
9728 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSR
);
9729 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEBX
);
9730 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEHX
);
9731 def_builtin (MASK_ALTIVEC
, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEWX
);
9732 def_builtin (MASK_ALTIVEC
, "__builtin_vec_st", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_ST
);
9733 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ste", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STE
);
9734 def_builtin (MASK_ALTIVEC
, "__builtin_vec_stl", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STL
);
9735 def_builtin (MASK_ALTIVEC
, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEWX
);
9736 def_builtin (MASK_ALTIVEC
, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEBX
);
9737 def_builtin (MASK_ALTIVEC
, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEHX
);
9739 def_builtin (MASK_ALTIVEC
, "__builtin_vec_step", int_ftype_opaque
, ALTIVEC_BUILTIN_VEC_STEP
);
9741 def_builtin (MASK_ALTIVEC
, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int
, ALTIVEC_BUILTIN_VEC_SLD
);
9742 def_builtin (MASK_ALTIVEC
, "__builtin_vec_splat", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_SPLAT
);
9743 def_builtin (MASK_ALTIVEC
, "__builtin_vec_vspltw", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTW
);
9744 def_builtin (MASK_ALTIVEC
, "__builtin_vec_vsplth", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTH
);
9745 def_builtin (MASK_ALTIVEC
, "__builtin_vec_vspltb", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTB
);
9746 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ctf", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTF
);
9747 def_builtin (MASK_ALTIVEC
, "__builtin_vec_vcfsx", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFSX
);
9748 def_builtin (MASK_ALTIVEC
, "__builtin_vec_vcfux", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFUX
);
9749 def_builtin (MASK_ALTIVEC
, "__builtin_vec_cts", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTS
);
9750 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ctu", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTU
);
9752 /* Add the DST variants. */
9754 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
9755 def_builtin (d
->mask
, d
->name
, void_ftype_pcvoid_int_int
, d
->code
);
9757 /* Initialize the predicates. */
9758 dp
= bdesc_altivec_preds
;
9759 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, dp
++)
9761 enum machine_mode mode1
;
9763 bool is_overloaded
= dp
->code
>= ALTIVEC_BUILTIN_OVERLOADED_FIRST
9764 && dp
->code
<= ALTIVEC_BUILTIN_OVERLOADED_LAST
;
9769 mode1
= insn_data
[dp
->icode
].operand
[1].mode
;
9774 type
= int_ftype_int_opaque_opaque
;
9777 type
= int_ftype_int_v4si_v4si
;
9780 type
= int_ftype_int_v8hi_v8hi
;
9783 type
= int_ftype_int_v16qi_v16qi
;
9786 type
= int_ftype_int_v4sf_v4sf
;
9792 def_builtin (dp
->mask
, dp
->name
, type
, dp
->code
);
9795 /* Initialize the abs* operators. */
9797 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
9799 enum machine_mode mode0
;
9802 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
9807 type
= v4si_ftype_v4si
;
9810 type
= v8hi_ftype_v8hi
;
9813 type
= v16qi_ftype_v16qi
;
9816 type
= v4sf_ftype_v4sf
;
9822 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
9829 /* Initialize target builtin that implements
9830 targetm.vectorize.builtin_mask_for_load. */
9832 decl
= add_builtin_function ("__builtin_altivec_mask_for_load",
9833 v16qi_ftype_long_pcvoid
,
9834 ALTIVEC_BUILTIN_MASK_FOR_LOAD
,
9835 BUILT_IN_MD
, NULL
, NULL_TREE
);
9836 TREE_READONLY (decl
) = 1;
9837 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
9838 altivec_builtin_mask_for_load
= decl
;
9841 /* Access to the vec_init patterns. */
9842 ftype
= build_function_type_list (V4SI_type_node
, integer_type_node
,
9843 integer_type_node
, integer_type_node
,
9844 integer_type_node
, NULL_TREE
);
9845 def_builtin (MASK_ALTIVEC
, "__builtin_vec_init_v4si", ftype
,
9846 ALTIVEC_BUILTIN_VEC_INIT_V4SI
);
9848 ftype
= build_function_type_list (V8HI_type_node
, short_integer_type_node
,
9849 short_integer_type_node
,
9850 short_integer_type_node
,
9851 short_integer_type_node
,
9852 short_integer_type_node
,
9853 short_integer_type_node
,
9854 short_integer_type_node
,
9855 short_integer_type_node
, NULL_TREE
);
9856 def_builtin (MASK_ALTIVEC
, "__builtin_vec_init_v8hi", ftype
,
9857 ALTIVEC_BUILTIN_VEC_INIT_V8HI
);
9859 ftype
= build_function_type_list (V16QI_type_node
, char_type_node
,
9860 char_type_node
, char_type_node
,
9861 char_type_node
, char_type_node
,
9862 char_type_node
, char_type_node
,
9863 char_type_node
, char_type_node
,
9864 char_type_node
, char_type_node
,
9865 char_type_node
, char_type_node
,
9866 char_type_node
, char_type_node
,
9867 char_type_node
, NULL_TREE
);
9868 def_builtin (MASK_ALTIVEC
, "__builtin_vec_init_v16qi", ftype
,
9869 ALTIVEC_BUILTIN_VEC_INIT_V16QI
);
9871 ftype
= build_function_type_list (V4SF_type_node
, float_type_node
,
9872 float_type_node
, float_type_node
,
9873 float_type_node
, NULL_TREE
);
9874 def_builtin (MASK_ALTIVEC
, "__builtin_vec_init_v4sf", ftype
,
9875 ALTIVEC_BUILTIN_VEC_INIT_V4SF
);
9877 /* Access to the vec_set patterns. */
9878 ftype
= build_function_type_list (V4SI_type_node
, V4SI_type_node
,
9880 integer_type_node
, NULL_TREE
);
9881 def_builtin (MASK_ALTIVEC
, "__builtin_vec_set_v4si", ftype
,
9882 ALTIVEC_BUILTIN_VEC_SET_V4SI
);
9884 ftype
= build_function_type_list (V8HI_type_node
, V8HI_type_node
,
9886 integer_type_node
, NULL_TREE
);
9887 def_builtin (MASK_ALTIVEC
, "__builtin_vec_set_v8hi", ftype
,
9888 ALTIVEC_BUILTIN_VEC_SET_V8HI
);
9890 ftype
= build_function_type_list (V8HI_type_node
, V16QI_type_node
,
9892 integer_type_node
, NULL_TREE
);
9893 def_builtin (MASK_ALTIVEC
, "__builtin_vec_set_v16qi", ftype
,
9894 ALTIVEC_BUILTIN_VEC_SET_V16QI
);
9896 ftype
= build_function_type_list (V4SF_type_node
, V4SF_type_node
,
9898 integer_type_node
, NULL_TREE
);
9899 def_builtin (MASK_ALTIVEC
, "__builtin_vec_set_v4sf", ftype
,
9900 ALTIVEC_BUILTIN_VEC_SET_V4SF
);
9902 /* Access to the vec_extract patterns. */
9903 ftype
= build_function_type_list (intSI_type_node
, V4SI_type_node
,
9904 integer_type_node
, NULL_TREE
);
9905 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ext_v4si", ftype
,
9906 ALTIVEC_BUILTIN_VEC_EXT_V4SI
);
9908 ftype
= build_function_type_list (intHI_type_node
, V8HI_type_node
,
9909 integer_type_node
, NULL_TREE
);
9910 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ext_v8hi", ftype
,
9911 ALTIVEC_BUILTIN_VEC_EXT_V8HI
);
9913 ftype
= build_function_type_list (intQI_type_node
, V16QI_type_node
,
9914 integer_type_node
, NULL_TREE
);
9915 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ext_v16qi", ftype
,
9916 ALTIVEC_BUILTIN_VEC_EXT_V16QI
);
9918 ftype
= build_function_type_list (float_type_node
, V4SF_type_node
,
9919 integer_type_node
, NULL_TREE
);
9920 def_builtin (MASK_ALTIVEC
, "__builtin_vec_ext_v4sf", ftype
,
9921 ALTIVEC_BUILTIN_VEC_EXT_V4SF
);
9925 rs6000_common_init_builtins (void)
9927 const struct builtin_description
*d
;
9930 tree v2sf_ftype_v2sf_v2sf_v2sf
9931 = build_function_type_list (V2SF_type_node
,
9932 V2SF_type_node
, V2SF_type_node
,
9933 V2SF_type_node
, NULL_TREE
);
9935 tree v4sf_ftype_v4sf_v4sf_v16qi
9936 = build_function_type_list (V4SF_type_node
,
9937 V4SF_type_node
, V4SF_type_node
,
9938 V16QI_type_node
, NULL_TREE
);
9939 tree v4si_ftype_v4si_v4si_v16qi
9940 = build_function_type_list (V4SI_type_node
,
9941 V4SI_type_node
, V4SI_type_node
,
9942 V16QI_type_node
, NULL_TREE
);
9943 tree v8hi_ftype_v8hi_v8hi_v16qi
9944 = build_function_type_list (V8HI_type_node
,
9945 V8HI_type_node
, V8HI_type_node
,
9946 V16QI_type_node
, NULL_TREE
);
9947 tree v16qi_ftype_v16qi_v16qi_v16qi
9948 = build_function_type_list (V16QI_type_node
,
9949 V16QI_type_node
, V16QI_type_node
,
9950 V16QI_type_node
, NULL_TREE
);
9952 = build_function_type_list (V4SI_type_node
, integer_type_node
, NULL_TREE
);
9954 = build_function_type_list (V8HI_type_node
, integer_type_node
, NULL_TREE
);
9955 tree v16qi_ftype_int
9956 = build_function_type_list (V16QI_type_node
, integer_type_node
, NULL_TREE
);
9957 tree v8hi_ftype_v16qi
9958 = build_function_type_list (V8HI_type_node
, V16QI_type_node
, NULL_TREE
);
9959 tree v4sf_ftype_v4sf
9960 = build_function_type_list (V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
9962 tree v2si_ftype_v2si_v2si
9963 = build_function_type_list (opaque_V2SI_type_node
,
9964 opaque_V2SI_type_node
,
9965 opaque_V2SI_type_node
, NULL_TREE
);
9967 tree v2sf_ftype_v2sf_v2sf_spe
9968 = build_function_type_list (opaque_V2SF_type_node
,
9969 opaque_V2SF_type_node
,
9970 opaque_V2SF_type_node
, NULL_TREE
);
9972 tree v2sf_ftype_v2sf_v2sf
9973 = build_function_type_list (V2SF_type_node
,
9975 V2SF_type_node
, NULL_TREE
);
9978 tree v2si_ftype_int_int
9979 = build_function_type_list (opaque_V2SI_type_node
,
9980 integer_type_node
, integer_type_node
,
9983 tree opaque_ftype_opaque
9984 = build_function_type_list (opaque_V4SI_type_node
,
9985 opaque_V4SI_type_node
, NULL_TREE
);
9987 tree v2si_ftype_v2si
9988 = build_function_type_list (opaque_V2SI_type_node
,
9989 opaque_V2SI_type_node
, NULL_TREE
);
9991 tree v2sf_ftype_v2sf_spe
9992 = build_function_type_list (opaque_V2SF_type_node
,
9993 opaque_V2SF_type_node
, NULL_TREE
);
9995 tree v2sf_ftype_v2sf
9996 = build_function_type_list (V2SF_type_node
,
9997 V2SF_type_node
, NULL_TREE
);
9999 tree v2sf_ftype_v2si
10000 = build_function_type_list (opaque_V2SF_type_node
,
10001 opaque_V2SI_type_node
, NULL_TREE
);
10003 tree v2si_ftype_v2sf
10004 = build_function_type_list (opaque_V2SI_type_node
,
10005 opaque_V2SF_type_node
, NULL_TREE
);
10007 tree v2si_ftype_v2si_char
10008 = build_function_type_list (opaque_V2SI_type_node
,
10009 opaque_V2SI_type_node
,
10010 char_type_node
, NULL_TREE
);
10012 tree v2si_ftype_int_char
10013 = build_function_type_list (opaque_V2SI_type_node
,
10014 integer_type_node
, char_type_node
, NULL_TREE
);
10016 tree v2si_ftype_char
10017 = build_function_type_list (opaque_V2SI_type_node
,
10018 char_type_node
, NULL_TREE
);
10020 tree int_ftype_int_int
10021 = build_function_type_list (integer_type_node
,
10022 integer_type_node
, integer_type_node
,
10025 tree opaque_ftype_opaque_opaque
10026 = build_function_type_list (opaque_V4SI_type_node
,
10027 opaque_V4SI_type_node
, opaque_V4SI_type_node
, NULL_TREE
);
10028 tree v4si_ftype_v4si_v4si
10029 = build_function_type_list (V4SI_type_node
,
10030 V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
10031 tree v4sf_ftype_v4si_int
10032 = build_function_type_list (V4SF_type_node
,
10033 V4SI_type_node
, integer_type_node
, NULL_TREE
);
10034 tree v4si_ftype_v4sf_int
10035 = build_function_type_list (V4SI_type_node
,
10036 V4SF_type_node
, integer_type_node
, NULL_TREE
);
10037 tree v4si_ftype_v4si_int
10038 = build_function_type_list (V4SI_type_node
,
10039 V4SI_type_node
, integer_type_node
, NULL_TREE
);
10040 tree v8hi_ftype_v8hi_int
10041 = build_function_type_list (V8HI_type_node
,
10042 V8HI_type_node
, integer_type_node
, NULL_TREE
);
10043 tree v16qi_ftype_v16qi_int
10044 = build_function_type_list (V16QI_type_node
,
10045 V16QI_type_node
, integer_type_node
, NULL_TREE
);
10046 tree v16qi_ftype_v16qi_v16qi_int
10047 = build_function_type_list (V16QI_type_node
,
10048 V16QI_type_node
, V16QI_type_node
,
10049 integer_type_node
, NULL_TREE
);
10050 tree v8hi_ftype_v8hi_v8hi_int
10051 = build_function_type_list (V8HI_type_node
,
10052 V8HI_type_node
, V8HI_type_node
,
10053 integer_type_node
, NULL_TREE
);
10054 tree v4si_ftype_v4si_v4si_int
10055 = build_function_type_list (V4SI_type_node
,
10056 V4SI_type_node
, V4SI_type_node
,
10057 integer_type_node
, NULL_TREE
);
10058 tree v4sf_ftype_v4sf_v4sf_int
10059 = build_function_type_list (V4SF_type_node
,
10060 V4SF_type_node
, V4SF_type_node
,
10061 integer_type_node
, NULL_TREE
);
10062 tree v4sf_ftype_v4sf_v4sf
10063 = build_function_type_list (V4SF_type_node
,
10064 V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
10065 tree opaque_ftype_opaque_opaque_opaque
10066 = build_function_type_list (opaque_V4SI_type_node
,
10067 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
10068 opaque_V4SI_type_node
, NULL_TREE
);
10069 tree v4sf_ftype_v4sf_v4sf_v4si
10070 = build_function_type_list (V4SF_type_node
,
10071 V4SF_type_node
, V4SF_type_node
,
10072 V4SI_type_node
, NULL_TREE
);
10073 tree v4sf_ftype_v4sf_v4sf_v4sf
10074 = build_function_type_list (V4SF_type_node
,
10075 V4SF_type_node
, V4SF_type_node
,
10076 V4SF_type_node
, NULL_TREE
);
10077 tree v4si_ftype_v4si_v4si_v4si
10078 = build_function_type_list (V4SI_type_node
,
10079 V4SI_type_node
, V4SI_type_node
,
10080 V4SI_type_node
, NULL_TREE
);
10081 tree v8hi_ftype_v8hi_v8hi
10082 = build_function_type_list (V8HI_type_node
,
10083 V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
10084 tree v8hi_ftype_v8hi_v8hi_v8hi
10085 = build_function_type_list (V8HI_type_node
,
10086 V8HI_type_node
, V8HI_type_node
,
10087 V8HI_type_node
, NULL_TREE
);
10088 tree v4si_ftype_v8hi_v8hi_v4si
10089 = build_function_type_list (V4SI_type_node
,
10090 V8HI_type_node
, V8HI_type_node
,
10091 V4SI_type_node
, NULL_TREE
);
10092 tree v4si_ftype_v16qi_v16qi_v4si
10093 = build_function_type_list (V4SI_type_node
,
10094 V16QI_type_node
, V16QI_type_node
,
10095 V4SI_type_node
, NULL_TREE
);
10096 tree v16qi_ftype_v16qi_v16qi
10097 = build_function_type_list (V16QI_type_node
,
10098 V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
10099 tree v4si_ftype_v4sf_v4sf
10100 = build_function_type_list (V4SI_type_node
,
10101 V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
10102 tree v8hi_ftype_v16qi_v16qi
10103 = build_function_type_list (V8HI_type_node
,
10104 V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
10105 tree v4si_ftype_v8hi_v8hi
10106 = build_function_type_list (V4SI_type_node
,
10107 V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
10108 tree v8hi_ftype_v4si_v4si
10109 = build_function_type_list (V8HI_type_node
,
10110 V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
10111 tree v16qi_ftype_v8hi_v8hi
10112 = build_function_type_list (V16QI_type_node
,
10113 V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
10114 tree v4si_ftype_v16qi_v4si
10115 = build_function_type_list (V4SI_type_node
,
10116 V16QI_type_node
, V4SI_type_node
, NULL_TREE
);
10117 tree v4si_ftype_v16qi_v16qi
10118 = build_function_type_list (V4SI_type_node
,
10119 V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
10120 tree v4si_ftype_v8hi_v4si
10121 = build_function_type_list (V4SI_type_node
,
10122 V8HI_type_node
, V4SI_type_node
, NULL_TREE
);
10123 tree v4si_ftype_v8hi
10124 = build_function_type_list (V4SI_type_node
, V8HI_type_node
, NULL_TREE
);
10125 tree int_ftype_v4si_v4si
10126 = build_function_type_list (integer_type_node
,
10127 V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
10128 tree int_ftype_v4sf_v4sf
10129 = build_function_type_list (integer_type_node
,
10130 V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
10131 tree int_ftype_v16qi_v16qi
10132 = build_function_type_list (integer_type_node
,
10133 V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
10134 tree int_ftype_v8hi_v8hi
10135 = build_function_type_list (integer_type_node
,
10136 V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
10138 /* Add the simple ternary operators. */
10140 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
10142 enum machine_mode mode0
, mode1
, mode2
, mode3
;
10144 bool is_overloaded
= d
->code
>= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10145 && d
->code
<= ALTIVEC_BUILTIN_OVERLOADED_LAST
;
10156 if (d
->name
== 0 || d
->icode
== CODE_FOR_nothing
)
10159 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
10160 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
10161 mode2
= insn_data
[d
->icode
].operand
[2].mode
;
10162 mode3
= insn_data
[d
->icode
].operand
[3].mode
;
10165 /* When all four are of the same mode. */
10166 if (mode0
== mode1
&& mode1
== mode2
&& mode2
== mode3
)
10171 type
= opaque_ftype_opaque_opaque_opaque
;
10174 type
= v4si_ftype_v4si_v4si_v4si
;
10177 type
= v4sf_ftype_v4sf_v4sf_v4sf
;
10180 type
= v8hi_ftype_v8hi_v8hi_v8hi
;
10183 type
= v16qi_ftype_v16qi_v16qi_v16qi
;
10186 type
= v2sf_ftype_v2sf_v2sf_v2sf
;
10189 gcc_unreachable ();
10192 else if (mode0
== mode1
&& mode1
== mode2
&& mode3
== V16QImode
)
10197 type
= v4si_ftype_v4si_v4si_v16qi
;
10200 type
= v4sf_ftype_v4sf_v4sf_v16qi
;
10203 type
= v8hi_ftype_v8hi_v8hi_v16qi
;
10206 type
= v16qi_ftype_v16qi_v16qi_v16qi
;
10209 gcc_unreachable ();
10212 else if (mode0
== V4SImode
&& mode1
== V16QImode
&& mode2
== V16QImode
10213 && mode3
== V4SImode
)
10214 type
= v4si_ftype_v16qi_v16qi_v4si
;
10215 else if (mode0
== V4SImode
&& mode1
== V8HImode
&& mode2
== V8HImode
10216 && mode3
== V4SImode
)
10217 type
= v4si_ftype_v8hi_v8hi_v4si
;
10218 else if (mode0
== V4SFmode
&& mode1
== V4SFmode
&& mode2
== V4SFmode
10219 && mode3
== V4SImode
)
10220 type
= v4sf_ftype_v4sf_v4sf_v4si
;
10222 /* vchar, vchar, vchar, 4-bit literal. */
10223 else if (mode0
== V16QImode
&& mode1
== mode0
&& mode2
== mode0
10224 && mode3
== QImode
)
10225 type
= v16qi_ftype_v16qi_v16qi_int
;
10227 /* vshort, vshort, vshort, 4-bit literal. */
10228 else if (mode0
== V8HImode
&& mode1
== mode0
&& mode2
== mode0
10229 && mode3
== QImode
)
10230 type
= v8hi_ftype_v8hi_v8hi_int
;
10232 /* vint, vint, vint, 4-bit literal. */
10233 else if (mode0
== V4SImode
&& mode1
== mode0
&& mode2
== mode0
10234 && mode3
== QImode
)
10235 type
= v4si_ftype_v4si_v4si_int
;
10237 /* vfloat, vfloat, vfloat, 4-bit literal. */
10238 else if (mode0
== V4SFmode
&& mode1
== mode0
&& mode2
== mode0
10239 && mode3
== QImode
)
10240 type
= v4sf_ftype_v4sf_v4sf_int
;
10243 gcc_unreachable ();
10245 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
10248 /* Add the simple binary operators. */
10249 d
= (struct builtin_description
*) bdesc_2arg
;
10250 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
10252 enum machine_mode mode0
, mode1
, mode2
;
10254 bool is_overloaded
= d
->code
>= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10255 && d
->code
<= ALTIVEC_BUILTIN_OVERLOADED_LAST
;
10265 if (d
->name
== 0 || d
->icode
== CODE_FOR_nothing
)
10268 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
10269 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
10270 mode2
= insn_data
[d
->icode
].operand
[2].mode
;
10273 /* When all three operands are of the same mode. */
10274 if (mode0
== mode1
&& mode1
== mode2
)
10279 type
= opaque_ftype_opaque_opaque
;
10282 type
= v4sf_ftype_v4sf_v4sf
;
10285 type
= v4si_ftype_v4si_v4si
;
10288 type
= v16qi_ftype_v16qi_v16qi
;
10291 type
= v8hi_ftype_v8hi_v8hi
;
10294 type
= v2si_ftype_v2si_v2si
;
10297 if (TARGET_PAIRED_FLOAT
)
10298 type
= v2sf_ftype_v2sf_v2sf
;
10300 type
= v2sf_ftype_v2sf_v2sf_spe
;
10303 type
= int_ftype_int_int
;
10306 gcc_unreachable ();
10310 /* A few other combos we really don't want to do manually. */
10312 /* vint, vfloat, vfloat. */
10313 else if (mode0
== V4SImode
&& mode1
== V4SFmode
&& mode2
== V4SFmode
)
10314 type
= v4si_ftype_v4sf_v4sf
;
10316 /* vshort, vchar, vchar. */
10317 else if (mode0
== V8HImode
&& mode1
== V16QImode
&& mode2
== V16QImode
)
10318 type
= v8hi_ftype_v16qi_v16qi
;
10320 /* vint, vshort, vshort. */
10321 else if (mode0
== V4SImode
&& mode1
== V8HImode
&& mode2
== V8HImode
)
10322 type
= v4si_ftype_v8hi_v8hi
;
10324 /* vshort, vint, vint. */
10325 else if (mode0
== V8HImode
&& mode1
== V4SImode
&& mode2
== V4SImode
)
10326 type
= v8hi_ftype_v4si_v4si
;
10328 /* vchar, vshort, vshort. */
10329 else if (mode0
== V16QImode
&& mode1
== V8HImode
&& mode2
== V8HImode
)
10330 type
= v16qi_ftype_v8hi_v8hi
;
10332 /* vint, vchar, vint. */
10333 else if (mode0
== V4SImode
&& mode1
== V16QImode
&& mode2
== V4SImode
)
10334 type
= v4si_ftype_v16qi_v4si
;
10336 /* vint, vchar, vchar. */
10337 else if (mode0
== V4SImode
&& mode1
== V16QImode
&& mode2
== V16QImode
)
10338 type
= v4si_ftype_v16qi_v16qi
;
10340 /* vint, vshort, vint. */
10341 else if (mode0
== V4SImode
&& mode1
== V8HImode
&& mode2
== V4SImode
)
10342 type
= v4si_ftype_v8hi_v4si
;
10344 /* vint, vint, 5-bit literal. */
10345 else if (mode0
== V4SImode
&& mode1
== V4SImode
&& mode2
== QImode
)
10346 type
= v4si_ftype_v4si_int
;
10348 /* vshort, vshort, 5-bit literal. */
10349 else if (mode0
== V8HImode
&& mode1
== V8HImode
&& mode2
== QImode
)
10350 type
= v8hi_ftype_v8hi_int
;
10352 /* vchar, vchar, 5-bit literal. */
10353 else if (mode0
== V16QImode
&& mode1
== V16QImode
&& mode2
== QImode
)
10354 type
= v16qi_ftype_v16qi_int
;
10356 /* vfloat, vint, 5-bit literal. */
10357 else if (mode0
== V4SFmode
&& mode1
== V4SImode
&& mode2
== QImode
)
10358 type
= v4sf_ftype_v4si_int
;
10360 /* vint, vfloat, 5-bit literal. */
10361 else if (mode0
== V4SImode
&& mode1
== V4SFmode
&& mode2
== QImode
)
10362 type
= v4si_ftype_v4sf_int
;
10364 else if (mode0
== V2SImode
&& mode1
== SImode
&& mode2
== SImode
)
10365 type
= v2si_ftype_int_int
;
10367 else if (mode0
== V2SImode
&& mode1
== V2SImode
&& mode2
== QImode
)
10368 type
= v2si_ftype_v2si_char
;
10370 else if (mode0
== V2SImode
&& mode1
== SImode
&& mode2
== QImode
)
10371 type
= v2si_ftype_int_char
;
10376 gcc_assert (mode0
== SImode
);
10380 type
= int_ftype_v4si_v4si
;
10383 type
= int_ftype_v4sf_v4sf
;
10386 type
= int_ftype_v16qi_v16qi
;
10389 type
= int_ftype_v8hi_v8hi
;
10392 gcc_unreachable ();
10396 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
10399 /* Add the simple unary operators. */
10400 d
= (struct builtin_description
*) bdesc_1arg
;
10401 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
10403 enum machine_mode mode0
, mode1
;
10405 bool is_overloaded
= d
->code
>= ALTIVEC_BUILTIN_OVERLOADED_FIRST
10406 && d
->code
<= ALTIVEC_BUILTIN_OVERLOADED_LAST
;
10415 if (d
->name
== 0 || d
->icode
== CODE_FOR_nothing
)
10418 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
10419 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
10422 if (mode0
== V4SImode
&& mode1
== QImode
)
10423 type
= v4si_ftype_int
;
10424 else if (mode0
== V8HImode
&& mode1
== QImode
)
10425 type
= v8hi_ftype_int
;
10426 else if (mode0
== V16QImode
&& mode1
== QImode
)
10427 type
= v16qi_ftype_int
;
10428 else if (mode0
== VOIDmode
&& mode1
== VOIDmode
)
10429 type
= opaque_ftype_opaque
;
10430 else if (mode0
== V4SFmode
&& mode1
== V4SFmode
)
10431 type
= v4sf_ftype_v4sf
;
10432 else if (mode0
== V8HImode
&& mode1
== V16QImode
)
10433 type
= v8hi_ftype_v16qi
;
10434 else if (mode0
== V4SImode
&& mode1
== V8HImode
)
10435 type
= v4si_ftype_v8hi
;
10436 else if (mode0
== V2SImode
&& mode1
== V2SImode
)
10437 type
= v2si_ftype_v2si
;
10438 else if (mode0
== V2SFmode
&& mode1
== V2SFmode
)
10440 if (TARGET_PAIRED_FLOAT
)
10441 type
= v2sf_ftype_v2sf
;
10443 type
= v2sf_ftype_v2sf_spe
;
10445 else if (mode0
== V2SFmode
&& mode1
== V2SImode
)
10446 type
= v2sf_ftype_v2si
;
10447 else if (mode0
== V2SImode
&& mode1
== V2SFmode
)
10448 type
= v2si_ftype_v2sf
;
10449 else if (mode0
== V2SImode
&& mode1
== QImode
)
10450 type
= v2si_ftype_char
;
10452 gcc_unreachable ();
10454 def_builtin (d
->mask
, d
->name
, type
, d
->code
);
10459 rs6000_init_libfuncs (void)
10461 if (DEFAULT_ABI
!= ABI_V4
&& TARGET_XCOFF
10462 && !TARGET_POWER2
&& !TARGET_POWERPC
)
10464 /* AIX library routines for float->int conversion. */
10465 set_conv_libfunc (sfix_optab
, SImode
, DFmode
, "__itrunc");
10466 set_conv_libfunc (ufix_optab
, SImode
, DFmode
, "__uitrunc");
10467 set_conv_libfunc (sfix_optab
, SImode
, TFmode
, "_qitrunc");
10468 set_conv_libfunc (ufix_optab
, SImode
, TFmode
, "_quitrunc");
10471 if (!TARGET_IEEEQUAD
)
10472 /* AIX/Darwin/64-bit Linux quad floating point routines. */
10473 if (!TARGET_XL_COMPAT
)
10475 set_optab_libfunc (add_optab
, TFmode
, "__gcc_qadd");
10476 set_optab_libfunc (sub_optab
, TFmode
, "__gcc_qsub");
10477 set_optab_libfunc (smul_optab
, TFmode
, "__gcc_qmul");
10478 set_optab_libfunc (sdiv_optab
, TFmode
, "__gcc_qdiv");
10480 if (!(TARGET_HARD_FLOAT
&& (TARGET_FPRS
|| TARGET_E500_DOUBLE
)))
10482 set_optab_libfunc (neg_optab
, TFmode
, "__gcc_qneg");
10483 set_optab_libfunc (eq_optab
, TFmode
, "__gcc_qeq");
10484 set_optab_libfunc (ne_optab
, TFmode
, "__gcc_qne");
10485 set_optab_libfunc (gt_optab
, TFmode
, "__gcc_qgt");
10486 set_optab_libfunc (ge_optab
, TFmode
, "__gcc_qge");
10487 set_optab_libfunc (lt_optab
, TFmode
, "__gcc_qlt");
10488 set_optab_libfunc (le_optab
, TFmode
, "__gcc_qle");
10490 set_conv_libfunc (sext_optab
, TFmode
, SFmode
, "__gcc_stoq");
10491 set_conv_libfunc (sext_optab
, TFmode
, DFmode
, "__gcc_dtoq");
10492 set_conv_libfunc (trunc_optab
, SFmode
, TFmode
, "__gcc_qtos");
10493 set_conv_libfunc (trunc_optab
, DFmode
, TFmode
, "__gcc_qtod");
10494 set_conv_libfunc (sfix_optab
, SImode
, TFmode
, "__gcc_qtoi");
10495 set_conv_libfunc (ufix_optab
, SImode
, TFmode
, "__gcc_qtou");
10496 set_conv_libfunc (sfloat_optab
, TFmode
, SImode
, "__gcc_itoq");
10497 set_conv_libfunc (ufloat_optab
, TFmode
, SImode
, "__gcc_utoq");
10500 if (!(TARGET_HARD_FLOAT
&& TARGET_FPRS
))
10501 set_optab_libfunc (unord_optab
, TFmode
, "__gcc_qunord");
10505 set_optab_libfunc (add_optab
, TFmode
, "_xlqadd");
10506 set_optab_libfunc (sub_optab
, TFmode
, "_xlqsub");
10507 set_optab_libfunc (smul_optab
, TFmode
, "_xlqmul");
10508 set_optab_libfunc (sdiv_optab
, TFmode
, "_xlqdiv");
10512 /* 32-bit SVR4 quad floating point routines. */
10514 set_optab_libfunc (add_optab
, TFmode
, "_q_add");
10515 set_optab_libfunc (sub_optab
, TFmode
, "_q_sub");
10516 set_optab_libfunc (neg_optab
, TFmode
, "_q_neg");
10517 set_optab_libfunc (smul_optab
, TFmode
, "_q_mul");
10518 set_optab_libfunc (sdiv_optab
, TFmode
, "_q_div");
10519 if (TARGET_PPC_GPOPT
|| TARGET_POWER2
)
10520 set_optab_libfunc (sqrt_optab
, TFmode
, "_q_sqrt");
10522 set_optab_libfunc (eq_optab
, TFmode
, "_q_feq");
10523 set_optab_libfunc (ne_optab
, TFmode
, "_q_fne");
10524 set_optab_libfunc (gt_optab
, TFmode
, "_q_fgt");
10525 set_optab_libfunc (ge_optab
, TFmode
, "_q_fge");
10526 set_optab_libfunc (lt_optab
, TFmode
, "_q_flt");
10527 set_optab_libfunc (le_optab
, TFmode
, "_q_fle");
10529 set_conv_libfunc (sext_optab
, TFmode
, SFmode
, "_q_stoq");
10530 set_conv_libfunc (sext_optab
, TFmode
, DFmode
, "_q_dtoq");
10531 set_conv_libfunc (trunc_optab
, SFmode
, TFmode
, "_q_qtos");
10532 set_conv_libfunc (trunc_optab
, DFmode
, TFmode
, "_q_qtod");
10533 set_conv_libfunc (sfix_optab
, SImode
, TFmode
, "_q_qtoi");
10534 set_conv_libfunc (ufix_optab
, SImode
, TFmode
, "_q_qtou");
10535 set_conv_libfunc (sfloat_optab
, TFmode
, SImode
, "_q_itoq");
10536 set_conv_libfunc (ufloat_optab
, TFmode
, SImode
, "_q_utoq");
10541 /* Expand a block clear operation, and return 1 if successful. Return 0
10542 if we should let the compiler generate normal code.
10544 operands[0] is the destination
10545 operands[1] is the length
10546 operands[3] is the alignment */
10549 expand_block_clear (rtx operands
[])
10551 rtx orig_dest
= operands
[0];
10552 rtx bytes_rtx
= operands
[1];
10553 rtx align_rtx
= operands
[3];
10554 bool constp
= (GET_CODE (bytes_rtx
) == CONST_INT
);
10555 HOST_WIDE_INT align
;
10556 HOST_WIDE_INT bytes
;
10561 /* If this is not a fixed size move, just call memcpy */
10565 /* This must be a fixed size alignment */
10566 gcc_assert (GET_CODE (align_rtx
) == CONST_INT
);
10567 align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
10569 /* Anything to clear? */
10570 bytes
= INTVAL (bytes_rtx
);
10574 /* Use the builtin memset after a point, to avoid huge code bloat.
10575 When optimize_size, avoid any significant code bloat; calling
10576 memset is about 4 instructions, so allow for one instruction to
10577 load zero and three to do clearing. */
10578 if (TARGET_ALTIVEC
&& align
>= 128)
10580 else if (TARGET_POWERPC64
&& align
>= 32)
10582 else if (TARGET_SPE
&& align
>= 64)
10587 if (optimize_size
&& bytes
> 3 * clear_step
)
10589 if (! optimize_size
&& bytes
> 8 * clear_step
)
10592 for (offset
= 0; bytes
> 0; offset
+= clear_bytes
, bytes
-= clear_bytes
)
10594 enum machine_mode mode
= BLKmode
;
10597 if (bytes
>= 16 && TARGET_ALTIVEC
&& align
>= 128)
10602 else if (bytes
>= 8 && TARGET_SPE
&& align
>= 64)
10607 else if (bytes
>= 8 && TARGET_POWERPC64
10608 /* 64-bit loads and stores require word-aligned
10610 && (align
>= 64 || (!STRICT_ALIGNMENT
&& align
>= 32)))
10615 else if (bytes
>= 4 && (align
>= 32 || !STRICT_ALIGNMENT
))
10616 { /* move 4 bytes */
10620 else if (bytes
>= 2 && (align
>= 16 || !STRICT_ALIGNMENT
))
10621 { /* move 2 bytes */
10625 else /* move 1 byte at a time */
10631 dest
= adjust_address (orig_dest
, mode
, offset
);
10633 emit_move_insn (dest
, CONST0_RTX (mode
));
10640 /* Expand a block move operation, and return 1 if successful. Return 0
10641 if we should let the compiler generate normal code.
10643 operands[0] is the destination
10644 operands[1] is the source
10645 operands[2] is the length
10646 operands[3] is the alignment */
10648 #define MAX_MOVE_REG 4
10651 expand_block_move (rtx operands
[])
10653 rtx orig_dest
= operands
[0];
10654 rtx orig_src
= operands
[1];
10655 rtx bytes_rtx
= operands
[2];
10656 rtx align_rtx
= operands
[3];
10657 int constp
= (GET_CODE (bytes_rtx
) == CONST_INT
);
10662 rtx stores
[MAX_MOVE_REG
];
10665 /* If this is not a fixed size move, just call memcpy */
10669 /* This must be a fixed size alignment */
10670 gcc_assert (GET_CODE (align_rtx
) == CONST_INT
);
10671 align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
10673 /* Anything to move? */
10674 bytes
= INTVAL (bytes_rtx
);
10678 /* store_one_arg depends on expand_block_move to handle at least the size of
10679 reg_parm_stack_space. */
10680 if (bytes
> (TARGET_POWERPC64
? 64 : 32))
10683 for (offset
= 0; bytes
> 0; offset
+= move_bytes
, bytes
-= move_bytes
)
10686 rtx (*movmemsi
) (rtx
, rtx
, rtx
, rtx
);
10687 rtx (*mov
) (rtx
, rtx
);
10689 enum machine_mode mode
= BLKmode
;
10692 /* Altivec first, since it will be faster than a string move
10693 when it applies, and usually not significantly larger. */
10694 if (TARGET_ALTIVEC
&& bytes
>= 16 && align
>= 128)
10698 gen_func
.mov
= gen_movv4si
;
10700 else if (TARGET_SPE
&& bytes
>= 8 && align
>= 64)
10704 gen_func
.mov
= gen_movv2si
;
10706 else if (TARGET_STRING
10707 && bytes
> 24 /* move up to 32 bytes at a time */
10713 && ! fixed_regs
[10]
10714 && ! fixed_regs
[11]
10715 && ! fixed_regs
[12])
10717 move_bytes
= (bytes
> 32) ? 32 : bytes
;
10718 gen_func
.movmemsi
= gen_movmemsi_8reg
;
10720 else if (TARGET_STRING
10721 && bytes
> 16 /* move up to 24 bytes at a time */
10727 && ! fixed_regs
[10])
10729 move_bytes
= (bytes
> 24) ? 24 : bytes
;
10730 gen_func
.movmemsi
= gen_movmemsi_6reg
;
10732 else if (TARGET_STRING
10733 && bytes
> 8 /* move up to 16 bytes at a time */
10737 && ! fixed_regs
[8])
10739 move_bytes
= (bytes
> 16) ? 16 : bytes
;
10740 gen_func
.movmemsi
= gen_movmemsi_4reg
;
10742 else if (bytes
>= 8 && TARGET_POWERPC64
10743 /* 64-bit loads and stores require word-aligned
10745 && (align
>= 64 || (!STRICT_ALIGNMENT
&& align
>= 32)))
10749 gen_func
.mov
= gen_movdi
;
10751 else if (TARGET_STRING
&& bytes
> 4 && !TARGET_POWERPC64
)
10752 { /* move up to 8 bytes at a time */
10753 move_bytes
= (bytes
> 8) ? 8 : bytes
;
10754 gen_func
.movmemsi
= gen_movmemsi_2reg
;
10756 else if (bytes
>= 4 && (align
>= 32 || !STRICT_ALIGNMENT
))
10757 { /* move 4 bytes */
10760 gen_func
.mov
= gen_movsi
;
10762 else if (bytes
>= 2 && (align
>= 16 || !STRICT_ALIGNMENT
))
10763 { /* move 2 bytes */
10766 gen_func
.mov
= gen_movhi
;
10768 else if (TARGET_STRING
&& bytes
> 1)
10769 { /* move up to 4 bytes at a time */
10770 move_bytes
= (bytes
> 4) ? 4 : bytes
;
10771 gen_func
.movmemsi
= gen_movmemsi_1reg
;
10773 else /* move 1 byte at a time */
10777 gen_func
.mov
= gen_movqi
;
10780 src
= adjust_address (orig_src
, mode
, offset
);
10781 dest
= adjust_address (orig_dest
, mode
, offset
);
10783 if (mode
!= BLKmode
)
10785 rtx tmp_reg
= gen_reg_rtx (mode
);
10787 emit_insn ((*gen_func
.mov
) (tmp_reg
, src
));
10788 stores
[num_reg
++] = (*gen_func
.mov
) (dest
, tmp_reg
);
10791 if (mode
== BLKmode
|| num_reg
>= MAX_MOVE_REG
|| bytes
== move_bytes
)
10794 for (i
= 0; i
< num_reg
; i
++)
10795 emit_insn (stores
[i
]);
10799 if (mode
== BLKmode
)
10801 /* Move the address into scratch registers. The movmemsi
10802 patterns require zero offset. */
10803 if (!REG_P (XEXP (src
, 0)))
10805 rtx src_reg
= copy_addr_to_reg (XEXP (src
, 0));
10806 src
= replace_equiv_address (src
, src_reg
);
10808 set_mem_size (src
, GEN_INT (move_bytes
));
10810 if (!REG_P (XEXP (dest
, 0)))
10812 rtx dest_reg
= copy_addr_to_reg (XEXP (dest
, 0));
10813 dest
= replace_equiv_address (dest
, dest_reg
);
10815 set_mem_size (dest
, GEN_INT (move_bytes
));
10817 emit_insn ((*gen_func
.movmemsi
) (dest
, src
,
10818 GEN_INT (move_bytes
& 31),
10827 /* Return a string to perform a load_multiple operation.
10828 operands[0] is the vector.
10829 operands[1] is the source address.
10830 operands[2] is the first destination register. */
10833 rs6000_output_load_multiple (rtx operands
[3])
10835 /* We have to handle the case where the pseudo used to contain the address
10836 is assigned to one of the output registers. */
10838 int words
= XVECLEN (operands
[0], 0);
10841 if (XVECLEN (operands
[0], 0) == 1)
10842 return "{l|lwz} %2,0(%1)";
10844 for (i
= 0; i
< words
; i
++)
10845 if (refers_to_regno_p (REGNO (operands
[2]) + i
,
10846 REGNO (operands
[2]) + i
+ 1, operands
[1], 0))
10850 xop
[0] = GEN_INT (4 * (words
-1));
10851 xop
[1] = operands
[1];
10852 xop
[2] = operands
[2];
10853 output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop
);
10858 xop
[0] = GEN_INT (4 * (words
-1));
10859 xop
[1] = operands
[1];
10860 xop
[2] = gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
10861 output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop
);
10866 for (j
= 0; j
< words
; j
++)
10869 xop
[0] = GEN_INT (j
* 4);
10870 xop
[1] = operands
[1];
10871 xop
[2] = gen_rtx_REG (SImode
, REGNO (operands
[2]) + j
);
10872 output_asm_insn ("{l|lwz} %2,%0(%1)", xop
);
10874 xop
[0] = GEN_INT (i
* 4);
10875 xop
[1] = operands
[1];
10876 output_asm_insn ("{l|lwz} %1,%0(%1)", xop
);
10881 return "{lsi|lswi} %2,%1,%N0";
10885 /* A validation routine: say whether CODE, a condition code, and MODE
10886 match. The other alternatives either don't make sense or should
10887 never be generated. */
10890 validate_condition_mode (enum rtx_code code
, enum machine_mode mode
)
10892 gcc_assert ((GET_RTX_CLASS (code
) == RTX_COMPARE
10893 || GET_RTX_CLASS (code
) == RTX_COMM_COMPARE
)
10894 && GET_MODE_CLASS (mode
) == MODE_CC
);
10896 /* These don't make sense. */
10897 gcc_assert ((code
!= GT
&& code
!= LT
&& code
!= GE
&& code
!= LE
)
10898 || mode
!= CCUNSmode
);
10900 gcc_assert ((code
!= GTU
&& code
!= LTU
&& code
!= GEU
&& code
!= LEU
)
10901 || mode
== CCUNSmode
);
10903 gcc_assert (mode
== CCFPmode
10904 || (code
!= ORDERED
&& code
!= UNORDERED
10905 && code
!= UNEQ
&& code
!= LTGT
10906 && code
!= UNGT
&& code
!= UNLT
10907 && code
!= UNGE
&& code
!= UNLE
));
10909 /* These should never be generated except for
10910 flag_finite_math_only. */
10911 gcc_assert (mode
!= CCFPmode
10912 || flag_finite_math_only
10913 || (code
!= LE
&& code
!= GE
10914 && code
!= UNEQ
&& code
!= LTGT
10915 && code
!= UNGT
&& code
!= UNLT
));
10917 /* These are invalid; the information is not there. */
10918 gcc_assert (mode
!= CCEQmode
|| code
== EQ
|| code
== NE
);
10922 /* Return 1 if ANDOP is a mask that has no bits on that are not in the
10923 mask required to convert the result of a rotate insn into a shift
10924 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
10927 includes_lshift_p (rtx shiftop
, rtx andop
)
10929 unsigned HOST_WIDE_INT shift_mask
= ~(unsigned HOST_WIDE_INT
) 0;
10931 shift_mask
<<= INTVAL (shiftop
);
10933 return (INTVAL (andop
) & 0xffffffff & ~shift_mask
) == 0;
10936 /* Similar, but for right shift. */
10939 includes_rshift_p (rtx shiftop
, rtx andop
)
10941 unsigned HOST_WIDE_INT shift_mask
= ~(unsigned HOST_WIDE_INT
) 0;
10943 shift_mask
>>= INTVAL (shiftop
);
10945 return (INTVAL (andop
) & 0xffffffff & ~shift_mask
) == 0;
10948 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
10949 to perform a left shift. It must have exactly SHIFTOP least
10950 significant 0's, then one or more 1's, then zero or more 0's. */
10953 includes_rldic_lshift_p (rtx shiftop
, rtx andop
)
10955 if (GET_CODE (andop
) == CONST_INT
)
10957 HOST_WIDE_INT c
, lsb
, shift_mask
;
10959 c
= INTVAL (andop
);
10960 if (c
== 0 || c
== ~0)
10964 shift_mask
<<= INTVAL (shiftop
);
10966 /* Find the least significant one bit. */
10969 /* It must coincide with the LSB of the shift mask. */
10970 if (-lsb
!= shift_mask
)
10973 /* Invert to look for the next transition (if any). */
10976 /* Remove the low group of ones (originally low group of zeros). */
10979 /* Again find the lsb, and check we have all 1's above. */
10983 else if (GET_CODE (andop
) == CONST_DOUBLE
10984 && (GET_MODE (andop
) == VOIDmode
|| GET_MODE (andop
) == DImode
))
10986 HOST_WIDE_INT low
, high
, lsb
;
10987 HOST_WIDE_INT shift_mask_low
, shift_mask_high
;
10989 low
= CONST_DOUBLE_LOW (andop
);
10990 if (HOST_BITS_PER_WIDE_INT
< 64)
10991 high
= CONST_DOUBLE_HIGH (andop
);
10993 if ((low
== 0 && (HOST_BITS_PER_WIDE_INT
>= 64 || high
== 0))
10994 || (low
== ~0 && (HOST_BITS_PER_WIDE_INT
>= 64 || high
== ~0)))
10997 if (HOST_BITS_PER_WIDE_INT
< 64 && low
== 0)
10999 shift_mask_high
= ~0;
11000 if (INTVAL (shiftop
) > 32)
11001 shift_mask_high
<<= INTVAL (shiftop
) - 32;
11003 lsb
= high
& -high
;
11005 if (-lsb
!= shift_mask_high
|| INTVAL (shiftop
) < 32)
11011 lsb
= high
& -high
;
11012 return high
== -lsb
;
11015 shift_mask_low
= ~0;
11016 shift_mask_low
<<= INTVAL (shiftop
);
11020 if (-lsb
!= shift_mask_low
)
11023 if (HOST_BITS_PER_WIDE_INT
< 64)
11028 if (HOST_BITS_PER_WIDE_INT
< 64 && low
== 0)
11030 lsb
= high
& -high
;
11031 return high
== -lsb
;
11035 return low
== -lsb
&& (HOST_BITS_PER_WIDE_INT
>= 64 || high
== ~0);
11041 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
11042 to perform a left shift. It must have SHIFTOP or more least
11043 significant 0's, with the remainder of the word 1's. */
11046 includes_rldicr_lshift_p (rtx shiftop
, rtx andop
)
11048 if (GET_CODE (andop
) == CONST_INT
)
11050 HOST_WIDE_INT c
, lsb
, shift_mask
;
11053 shift_mask
<<= INTVAL (shiftop
);
11054 c
= INTVAL (andop
);
11056 /* Find the least significant one bit. */
11059 /* It must be covered by the shift mask.
11060 This test also rejects c == 0. */
11061 if ((lsb
& shift_mask
) == 0)
11064 /* Check we have all 1's above the transition, and reject all 1's. */
11065 return c
== -lsb
&& lsb
!= 1;
11067 else if (GET_CODE (andop
) == CONST_DOUBLE
11068 && (GET_MODE (andop
) == VOIDmode
|| GET_MODE (andop
) == DImode
))
11070 HOST_WIDE_INT low
, lsb
, shift_mask_low
;
11072 low
= CONST_DOUBLE_LOW (andop
);
11074 if (HOST_BITS_PER_WIDE_INT
< 64)
11076 HOST_WIDE_INT high
, shift_mask_high
;
11078 high
= CONST_DOUBLE_HIGH (andop
);
11082 shift_mask_high
= ~0;
11083 if (INTVAL (shiftop
) > 32)
11084 shift_mask_high
<<= INTVAL (shiftop
) - 32;
11086 lsb
= high
& -high
;
11088 if ((lsb
& shift_mask_high
) == 0)
11091 return high
== -lsb
;
11097 shift_mask_low
= ~0;
11098 shift_mask_low
<<= INTVAL (shiftop
);
11102 if ((lsb
& shift_mask_low
) == 0)
11105 return low
== -lsb
&& lsb
!= 1;
11111 /* Return 1 if operands will generate a valid arguments to rlwimi
11112 instruction for insert with right shift in 64-bit mode. The mask may
11113 not start on the first bit or stop on the last bit because wrap-around
11114 effects of instruction do not correspond to semantics of RTL insn. */
11117 insvdi_rshift_rlwimi_p (rtx sizeop
, rtx startop
, rtx shiftop
)
11119 if (INTVAL (startop
) > 32
11120 && INTVAL (startop
) < 64
11121 && INTVAL (sizeop
) > 1
11122 && INTVAL (sizeop
) + INTVAL (startop
) < 64
11123 && INTVAL (shiftop
) > 0
11124 && INTVAL (sizeop
) + INTVAL (shiftop
) < 32
11125 && (64 - (INTVAL (shiftop
) & 63)) >= INTVAL (sizeop
))
11131 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
11132 for lfq and stfq insns iff the registers are hard registers. */
11135 registers_ok_for_quad_peep (rtx reg1
, rtx reg2
)
11137 /* We might have been passed a SUBREG. */
11138 if (GET_CODE (reg1
) != REG
|| GET_CODE (reg2
) != REG
)
11141 /* We might have been passed non floating point registers. */
11142 if (!FP_REGNO_P (REGNO (reg1
))
11143 || !FP_REGNO_P (REGNO (reg2
)))
11146 return (REGNO (reg1
) == REGNO (reg2
) - 1);
11149 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
11150 addr1 and addr2 must be in consecutive memory locations
11151 (addr2 == addr1 + 8). */
11154 mems_ok_for_quad_peep (rtx mem1
, rtx mem2
)
11157 unsigned int reg1
, reg2
;
11158 int offset1
, offset2
;
11160 /* The mems cannot be volatile. */
11161 if (MEM_VOLATILE_P (mem1
) || MEM_VOLATILE_P (mem2
))
11164 addr1
= XEXP (mem1
, 0);
11165 addr2
= XEXP (mem2
, 0);
11167 /* Extract an offset (if used) from the first addr. */
11168 if (GET_CODE (addr1
) == PLUS
)
11170 /* If not a REG, return zero. */
11171 if (GET_CODE (XEXP (addr1
, 0)) != REG
)
11175 reg1
= REGNO (XEXP (addr1
, 0));
11176 /* The offset must be constant! */
11177 if (GET_CODE (XEXP (addr1
, 1)) != CONST_INT
)
11179 offset1
= INTVAL (XEXP (addr1
, 1));
11182 else if (GET_CODE (addr1
) != REG
)
11186 reg1
= REGNO (addr1
);
11187 /* This was a simple (mem (reg)) expression. Offset is 0. */
11191 /* And now for the second addr. */
11192 if (GET_CODE (addr2
) == PLUS
)
11194 /* If not a REG, return zero. */
11195 if (GET_CODE (XEXP (addr2
, 0)) != REG
)
11199 reg2
= REGNO (XEXP (addr2
, 0));
11200 /* The offset must be constant. */
11201 if (GET_CODE (XEXP (addr2
, 1)) != CONST_INT
)
11203 offset2
= INTVAL (XEXP (addr2
, 1));
11206 else if (GET_CODE (addr2
) != REG
)
11210 reg2
= REGNO (addr2
);
11211 /* This was a simple (mem (reg)) expression. Offset is 0. */
11215 /* Both of these must have the same base register. */
11219 /* The offset for the second addr must be 8 more than the first addr. */
11220 if (offset2
!= offset1
+ 8)
11223 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
11230 rs6000_secondary_memory_needed_rtx (enum machine_mode mode
)
11232 static bool eliminated
= false;
11233 if (mode
!= SDmode
)
11234 return assign_stack_local (mode
, GET_MODE_SIZE (mode
), 0);
11237 rtx mem
= cfun
->machine
->sdmode_stack_slot
;
11238 gcc_assert (mem
!= NULL_RTX
);
11242 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
11243 cfun
->machine
->sdmode_stack_slot
= mem
;
11251 rs6000_check_sdmode (tree
*tp
, int *walk_subtrees
, void *data ATTRIBUTE_UNUSED
)
11253 /* Don't walk into types. */
11254 if (*tp
== NULL_TREE
|| *tp
== error_mark_node
|| TYPE_P (*tp
))
11256 *walk_subtrees
= 0;
11260 switch (TREE_CODE (*tp
))
11268 case ALIGN_INDIRECT_REF
:
11269 case MISALIGNED_INDIRECT_REF
:
11270 case VIEW_CONVERT_EXPR
:
11271 if (TYPE_MODE (TREE_TYPE (*tp
)) == SDmode
)
11282 /* Allocate a 64-bit stack slot to be used for copying SDmode
11283 values through if this function has any SDmode references. */
11286 rs6000_alloc_sdmode_stack_slot (void)
11290 block_stmt_iterator bsi
;
11292 gcc_assert (cfun
->machine
->sdmode_stack_slot
== NULL_RTX
);
11295 for (bsi
= bsi_start (bb
); !bsi_end_p (bsi
); bsi_next (&bsi
))
11297 tree ret
= walk_tree_without_duplicates (bsi_stmt_ptr (bsi
),
11298 rs6000_check_sdmode
, NULL
);
11301 rtx stack
= assign_stack_local (DDmode
, GET_MODE_SIZE (DDmode
), 0);
11302 cfun
->machine
->sdmode_stack_slot
= adjust_address_nv (stack
,
11308 /* Check for any SDmode parameters of the function. */
11309 for (t
= DECL_ARGUMENTS (cfun
->decl
); t
; t
= TREE_CHAIN (t
))
11311 if (TREE_TYPE (t
) == error_mark_node
)
11314 if (TYPE_MODE (TREE_TYPE (t
)) == SDmode
11315 || TYPE_MODE (DECL_ARG_TYPE (t
)) == SDmode
)
11317 rtx stack
= assign_stack_local (DDmode
, GET_MODE_SIZE (DDmode
), 0);
11318 cfun
->machine
->sdmode_stack_slot
= adjust_address_nv (stack
,
11326 rs6000_instantiate_decls (void)
11328 if (cfun
->machine
->sdmode_stack_slot
!= NULL_RTX
)
11329 instantiate_decl_rtl (cfun
->machine
->sdmode_stack_slot
);
11332 /* Return the register class of a scratch register needed to copy IN into
11333 or out of a register in CLASS in MODE. If it can be done directly,
11334 NO_REGS is returned. */
11337 rs6000_secondary_reload_class (enum reg_class
class,
11338 enum machine_mode mode ATTRIBUTE_UNUSED
,
11343 if (TARGET_ELF
|| (DEFAULT_ABI
== ABI_DARWIN
11345 && MACHOPIC_INDIRECT
11349 /* We cannot copy a symbolic operand directly into anything
11350 other than BASE_REGS for TARGET_ELF. So indicate that a
11351 register from BASE_REGS is needed as an intermediate
11354 On Darwin, pic addresses require a load from memory, which
11355 needs a base register. */
11356 if (class != BASE_REGS
11357 && (GET_CODE (in
) == SYMBOL_REF
11358 || GET_CODE (in
) == HIGH
11359 || GET_CODE (in
) == LABEL_REF
11360 || GET_CODE (in
) == CONST
))
11364 if (GET_CODE (in
) == REG
)
11366 regno
= REGNO (in
);
11367 if (regno
>= FIRST_PSEUDO_REGISTER
)
11369 regno
= true_regnum (in
);
11370 if (regno
>= FIRST_PSEUDO_REGISTER
)
11374 else if (GET_CODE (in
) == SUBREG
)
11376 regno
= true_regnum (in
);
11377 if (regno
>= FIRST_PSEUDO_REGISTER
)
11383 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
11385 if (class == GENERAL_REGS
|| class == BASE_REGS
11386 || (regno
>= 0 && INT_REGNO_P (regno
)))
11389 /* Constants, memory, and FP registers can go into FP registers. */
11390 if ((regno
== -1 || FP_REGNO_P (regno
))
11391 && (class == FLOAT_REGS
|| class == NON_SPECIAL_REGS
))
11392 return (mode
!= SDmode
) ? NO_REGS
: GENERAL_REGS
;
11394 /* Memory, and AltiVec registers can go into AltiVec registers. */
11395 if ((regno
== -1 || ALTIVEC_REGNO_P (regno
))
11396 && class == ALTIVEC_REGS
)
11399 /* We can copy among the CR registers. */
11400 if ((class == CR_REGS
|| class == CR0_REGS
)
11401 && regno
>= 0 && CR_REGNO_P (regno
))
11404 /* Otherwise, we need GENERAL_REGS. */
11405 return GENERAL_REGS
;
11408 /* Given a comparison operation, return the bit number in CCR to test. We
11409 know this is a valid comparison.
11411 SCC_P is 1 if this is for an scc. That means that %D will have been
11412 used instead of %C, so the bits will be in different places.
11414 Return -1 if OP isn't a valid comparison for some reason. */
11417 ccr_bit (rtx op
, int scc_p
)
11419 enum rtx_code code
= GET_CODE (op
);
11420 enum machine_mode cc_mode
;
11425 if (!COMPARISON_P (op
))
11428 reg
= XEXP (op
, 0);
11430 gcc_assert (GET_CODE (reg
) == REG
&& CR_REGNO_P (REGNO (reg
)));
11432 cc_mode
= GET_MODE (reg
);
11433 cc_regnum
= REGNO (reg
);
11434 base_bit
= 4 * (cc_regnum
- CR0_REGNO
);
11436 validate_condition_mode (code
, cc_mode
);
11438 /* When generating a sCOND operation, only positive conditions are
11441 || code
== EQ
|| code
== GT
|| code
== LT
|| code
== UNORDERED
11442 || code
== GTU
|| code
== LTU
);
11447 return scc_p
? base_bit
+ 3 : base_bit
+ 2;
11449 return base_bit
+ 2;
11450 case GT
: case GTU
: case UNLE
:
11451 return base_bit
+ 1;
11452 case LT
: case LTU
: case UNGE
:
11454 case ORDERED
: case UNORDERED
:
11455 return base_bit
+ 3;
11458 /* If scc, we will have done a cror to put the bit in the
11459 unordered position. So test that bit. For integer, this is ! LT
11460 unless this is an scc insn. */
11461 return scc_p
? base_bit
+ 3 : base_bit
;
11464 return scc_p
? base_bit
+ 3 : base_bit
+ 1;
11467 gcc_unreachable ();
11471 /* Return the GOT register. */
11474 rs6000_got_register (rtx value ATTRIBUTE_UNUSED
)
11476 /* The second flow pass currently (June 1999) can't update
11477 regs_ever_live without disturbing other parts of the compiler, so
11478 update it here to make the prolog/epilogue code happy. */
11479 if (!can_create_pseudo_p ()
11480 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))
11481 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM
, true);
11483 crtl
->uses_pic_offset_table
= 1;
11485 return pic_offset_table_rtx
;
11488 /* Function to init struct machine_function.
11489 This will be called, via a pointer variable,
11490 from push_function_context. */
11492 static struct machine_function
*
11493 rs6000_init_machine_status (void)
11495 return ggc_alloc_cleared (sizeof (machine_function
));
11498 /* These macros test for integers and extract the low-order bits. */
11500 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
11501 && GET_MODE (X) == VOIDmode)
11503 #define INT_LOWPART(X) \
11504 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
11507 extract_MB (rtx op
)
11510 unsigned long val
= INT_LOWPART (op
);
11512 /* If the high bit is zero, the value is the first 1 bit we find
11514 if ((val
& 0x80000000) == 0)
11516 gcc_assert (val
& 0xffffffff);
11519 while (((val
<<= 1) & 0x80000000) == 0)
11524 /* If the high bit is set and the low bit is not, or the mask is all
11525 1's, the value is zero. */
11526 if ((val
& 1) == 0 || (val
& 0xffffffff) == 0xffffffff)
11529 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11532 while (((val
>>= 1) & 1) != 0)
11539 extract_ME (rtx op
)
11542 unsigned long val
= INT_LOWPART (op
);
11544 /* If the low bit is zero, the value is the first 1 bit we find from
11546 if ((val
& 1) == 0)
11548 gcc_assert (val
& 0xffffffff);
11551 while (((val
>>= 1) & 1) == 0)
11557 /* If the low bit is set and the high bit is not, or the mask is all
11558 1's, the value is 31. */
11559 if ((val
& 0x80000000) == 0 || (val
& 0xffffffff) == 0xffffffff)
11562 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
11565 while (((val
<<= 1) & 0x80000000) != 0)
11571 /* Locate some local-dynamic symbol still in use by this function
11572 so that we can print its name in some tls_ld pattern. */
11574 static const char *
11575 rs6000_get_some_local_dynamic_name (void)
11579 if (cfun
->machine
->some_ld_name
)
11580 return cfun
->machine
->some_ld_name
;
11582 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
11584 && for_each_rtx (&PATTERN (insn
),
11585 rs6000_get_some_local_dynamic_name_1
, 0))
11586 return cfun
->machine
->some_ld_name
;
11588 gcc_unreachable ();
11591 /* Helper function for rs6000_get_some_local_dynamic_name. */
11594 rs6000_get_some_local_dynamic_name_1 (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
11598 if (GET_CODE (x
) == SYMBOL_REF
)
11600 const char *str
= XSTR (x
, 0);
11601 if (SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
)
11603 cfun
->machine
->some_ld_name
= str
;
11611 /* Write out a function code label. */
11614 rs6000_output_function_entry (FILE *file
, const char *fname
)
11616 if (fname
[0] != '.')
11618 switch (DEFAULT_ABI
)
11621 gcc_unreachable ();
11627 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
11636 RS6000_OUTPUT_BASENAME (file
, fname
);
11638 assemble_name (file
, fname
);
11641 /* Print an operand. Recognize special options, documented below. */
11644 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
11645 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
11647 #define SMALL_DATA_RELOC "sda21"
11648 #define SMALL_DATA_REG 0
11652 print_operand (FILE *file
, rtx x
, int code
)
11656 unsigned HOST_WIDE_INT uval
;
11661 /* Write out an instruction after the call which may be replaced
11662 with glue code by the loader. This depends on the AIX version. */
11663 asm_fprintf (file
, RS6000_CALL_GLUE
);
11666 /* %a is output_address. */
11669 /* If X is a constant integer whose low-order 5 bits are zero,
11670 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
11671 in the AIX assembler where "sri" with a zero shift count
11672 writes a trash instruction. */
11673 if (GET_CODE (x
) == CONST_INT
&& (INTVAL (x
) & 31) == 0)
11680 /* If constant, low-order 16 bits of constant, unsigned.
11681 Otherwise, write normally. */
11683 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INT_LOWPART (x
) & 0xffff);
11685 print_operand (file
, x
, 0);
11689 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
11690 for 64-bit mask direction. */
11691 putc (((INT_LOWPART (x
) & 1) == 0 ? 'r' : 'l'), file
);
11694 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
11698 /* X is a CR register. Print the number of the GT bit of the CR. */
11699 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
11700 output_operand_lossage ("invalid %%E value");
11702 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 1);
11706 /* Like 'J' but get to the GT bit only. */
11707 gcc_assert (GET_CODE (x
) == REG
);
11709 /* Bit 1 is GT bit. */
11710 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
11712 /* Add one for shift count in rlinm for scc. */
11713 fprintf (file
, "%d", i
+ 1);
11717 /* X is a CR register. Print the number of the EQ bit of the CR */
11718 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
11719 output_operand_lossage ("invalid %%E value");
11721 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
11725 /* X is a CR register. Print the shift count needed to move it
11726 to the high-order four bits. */
11727 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
11728 output_operand_lossage ("invalid %%f value");
11730 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
11734 /* Similar, but print the count for the rotate in the opposite
11736 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
11737 output_operand_lossage ("invalid %%F value");
11739 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
11743 /* X is a constant integer. If it is negative, print "m",
11744 otherwise print "z". This is to make an aze or ame insn. */
11745 if (GET_CODE (x
) != CONST_INT
)
11746 output_operand_lossage ("invalid %%G value");
11747 else if (INTVAL (x
) >= 0)
11754 /* If constant, output low-order five bits. Otherwise, write
11757 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INT_LOWPART (x
) & 31);
11759 print_operand (file
, x
, 0);
11763 /* If constant, output low-order six bits. Otherwise, write
11766 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INT_LOWPART (x
) & 63);
11768 print_operand (file
, x
, 0);
11772 /* Print `i' if this is a constant, else nothing. */
11778 /* Write the bit number in CCR for jump. */
11779 i
= ccr_bit (x
, 0);
11781 output_operand_lossage ("invalid %%j code");
11783 fprintf (file
, "%d", i
);
11787 /* Similar, but add one for shift count in rlinm for scc and pass
11788 scc flag to `ccr_bit'. */
11789 i
= ccr_bit (x
, 1);
11791 output_operand_lossage ("invalid %%J code");
11793 /* If we want bit 31, write a shift count of zero, not 32. */
11794 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
11798 /* X must be a constant. Write the 1's complement of the
11801 output_operand_lossage ("invalid %%k value");
11803 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INT_LOWPART (x
));
11807 /* X must be a symbolic constant on ELF. Write an
11808 expression suitable for an 'addi' that adds in the low 16
11809 bits of the MEM. */
11810 if (GET_CODE (x
) != CONST
)
11812 print_operand_address (file
, x
);
11813 fputs ("@l", file
);
11817 if (GET_CODE (XEXP (x
, 0)) != PLUS
11818 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
11819 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
11820 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
11821 output_operand_lossage ("invalid %%K value");
11822 print_operand_address (file
, XEXP (XEXP (x
, 0), 0));
11823 fputs ("@l", file
);
11824 /* For GNU as, there must be a non-alphanumeric character
11825 between 'l' and the number. The '-' is added by
11826 print_operand() already. */
11827 if (INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0)
11829 print_operand (file
, XEXP (XEXP (x
, 0), 1), 0);
11833 /* %l is output_asm_label. */
11836 /* Write second word of DImode or DFmode reference. Works on register
11837 or non-indexed memory only. */
11838 if (GET_CODE (x
) == REG
)
11839 fputs (reg_names
[REGNO (x
) + 1], file
);
11840 else if (GET_CODE (x
) == MEM
)
11842 /* Handle possible auto-increment. Since it is pre-increment and
11843 we have already done it, we can just use an offset of word. */
11844 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
11845 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
11846 output_address (plus_constant (XEXP (XEXP (x
, 0), 0),
11848 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
11849 output_address (plus_constant (XEXP (XEXP (x
, 0), 0),
11852 output_address (XEXP (adjust_address_nv (x
, SImode
,
11856 if (small_data_operand (x
, GET_MODE (x
)))
11857 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
11858 reg_names
[SMALL_DATA_REG
]);
11863 /* MB value for a mask operand. */
11864 if (! mask_operand (x
, SImode
))
11865 output_operand_lossage ("invalid %%m value");
11867 fprintf (file
, "%d", extract_MB (x
));
11871 /* ME value for a mask operand. */
11872 if (! mask_operand (x
, SImode
))
11873 output_operand_lossage ("invalid %%M value");
11875 fprintf (file
, "%d", extract_ME (x
));
11878 /* %n outputs the negative of its operand. */
11881 /* Write the number of elements in the vector times 4. */
11882 if (GET_CODE (x
) != PARALLEL
)
11883 output_operand_lossage ("invalid %%N value");
11885 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
11889 /* Similar, but subtract 1 first. */
11890 if (GET_CODE (x
) != PARALLEL
)
11891 output_operand_lossage ("invalid %%O value");
11893 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
11897 /* X is a CONST_INT that is a power of two. Output the logarithm. */
11899 || INT_LOWPART (x
) < 0
11900 || (i
= exact_log2 (INT_LOWPART (x
))) < 0)
11901 output_operand_lossage ("invalid %%p value");
11903 fprintf (file
, "%d", i
);
11907 /* The operand must be an indirect memory reference. The result
11908 is the register name. */
11909 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
11910 || REGNO (XEXP (x
, 0)) >= 32)
11911 output_operand_lossage ("invalid %%P value");
11913 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
11917 /* This outputs the logical code corresponding to a boolean
11918 expression. The expression may have one or both operands
11919 negated (if one, only the first one). For condition register
11920 logical operations, it will also treat the negated
11921 CR codes as NOTs, but not handle NOTs of them. */
11923 const char *const *t
= 0;
11925 enum rtx_code code
= GET_CODE (x
);
11926 static const char * const tbl
[3][3] = {
11927 { "and", "andc", "nor" },
11928 { "or", "orc", "nand" },
11929 { "xor", "eqv", "xor" } };
11933 else if (code
== IOR
)
11935 else if (code
== XOR
)
11938 output_operand_lossage ("invalid %%q value");
11940 if (GET_CODE (XEXP (x
, 0)) != NOT
)
11944 if (GET_CODE (XEXP (x
, 1)) == NOT
)
11962 /* X is a CR register. Print the mask for `mtcrf'. */
11963 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
11964 output_operand_lossage ("invalid %%R value");
11966 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
11970 /* Low 5 bits of 32 - value */
11972 output_operand_lossage ("invalid %%s value");
11974 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INT_LOWPART (x
)) & 31);
11978 /* PowerPC64 mask position. All 0's is excluded.
11979 CONST_INT 32-bit mask is considered sign-extended so any
11980 transition must occur within the CONST_INT, not on the boundary. */
11981 if (! mask64_operand (x
, DImode
))
11982 output_operand_lossage ("invalid %%S value");
11984 uval
= INT_LOWPART (x
);
11986 if (uval
& 1) /* Clear Left */
11988 #if HOST_BITS_PER_WIDE_INT > 64
11989 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
11993 else /* Clear Right */
11996 #if HOST_BITS_PER_WIDE_INT > 64
11997 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
12003 gcc_assert (i
>= 0);
12004 fprintf (file
, "%d", i
);
12008 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
12009 gcc_assert (GET_CODE (x
) == REG
&& GET_MODE (x
) == CCmode
);
12011 /* Bit 3 is OV bit. */
12012 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
12014 /* If we want bit 31, write a shift count of zero, not 32. */
12015 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
12019 /* Print the symbolic name of a branch target register. */
12020 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
12021 && REGNO (x
) != CTR_REGNO
))
12022 output_operand_lossage ("invalid %%T value");
12023 else if (REGNO (x
) == LR_REGNO
)
12024 fputs (TARGET_NEW_MNEMONICS
? "lr" : "r", file
);
12026 fputs ("ctr", file
);
12030 /* High-order 16 bits of constant for use in unsigned operand. */
12032 output_operand_lossage ("invalid %%u value");
12034 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
12035 (INT_LOWPART (x
) >> 16) & 0xffff);
12039 /* High-order 16 bits of constant for use in signed operand. */
12041 output_operand_lossage ("invalid %%v value");
12043 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
12044 (INT_LOWPART (x
) >> 16) & 0xffff);
12048 /* Print `u' if this has an auto-increment or auto-decrement. */
12049 if (GET_CODE (x
) == MEM
12050 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
12051 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
12052 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
12057 /* Print the trap code for this operand. */
12058 switch (GET_CODE (x
))
12061 fputs ("eq", file
); /* 4 */
12064 fputs ("ne", file
); /* 24 */
12067 fputs ("lt", file
); /* 16 */
12070 fputs ("le", file
); /* 20 */
12073 fputs ("gt", file
); /* 8 */
12076 fputs ("ge", file
); /* 12 */
12079 fputs ("llt", file
); /* 2 */
12082 fputs ("lle", file
); /* 6 */
12085 fputs ("lgt", file
); /* 1 */
12088 fputs ("lge", file
); /* 5 */
12091 gcc_unreachable ();
12096 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
12099 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
12100 ((INT_LOWPART (x
) & 0xffff) ^ 0x8000) - 0x8000);
12102 print_operand (file
, x
, 0);
12106 /* MB value for a PowerPC64 rldic operand. */
12107 val
= (GET_CODE (x
) == CONST_INT
12108 ? INTVAL (x
) : CONST_DOUBLE_HIGH (x
));
12113 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
++)
12114 if ((val
<<= 1) < 0)
12117 #if HOST_BITS_PER_WIDE_INT == 32
12118 if (GET_CODE (x
) == CONST_INT
&& i
>= 0)
12119 i
+= 32; /* zero-extend high-part was all 0's */
12120 else if (GET_CODE (x
) == CONST_DOUBLE
&& i
== 32)
12122 val
= CONST_DOUBLE_LOW (x
);
12128 for ( ; i
< 64; i
++)
12129 if ((val
<<= 1) < 0)
12134 fprintf (file
, "%d", i
+ 1);
12138 if (GET_CODE (x
) == MEM
12139 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
12140 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
12141 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
12146 /* Like 'L', for third word of TImode */
12147 if (GET_CODE (x
) == REG
)
12148 fputs (reg_names
[REGNO (x
) + 2], file
);
12149 else if (GET_CODE (x
) == MEM
)
12151 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
12152 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
12153 output_address (plus_constant (XEXP (XEXP (x
, 0), 0), 8));
12154 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
12155 output_address (plus_constant (XEXP (XEXP (x
, 0), 0), 8));
12157 output_address (XEXP (adjust_address_nv (x
, SImode
, 8), 0));
12158 if (small_data_operand (x
, GET_MODE (x
)))
12159 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
12160 reg_names
[SMALL_DATA_REG
]);
12165 /* X is a SYMBOL_REF. Write out the name preceded by a
12166 period and without any trailing data in brackets. Used for function
12167 names. If we are configured for System V (or the embedded ABI) on
12168 the PowerPC, do not emit the period, since those systems do not use
12169 TOCs and the like. */
12170 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
12172 /* Mark the decl as referenced so that cgraph will output the
12174 if (SYMBOL_REF_DECL (x
))
12175 mark_decl_referenced (SYMBOL_REF_DECL (x
));
12177 /* For macho, check to see if we need a stub. */
12180 const char *name
= XSTR (x
, 0);
12182 if (MACHOPIC_INDIRECT
12183 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
12184 name
= machopic_indirection_name (x
, /*stub_p=*/true);
12186 assemble_name (file
, name
);
12188 else if (!DOT_SYMBOLS
)
12189 assemble_name (file
, XSTR (x
, 0));
12191 rs6000_output_function_entry (file
, XSTR (x
, 0));
12195 /* Like 'L', for last word of TImode. */
12196 if (GET_CODE (x
) == REG
)
12197 fputs (reg_names
[REGNO (x
) + 3], file
);
12198 else if (GET_CODE (x
) == MEM
)
12200 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
12201 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
12202 output_address (plus_constant (XEXP (XEXP (x
, 0), 0), 12));
12203 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
12204 output_address (plus_constant (XEXP (XEXP (x
, 0), 0), 12));
12206 output_address (XEXP (adjust_address_nv (x
, SImode
, 12), 0));
12207 if (small_data_operand (x
, GET_MODE (x
)))
12208 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
12209 reg_names
[SMALL_DATA_REG
]);
12213 /* Print AltiVec or SPE memory operand. */
12218 gcc_assert (GET_CODE (x
) == MEM
);
12222 /* Ugly hack because %y is overloaded. */
12223 if ((TARGET_SPE
|| TARGET_E500_DOUBLE
)
12224 && (GET_MODE_SIZE (GET_MODE (x
)) == 8
12225 || GET_MODE (x
) == TFmode
12226 || GET_MODE (x
) == TImode
))
12228 /* Handle [reg]. */
12229 if (GET_CODE (tmp
) == REG
)
12231 fprintf (file
, "0(%s)", reg_names
[REGNO (tmp
)]);
12234 /* Handle [reg+UIMM]. */
12235 else if (GET_CODE (tmp
) == PLUS
&&
12236 GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
12240 gcc_assert (GET_CODE (XEXP (tmp
, 0)) == REG
);
12242 x
= INTVAL (XEXP (tmp
, 1));
12243 fprintf (file
, "%d(%s)", x
, reg_names
[REGNO (XEXP (tmp
, 0))]);
12247 /* Fall through. Must be [reg+reg]. */
12250 && GET_CODE (tmp
) == AND
12251 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
12252 && INTVAL (XEXP (tmp
, 1)) == -16)
12253 tmp
= XEXP (tmp
, 0);
12254 if (GET_CODE (tmp
) == REG
)
12255 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
12258 gcc_assert (GET_CODE (tmp
) == PLUS
12259 && REG_P (XEXP (tmp
, 0))
12260 && REG_P (XEXP (tmp
, 1)));
12262 if (REGNO (XEXP (tmp
, 0)) == 0)
12263 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
12264 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
12266 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
12267 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
12273 if (GET_CODE (x
) == REG
)
12274 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
12275 else if (GET_CODE (x
) == MEM
)
12277 /* We need to handle PRE_INC and PRE_DEC here, since we need to
12278 know the width from the mode. */
12279 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
12280 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
12281 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
12282 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
12283 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
12284 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
12285 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
12286 output_address (XEXP (XEXP (x
, 0), 1));
12288 output_address (XEXP (x
, 0));
12291 output_addr_const (file
, x
);
12295 assemble_name (file
, rs6000_get_some_local_dynamic_name ());
12299 output_operand_lossage ("invalid %%xn code");
12303 /* Print the address of an operand. */
12306 print_operand_address (FILE *file
, rtx x
)
12308 if (GET_CODE (x
) == REG
)
12309 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
12310 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
12311 || GET_CODE (x
) == LABEL_REF
)
12313 output_addr_const (file
, x
);
12314 if (small_data_operand (x
, GET_MODE (x
)))
12315 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
12316 reg_names
[SMALL_DATA_REG
]);
12318 gcc_assert (!TARGET_TOC
);
12320 else if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == REG
)
12322 gcc_assert (REG_P (XEXP (x
, 0)));
12323 if (REGNO (XEXP (x
, 0)) == 0)
12324 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
12325 reg_names
[ REGNO (XEXP (x
, 0)) ]);
12327 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
12328 reg_names
[ REGNO (XEXP (x
, 1)) ]);
12330 else if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
12331 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
12332 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
12334 else if (GET_CODE (x
) == LO_SUM
&& GET_CODE (XEXP (x
, 0)) == REG
12335 && CONSTANT_P (XEXP (x
, 1)))
12337 output_addr_const (file
, XEXP (x
, 1));
12338 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
12342 else if (GET_CODE (x
) == LO_SUM
&& GET_CODE (XEXP (x
, 0)) == REG
12343 && CONSTANT_P (XEXP (x
, 1)))
12345 fprintf (file
, "lo16(");
12346 output_addr_const (file
, XEXP (x
, 1));
12347 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
12350 else if (legitimate_constant_pool_address_p (x
))
12352 if (TARGET_AIX
&& (!TARGET_ELF
|| !TARGET_MINIMAL_TOC
))
12354 rtx contains_minus
= XEXP (x
, 1);
12358 /* Find the (minus (sym) (toc)) buried in X, and temporarily
12359 turn it into (sym) for output_addr_const. */
12360 while (GET_CODE (XEXP (contains_minus
, 0)) != MINUS
)
12361 contains_minus
= XEXP (contains_minus
, 0);
12363 minus
= XEXP (contains_minus
, 0);
12364 symref
= XEXP (minus
, 0);
12365 gcc_assert (GET_CODE (XEXP (minus
, 1)) == SYMBOL_REF
);
12366 XEXP (contains_minus
, 0) = symref
;
12371 name
= XSTR (symref
, 0);
12372 newname
= alloca (strlen (name
) + sizeof ("@toc"));
12373 strcpy (newname
, name
);
12374 strcat (newname
, "@toc");
12375 XSTR (symref
, 0) = newname
;
12377 output_addr_const (file
, XEXP (x
, 1));
12379 XSTR (symref
, 0) = name
;
12380 XEXP (contains_minus
, 0) = minus
;
12383 output_addr_const (file
, XEXP (x
, 1));
12385 fprintf (file
, "(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
12388 gcc_unreachable ();
12391 /* Target hook for assembling integer objects. The PowerPC version has
12392 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
12393 is defined. It also needs to handle DI-mode objects on 64-bit
12397 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
12399 #ifdef RELOCATABLE_NEEDS_FIXUP
12400 /* Special handling for SI values. */
12401 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
12403 static int recurse
= 0;
12405 /* For -mrelocatable, we mark all addresses that need to be fixed up
12406 in the .fixup section. */
12407 if (TARGET_RELOCATABLE
12408 && in_section
!= toc_section
12409 && in_section
!= text_section
12410 && !unlikely_text_section_p (in_section
)
12412 && GET_CODE (x
) != CONST_INT
12413 && GET_CODE (x
) != CONST_DOUBLE
12419 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
12421 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
12422 fprintf (asm_out_file
, "\t.long\t(");
12423 output_addr_const (asm_out_file
, x
);
12424 fprintf (asm_out_file
, ")@fixup\n");
12425 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
12426 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
12427 fprintf (asm_out_file
, "\t.long\t");
12428 assemble_name (asm_out_file
, buf
);
12429 fprintf (asm_out_file
, "\n\t.previous\n");
12433 /* Remove initial .'s to turn a -mcall-aixdesc function
12434 address into the address of the descriptor, not the function
12436 else if (GET_CODE (x
) == SYMBOL_REF
12437 && XSTR (x
, 0)[0] == '.'
12438 && DEFAULT_ABI
== ABI_AIX
)
12440 const char *name
= XSTR (x
, 0);
12441 while (*name
== '.')
12444 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
12448 #endif /* RELOCATABLE_NEEDS_FIXUP */
12449 return default_assemble_integer (x
, size
, aligned_p
);
12452 #ifdef HAVE_GAS_HIDDEN
12453 /* Emit an assembler directive to set symbol visibility for DECL to
12454 VISIBILITY_TYPE. */
12457 rs6000_assemble_visibility (tree decl
, int vis
)
12459 /* Functions need to have their entry point symbol visibility set as
12460 well as their descriptor symbol visibility. */
12461 if (DEFAULT_ABI
== ABI_AIX
12463 && TREE_CODE (decl
) == FUNCTION_DECL
)
12465 static const char * const visibility_types
[] = {
12466 NULL
, "internal", "hidden", "protected"
12469 const char *name
, *type
;
12471 name
= ((* targetm
.strip_name_encoding
)
12472 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
))));
12473 type
= visibility_types
[vis
];
12475 fprintf (asm_out_file
, "\t.%s\t%s\n", type
, name
);
12476 fprintf (asm_out_file
, "\t.%s\t.%s\n", type
, name
);
12479 default_assemble_visibility (decl
, vis
);
12484 rs6000_reverse_condition (enum machine_mode mode
, enum rtx_code code
)
12486 /* Reversal of FP compares takes care -- an ordered compare
12487 becomes an unordered compare and vice versa. */
12488 if (mode
== CCFPmode
12489 && (!flag_finite_math_only
12490 || code
== UNLT
|| code
== UNLE
|| code
== UNGT
|| code
== UNGE
12491 || code
== UNEQ
|| code
== LTGT
))
12492 return reverse_condition_maybe_unordered (code
);
12494 return reverse_condition (code
);
12497 /* Generate a compare for CODE. Return a brand-new rtx that
12498 represents the result of the compare. */
12501 rs6000_generate_compare (enum rtx_code code
)
12503 enum machine_mode comp_mode
;
12504 rtx compare_result
;
12506 if (rs6000_compare_fp_p
)
12507 comp_mode
= CCFPmode
;
12508 else if (code
== GTU
|| code
== LTU
12509 || code
== GEU
|| code
== LEU
)
12510 comp_mode
= CCUNSmode
;
12511 else if ((code
== EQ
|| code
== NE
)
12512 && GET_CODE (rs6000_compare_op0
) == SUBREG
12513 && GET_CODE (rs6000_compare_op1
) == SUBREG
12514 && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op0
)
12515 && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op1
))
12516 /* These are unsigned values, perhaps there will be a later
12517 ordering compare that can be shared with this one.
12518 Unfortunately we cannot detect the signedness of the operands
12519 for non-subregs. */
12520 comp_mode
= CCUNSmode
;
12522 comp_mode
= CCmode
;
12524 /* First, the compare. */
12525 compare_result
= gen_reg_rtx (comp_mode
);
12527 /* E500 FP compare instructions on the GPRs. Yuck! */
12528 if ((!TARGET_FPRS
&& TARGET_HARD_FLOAT
)
12529 && rs6000_compare_fp_p
)
12531 rtx cmp
, or_result
, compare_result2
;
12532 enum machine_mode op_mode
= GET_MODE (rs6000_compare_op0
);
12534 if (op_mode
== VOIDmode
)
12535 op_mode
= GET_MODE (rs6000_compare_op1
);
12537 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
12538 This explains the following mess. */
12542 case EQ
: case UNEQ
: case NE
: case LTGT
:
12546 cmp
= flag_unsafe_math_optimizations
12547 ? gen_tstsfeq_gpr (compare_result
, rs6000_compare_op0
,
12548 rs6000_compare_op1
)
12549 : gen_cmpsfeq_gpr (compare_result
, rs6000_compare_op0
,
12550 rs6000_compare_op1
);
12554 cmp
= flag_unsafe_math_optimizations
12555 ? gen_tstdfeq_gpr (compare_result
, rs6000_compare_op0
,
12556 rs6000_compare_op1
)
12557 : gen_cmpdfeq_gpr (compare_result
, rs6000_compare_op0
,
12558 rs6000_compare_op1
);
12562 cmp
= flag_unsafe_math_optimizations
12563 ? gen_tsttfeq_gpr (compare_result
, rs6000_compare_op0
,
12564 rs6000_compare_op1
)
12565 : gen_cmptfeq_gpr (compare_result
, rs6000_compare_op0
,
12566 rs6000_compare_op1
);
12570 gcc_unreachable ();
12574 case GT
: case GTU
: case UNGT
: case UNGE
: case GE
: case GEU
:
12578 cmp
= flag_unsafe_math_optimizations
12579 ? gen_tstsfgt_gpr (compare_result
, rs6000_compare_op0
,
12580 rs6000_compare_op1
)
12581 : gen_cmpsfgt_gpr (compare_result
, rs6000_compare_op0
,
12582 rs6000_compare_op1
);
12586 cmp
= flag_unsafe_math_optimizations
12587 ? gen_tstdfgt_gpr (compare_result
, rs6000_compare_op0
,
12588 rs6000_compare_op1
)
12589 : gen_cmpdfgt_gpr (compare_result
, rs6000_compare_op0
,
12590 rs6000_compare_op1
);
12594 cmp
= flag_unsafe_math_optimizations
12595 ? gen_tsttfgt_gpr (compare_result
, rs6000_compare_op0
,
12596 rs6000_compare_op1
)
12597 : gen_cmptfgt_gpr (compare_result
, rs6000_compare_op0
,
12598 rs6000_compare_op1
);
12602 gcc_unreachable ();
12606 case LT
: case LTU
: case UNLT
: case UNLE
: case LE
: case LEU
:
12610 cmp
= flag_unsafe_math_optimizations
12611 ? gen_tstsflt_gpr (compare_result
, rs6000_compare_op0
,
12612 rs6000_compare_op1
)
12613 : gen_cmpsflt_gpr (compare_result
, rs6000_compare_op0
,
12614 rs6000_compare_op1
);
12618 cmp
= flag_unsafe_math_optimizations
12619 ? gen_tstdflt_gpr (compare_result
, rs6000_compare_op0
,
12620 rs6000_compare_op1
)
12621 : gen_cmpdflt_gpr (compare_result
, rs6000_compare_op0
,
12622 rs6000_compare_op1
);
12626 cmp
= flag_unsafe_math_optimizations
12627 ? gen_tsttflt_gpr (compare_result
, rs6000_compare_op0
,
12628 rs6000_compare_op1
)
12629 : gen_cmptflt_gpr (compare_result
, rs6000_compare_op0
,
12630 rs6000_compare_op1
);
12634 gcc_unreachable ();
12638 gcc_unreachable ();
12641 /* Synthesize LE and GE from LT/GT || EQ. */
12642 if (code
== LE
|| code
== GE
|| code
== LEU
|| code
== GEU
)
12648 case LE
: code
= LT
; break;
12649 case GE
: code
= GT
; break;
12650 case LEU
: code
= LT
; break;
12651 case GEU
: code
= GT
; break;
12652 default: gcc_unreachable ();
12655 compare_result2
= gen_reg_rtx (CCFPmode
);
12661 cmp
= flag_unsafe_math_optimizations
12662 ? gen_tstsfeq_gpr (compare_result2
, rs6000_compare_op0
,
12663 rs6000_compare_op1
)
12664 : gen_cmpsfeq_gpr (compare_result2
, rs6000_compare_op0
,
12665 rs6000_compare_op1
);
12669 cmp
= flag_unsafe_math_optimizations
12670 ? gen_tstdfeq_gpr (compare_result2
, rs6000_compare_op0
,
12671 rs6000_compare_op1
)
12672 : gen_cmpdfeq_gpr (compare_result2
, rs6000_compare_op0
,
12673 rs6000_compare_op1
);
12677 cmp
= flag_unsafe_math_optimizations
12678 ? gen_tsttfeq_gpr (compare_result2
, rs6000_compare_op0
,
12679 rs6000_compare_op1
)
12680 : gen_cmptfeq_gpr (compare_result2
, rs6000_compare_op0
,
12681 rs6000_compare_op1
);
12685 gcc_unreachable ();
12689 /* OR them together. */
12690 or_result
= gen_reg_rtx (CCFPmode
);
12691 cmp
= gen_e500_cr_ior_compare (or_result
, compare_result
,
12693 compare_result
= or_result
;
12698 if (code
== NE
|| code
== LTGT
)
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && GET_MODE (rs6000_compare_op0) == TFmode
	  && !TARGET_IEEEQUAD
	  && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (9,
		     gen_rtx_SET (VOIDmode,
				  compare_result,
				  gen_rtx_COMPARE (comp_mode,
						   rs6000_compare_op0,
						   rs6000_compare_op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
      else if (GET_CODE (rs6000_compare_op1) == UNSPEC
	       && XINT (rs6000_compare_op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1 = XVECEXP (rs6000_compare_op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result,
						 rs6000_compare_op0, op1));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result,
						 rs6000_compare_op0, op1));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, compare_result,
				gen_rtx_COMPARE (comp_mode,
						 rs6000_compare_op0,
						 rs6000_compare_op1)));
    }
  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (rs6000_compare_fp_p
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT;  or2 = EQ;  break;
	case GE: or1 = GT;  or2 = EQ;  break;
	case UNEQ: or1 = UNORDERED;  or2 = EQ;  break;
	case LTGT: or1 = LT;  or2 = GT;  break;
	case UNGT: or1 = UNORDERED;  or2 = GT;  break;
	case UNLT: or1 = UNORDERED;  or2 = LT;  break;
	default: gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
/* Emit the RTL for an sCOND pattern.  */

void
rs6000_emit_sCOND (enum rtx_code code, rtx result)
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;

  condition_rtx = rs6000_generate_compare (code);
  cond_code = GET_CODE (condition_rtx);

  if (rs6000_compare_fp_p
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
	emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
				     SImode, XEXP (condition_rtx, 0), const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (rs6000_compare_op0);
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (rs6000_compare_op1);

  if (TARGET_POWERPC64 && (op_mode == DImode || rs6000_compare_fp_p))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum rtx_code code, rtx loc)
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (code);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, loc);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
	 to the GT bit.  */
      switch (code)
	{
	case EQ:
	  /* Opposite of GT.  */
	  code = GT;
	  break;

	case NE:
	  code = UNLE;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.
     The old mnemonics don't have a way to specify this information.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
	 cpus as static prediction overrides processor dynamic
	 prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
  else
    s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
/* Return the string to flip the GT bit on a CR.  */
char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
/* Return insn index for the vector compare instruction for given CODE,
   and DEST_MODE, OP_MODE.  Return INSN_NOT_AVAILABLE if valid insn is
   not available.  */

static int
get_vec_cmp_insn (enum rtx_code code,
		  enum machine_mode dest_mode,
		  enum machine_mode op_mode)
{
  if (!TARGET_ALTIVEC)
    return INSN_NOT_AVAILABLE;

  switch (code)
    {
    case EQ:
      if (dest_mode == V16QImode && op_mode == V16QImode)
	return UNSPEC_VCMPEQUB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
	return UNSPEC_VCMPEQUH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
	return UNSPEC_VCMPEQUW;
      if (dest_mode == V4SImode && op_mode == V4SFmode)
	return UNSPEC_VCMPEQFP;
      break;
    case GE:
      if (dest_mode == V4SImode && op_mode == V4SFmode)
	return UNSPEC_VCMPGEFP;
      /* Only float GE has a direct instruction; integer GE is
	 synthesized as GT || EQ by the caller.  */
      break;
    case GT:
      if (dest_mode == V16QImode && op_mode == V16QImode)
	return UNSPEC_VCMPGTSB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
	return UNSPEC_VCMPGTSH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
	return UNSPEC_VCMPGTSW;
      if (dest_mode == V4SImode && op_mode == V4SFmode)
	return UNSPEC_VCMPGTFP;
      break;
    case GTU:
      if (dest_mode == V16QImode && op_mode == V16QImode)
	return UNSPEC_VCMPGTUB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
	return UNSPEC_VCMPGTUH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
	return UNSPEC_VCMPGTUW;
      break;
    default:
      break;
    }
  return INSN_NOT_AVAILABLE;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    enum machine_mode dmode)
{
  int vec_cmp_insn;
  rtx mask;
  enum machine_mode dest_mode;
  enum machine_mode op_mode = GET_MODE (op1);

  gcc_assert (TARGET_ALTIVEC);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* Floating point vector compare instructions use destination V4SImode.
     Move destination to appropriate mode later.  */
  if (dmode == V4SFmode)
    dest_mode = V4SImode;
  else
    dest_mode = dmode;

  mask = gen_reg_rtx (dest_mode);
  vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);

  if (vec_cmp_insn == INSN_NOT_AVAILABLE)
    {
      bool swap_operands = false;
      bool try_again = false;

      switch (rcode)
	{
	case LT:
	  rcode = GT;
	  swap_operands = true;
	  try_again = true;
	  break;
	case LTU:
	  rcode = GTU;
	  swap_operands = true;
	  try_again = true;
	  break;
	case NE:
	case UNLE:
	case UNLT:
	case UNGE:
	case UNGT:
	  /* Invert condition and try again.
	     e.g., A != B becomes ~(A==B).  */
	  {
	    enum rtx_code rev_code;
	    enum insn_code nor_code;
	    rtx eq_rtx;

	    rev_code = reverse_condition_maybe_unordered (rcode);
	    eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
						 dest_mode);

	    nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
	    gcc_assert (nor_code != CODE_FOR_nothing);
	    emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));

	    if (dmode != dest_mode)
	      {
		rtx temp = gen_reg_rtx (dest_mode);
		convert_move (temp, mask, 0);
		return temp;
	      }
	    return mask;
	  }
	  break;
	case GE:
	case GEU:
	case LE:
	case LEU:
	  /* Try GT/GTU/LT/LTU OR EQ */
	  {
	    rtx c_rtx, eq_rtx;
	    enum insn_code ior_code;
	    enum rtx_code new_code;

	    switch (rcode)
	      {
	      case GE:
		new_code = GT;
		break;

	      case GEU:
		new_code = GTU;
		break;

	      case LE:
		new_code = LT;
		break;

	      case LEU:
		new_code = LTU;
		break;

	      default:
		gcc_unreachable ();
	      }

	    c_rtx = rs6000_emit_vector_compare (new_code,
						op0, op1, dest_mode);
	    eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
						 dest_mode);

	    ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
	    gcc_assert (ior_code != CODE_FOR_nothing);
	    emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	    if (dmode != dest_mode)
	      {
		rtx temp = gen_reg_rtx (dest_mode);
		convert_move (temp, mask, 0);
		return temp;
	      }
	    return mask;
	  }
	  break;
	default:
	  gcc_unreachable ();
	}

      if (try_again)
	{
	  vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
	  /* You only get two chances.  */
	  gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
	}

      if (swap_operands)
	{
	  rtx tmp;
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}
    }

  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_UNSPEC (dest_mode,
					  gen_rtvec (2, op0, op1),
					  GEN_INT (vec_cmp_insn))));
  if (dmode != dest_mode)
    {
      rtx temp = gen_reg_rtx (dest_mode);
      convert_move (temp, mask, 0);
      return temp;
    }
  return mask;
}
/* Return vector select instruction for MODE.  Return INSN_NOT_AVAILABLE, if
   valid insn doesn't exist for given mode.  */

static int
get_vsel_insn (enum machine_mode mode)
{
  switch (mode)
    {
    case V4SImode:
      return UNSPEC_VSEL4SI;
      break;
    case V4SFmode:
      return UNSPEC_VSEL4SF;
      break;
    case V8HImode:
      return UNSPEC_VSEL8HI;
      break;
    case V16QImode:
      return UNSPEC_VSEL16QI;
      break;
    default:
      return INSN_NOT_AVAILABLE;
      break;
    }
  return INSN_NOT_AVAILABLE;
}
/* Emit vector select insn where DEST is destination using
   operands OP1, OP2 and MASK.  */

static void
rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
{
  rtx t, temp;
  enum machine_mode dest_mode = GET_MODE (dest);
  int vsel_insn_index = get_vsel_insn (GET_MODE (dest));

  temp = gen_reg_rtx (dest_mode);

  /* For each vector element, select op1 when mask is 1 otherwise
     select op2.  */
  t = gen_rtx_SET (VOIDmode, temp,
		   gen_rtx_UNSPEC (dest_mode,
				   gen_rtvec (3, op2, op1, mask),
				   GEN_INT (vsel_insn_index)));
  emit_insn (t);
  emit_move_insn (dest, temp);
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum machine_mode dest_mode = GET_MODE (dest);
  enum rtx_code rcode = GET_CODE (cond);
  rtx mask;

  if (!TARGET_ALTIVEC)
    return 0;

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);

  rs6000_emit_vector_select (dest, op1, op2, mask);

  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = rs6000_compare_op0;
  rtx op1 = rs6000_compare_op1;
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (! rs6000_compare_fp_p)
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
	   && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;

  /* All isel implementations thus far are 32-bits.  */
  if (GET_MODE (rs6000_compare_op0) != SImode)
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (GET_CODE (op));
  cr = XEXP (condition_rtx, 0);

  if (GET_MODE (cr) == CCmode)
    emit_insn (gen_isel_signed (dest, condition_rtx,
				true_cond, false_cond, cr));
  else
    emit_insn (gen_isel_unsigned (dest, condition_rtx,
				  true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);
  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }
  else
    return "isel %0,%2,%3,%j1";
}
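/* Example (illustrative): isel can only test a single CR bit directly,
   so for a GE condition the code above reverses it to LT and swaps the
   move operands; "dest = (a >= b) ? x : y" thus comes out as
   "isel dest,y,x,lt-bit", which selects y when a < b.  */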
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* Emit instructions to perform a load-reserved/store-conditional operation.
   The operation performed is an atomic
   (set M (CODE:MODE M OP))
   If not NULL, BEFORE is atomically set to M before the operation, and
   AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
   If SYNC_P then a memory barrier is emitted before the operation.
   Either OP or M may be wrapped in a NOT operation.  */

void
rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
		  rtx m, rtx op, rtx before_param, rtx after_param,
		  bool sync_p)
{
  enum machine_mode used_mode;
  rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
  rtx used_m;
  rtvec vec;
  HOST_WIDE_INT imask = GET_MODE_MASK (mode);
  rtx shift = NULL_RTX;

  if (sync_p)
    emit_insn (gen_memory_barrier ());

  if (GET_CODE (m) == NOT)
    used_m = XEXP (m, 0);
  else
    used_m = m;

  /* If this is smaller than SImode, we'll have to use SImode with
     adjustments.  */
  if (mode == QImode || mode == HImode)
    {
      rtx newop, oldop;

      if (MEM_ALIGN (used_m) >= 32)
	{
	  int ishift = 0;
	  if (BYTES_BIG_ENDIAN)
	    ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);

	  shift = GEN_INT (ishift);
	  used_m = change_address (used_m, SImode, 0);
	}
      else
	{
	  rtx addrSI, aligned_addr;
	  int shift_mask = mode == QImode ? 0x18 : 0x10;

	  addrSI = gen_lowpart_common (SImode,
				       force_reg (Pmode, XEXP (used_m, 0)));
	  addrSI = force_reg (SImode, addrSI);
	  shift = gen_reg_rtx (SImode);

	  emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
				 GEN_INT (shift_mask)));
	  emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));

	  aligned_addr = expand_binop (Pmode, and_optab,
				       XEXP (used_m, 0),
				       GEN_INT (-4), NULL_RTX,
				       1, OPTAB_LIB_WIDEN);
	  used_m = change_address (used_m, SImode, aligned_addr);
	  set_mem_align (used_m, 32);
	}
      /* It's safe to keep the old alias set of USED_M, because
	 the operation is atomic and only affects the original
	 USED_M.  */
      if (GET_CODE (m) == NOT)
	m = gen_rtx_NOT (SImode, used_m);
      else
	m = used_m;

      if (GET_CODE (op) == NOT)
	{
	  oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
	  oldop = gen_rtx_NOT (SImode, oldop);
	}
      else
	oldop = lowpart_subreg (SImode, op, mode);

      switch (code)
	{
	case IOR:
	case XOR:
	  newop = expand_binop (SImode, and_optab,
				oldop, GEN_INT (imask), NULL_RTX,
				1, OPTAB_LIB_WIDEN);
	  emit_insn (gen_ashlsi3 (newop, newop, shift));
	  break;

	case AND:
	  newop = expand_binop (SImode, ior_optab,
				oldop, GEN_INT (~imask), NULL_RTX,
				1, OPTAB_LIB_WIDEN);
	  emit_insn (gen_rotlsi3 (newop, newop, shift));
	  break;

	case PLUS:
	case MINUS:
	  {
	    rtx mask;

	    newop = expand_binop (SImode, and_optab,
				  oldop, GEN_INT (imask), NULL_RTX,
				  1, OPTAB_LIB_WIDEN);
	    emit_insn (gen_ashlsi3 (newop, newop, shift));

	    mask = gen_reg_rtx (SImode);
	    emit_move_insn (mask, GEN_INT (imask));
	    emit_insn (gen_ashlsi3 (mask, mask, shift));

	    if (code == PLUS)
	      newop = gen_rtx_PLUS (SImode, m, newop);
	    else
	      newop = gen_rtx_MINUS (SImode, m, newop);
	    newop = gen_rtx_AND (SImode, newop, mask);
	    newop = gen_rtx_IOR (SImode, newop,
				 gen_rtx_AND (SImode,
					      gen_rtx_NOT (SImode, mask),
					      m));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}

      if (GET_CODE (m) == NOT)
	{
	  rtx mask, xorm;

	  mask = gen_reg_rtx (SImode);
	  emit_move_insn (mask, GEN_INT (imask));
	  emit_insn (gen_ashlsi3 (mask, mask, shift));

	  xorm = gen_rtx_XOR (SImode, used_m, mask);
	  /* Depending on the value of 'op', the XOR or the operation might
	     be able to be simplified away.  */
	  newop = simplify_gen_binary (code, SImode, xorm, newop);
	}
      op = newop;
      used_mode = SImode;
      before = gen_reg_rtx (used_mode);
      after = gen_reg_rtx (used_mode);
    }
  else
    {
      used_mode = mode;
      before = before_param;
      after = after_param;

      if (before == NULL_RTX)
	before = gen_reg_rtx (used_mode);
      if (after == NULL_RTX)
	after = gen_reg_rtx (used_mode);
    }

  if ((code == PLUS || code == MINUS || GET_CODE (m) == NOT)
      && used_mode != mode)
    the_op = op;  /* Computed above.  */
  else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
    the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
  else
    the_op = gen_rtx_fmt_ee (code, used_mode, m, op);

  set_after = gen_rtx_SET (VOIDmode, after, the_op);
  set_before = gen_rtx_SET (VOIDmode, before, used_m);
  set_atomic = gen_rtx_SET (VOIDmode, used_m,
			    gen_rtx_UNSPEC (used_mode,
					    gen_rtvec (1, the_op),
					    UNSPEC_SYNC_OP));
  cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));

  if ((code == PLUS || code == MINUS) && used_mode != mode)
    vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
  else
    vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
  emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));

  /* Shift and mask the return values properly.  */
  if (used_mode != mode && before_param)
    {
      emit_insn (gen_lshrsi3 (before, before, shift));
      convert_move (before_param, before, 1);
    }

  if (used_mode != mode && after_param)
    {
      emit_insn (gen_lshrsi3 (after, after, shift));
      convert_move (after_param, after, 1);
    }

  /* The previous sequence will end with a branch that's dependent on
     the conditional store, so placing an isync will ensure that no
     other instructions (especially, no load or store instructions)
     can start before the atomic operation completes.  */
  if (sync_p)
    emit_insn (gen_isync ());
}
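/* Subword shift example (illustrative, big-endian): for a QImode
   operand whose address has low bits 0b10, the rlwinm above computes
   (addr << 3) & 0x18 = 16, and the xor with 0x18 yields shift = 8,
   which is exactly the left-shift needed to place byte 2 of a
   big-endian aligned SImode word (its bits 15..8) into position.  */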
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_memory_barrier ());

  emit_insn (fn (res, mem, val));
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
			rtx before, rtx after, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_memory_barrier ());

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
  else if (code == AND)
    x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);

  if (after != NULL_RTX)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_isync ());
}
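/* As an illustrative sketch (not verbatim compiler output), the SImode
   sequence this splitter produces has the canonical reservation-loop
   shape:

	sync
     1:	lwarx	rBEFORE, 0, rMEM
	<op>	rSCRATCH, rBEFORE, rVAL
	stwcx.	rSCRATCH, 0, rMEM
	bne-	1b
	isync  */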
/* Expand an atomic compare and swap operation.  MEM is the memory on which
   to operate.  OLDVAL is the old value to be compared.  NEWVAL is the new
   value to be stored.  SCRATCH is a scratch GPR.  */

void
rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
			       rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_memory_barrier ());

  label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (mode, retval, mem);

  x = gen_rtx_COMPARE (CCmode, retval, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  emit_move_insn (scratch, newval);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_isync ());
  emit_label (XEXP (label2, 0));
}
/* Expand an atomic test and set operation.  MEM is the memory on which
   to operate.  VAL is the value set.  SCRATCH is a scratch GPR.  */

void
rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_memory_barrier ());

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_isync ());
}
void
rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addrSI, align, wdst, shift, mask;
  HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
  HOST_WIDE_INT imask = GET_MODE_MASK (mode);

  /* Shift amount for subword relative to aligned word.  */
  addrSI = force_reg (SImode, gen_lowpart_common (SImode, XEXP (mem, 0)));
  shift = gen_reg_rtx (SImode);
  emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
			 GEN_INT (shift_mask)));
  emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));

  /* Shift and mask old value into position within word.  */
  oldval = convert_modes (SImode, mode, oldval, 1);
  oldval = expand_binop (SImode, and_optab,
			 oldval, GEN_INT (imask), NULL_RTX,
			 1, OPTAB_LIB_WIDEN);
  emit_insn (gen_ashlsi3 (oldval, oldval, shift));

  /* Shift and mask new value into position within word.  */
  newval = convert_modes (SImode, mode, newval, 1);
  newval = expand_binop (SImode, and_optab,
			 newval, GEN_INT (imask), NULL_RTX,
			 1, OPTAB_LIB_WIDEN);
  emit_insn (gen_ashlsi3 (newval, newval, shift));

  /* Mask for insertion.  */
  mask = gen_reg_rtx (SImode);
  emit_move_insn (mask, GEN_INT (imask));
  emit_insn (gen_ashlsi3 (mask, mask, shift));

  /* Address of aligned word containing subword.  */
  align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
			NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = change_address (mem, SImode, align);
  set_mem_align (mem, 32);
  MEM_VOLATILE_P (mem) = 1;

  wdst = gen_reg_rtx (SImode);
  emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
						    oldval, newval, mem));

  /* Shift the result back.  */
  emit_insn (gen_lshrsi3 (wdst, wdst, shift));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
void
rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
				  rtx oldval, rtx newval, rtx mem,
				  rtx scratch)
{
  rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_memory_barrier ());
  label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (SImode, scratch, mem);

  /* Mask subword within loaded value for comparison with oldval.
     Use UNSPEC_AND to avoid clobber.  */
  emit_insn (gen_rtx_SET (SImode, dest,
			  gen_rtx_UNSPEC (SImode,
					  gen_rtvec (2, scratch, mask),
					  UNSPEC_AND)));

  x = gen_rtx_COMPARE (CCmode, dest, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  /* Clear subword within loaded value for insertion of new value.  */
  emit_insn (gen_rtx_SET (SImode, scratch,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask), scratch)));
  emit_insn (gen_iorsi3 (scratch, scratch, newval));
  emit_store_conditional (SImode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_isync ());
  emit_label (XEXP (label2, 0));
}
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (VOIDmode,
				simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (TARGET_32BIT
			 ? gen_addsi3 (breg, breg, delta_rtx)
			 : gen_adddi3 (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src))
	    {
	      rtx basereg;
	      basereg = gen_rtx_REG (Pmode, reg);
	      emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
	      src = replace_equiv_address (src, basereg);
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}

      if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
				: gen_movsi_update (breg, breg, delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (TARGET_32BIT
			   ? gen_addsi3 (breg, breg, delta_rtx)
			   : gen_adddi3 (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else
	    gcc_assert (rs6000_offsettable_memref_p (dst));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  ++j;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (VOIDmode,
				  simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
    }
}
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (df_regs_ever_live_p (first_reg)
	&& (! call_used_regs[first_reg]
	    || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
		&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
		    || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
		    || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
      break;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (df_regs_ever_live_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && ! (cfun->calls_setjmp && flag_exceptions)
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if ( GET_CODE (insn) == CALL_INSN
	     && SIBLING_CALL_P (insn))
	  {
	    info_ptr->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info_ptr->vrsave_size  = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
	info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info_ptr->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4       8 (8-11)
		+---------------------------------------+
		| saved LR				| 8       16
		+---------------------------------------+
		| reserved for compilers		| 12      24
		+---------------------------------------+
		| reserved for binders			| 16      32
		+---------------------------------------+
		| saved TOC pointer			| 20      40
		+---------------------------------------+
		| Parameter save area (P)		| 24      48
		+---------------------------------------+
		| Alloca space (A)			| 24+P    etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+X+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+X+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (P)		| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| SPE: area for 64-bit GP registers	|
		+---------------------------------------+
		| SPE alignment padding			|
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t *
rs6000_stack_info (void)
{
  static rs6000_stack_t info;
  rs6000_stack_t *info_ptr = &info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;

  memset (&info, 0, sizeof (info));

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
	cfun->machine->insn_chain_scanned_p
	  = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So... since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
				 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! current_function_is_leaf
		       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the link register.  */
  if ((DEFAULT_ABI == ABI_AIX
       && crtl->profile
       && !TARGET_PROFILE_KERNEL)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || (info_ptr->first_fp_reg_save != 64
	  && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
      || info_ptr->calls_p
      || rs6000_ra_ever_killed ())
    {
      info_ptr->lr_save_p = 1;
      df_set_regs_ever_live (LR_REGNO, true);
    }

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
		       && info_ptr->spe_64bit_regs_used != 0
		       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size = reg_size;
  info_ptr->fixed_size = RS6000_SAVE_AREA;
  info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
				      TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
		       + info_ptr->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info_ptr->fixed_size + info_ptr->vars_size
	    + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = info_ptr->vrsave_save_offset & 0xF;
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;
	  gcc_assert (info_ptr->altivec_size == 0
		      || info_ptr->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
	}
      else
	info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
	{
	  /* Align stack so SPE GPR save area is aligned on a
	     double-word boundary.  */
	  if (info_ptr->spe_gp_size != 0)
	    info_ptr->spe_padding_size
	      = 8 - (-info_ptr->cr_save_offset % 8);
	  else
	    info_ptr->spe_padding_size = 0;

	  info_ptr->spe_gp_save_offset
	    = info_ptr->cr_save_offset
	    - info_ptr->spe_padding_size
	    - info_ptr->spe_gp_size;

	  /* Adjust for SPE case.  */
	  info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
	}
      else if (TARGET_ALTIVEC_ABI)
	{
	  info_ptr->vrsave_save_offset
	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info_ptr->altivec_size != 0)
	    info_ptr->altivec_padding_size
	      = 16 - (-info_ptr->vrsave_save_offset % 16);
	  else
	    info_ptr->altivec_padding_size = 0;

	  info_ptr->altivec_save_offset
	    = info_ptr->vrsave_save_offset
	    - info_ptr->altivec_padding_size
	    - info_ptr->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
	}
      else
	info_ptr->ehrd_offset = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset -= ehrd_size;
      info_ptr->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
				      + info_ptr->gp_size
				      + info_ptr->altivec_size
				      + info_ptr->altivec_padding_size
				      + info_ptr->spe_gp_size
				      + info_ptr->spe_padding_size
				      + ehrd_size
				      + info_ptr->cr_size
				      + info_ptr->vrsave_size,
				      save_align);

  non_fixed_size = (info_ptr->vars_size
		    + info_ptr->parm_size
		    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
				       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx i;

	  /* FIXME: This should be implemented with attributes...

		 (set_attr "spe64" "true")....then,
	         if (get_spe64(insn)) return true;

	     It's the only reliable way to do the stuff below.  */

	  i = PATTERN (insn);
	  if (GET_CODE (i) == SET)
	    {
	      enum machine_mode mode = GET_MODE (SET_SRC (i));

	      if (SPE_VECTOR_MODE (mode))
		return true;
	      if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
		return true;
	    }
	}
    }

  return false;
}
static void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		abi_string = "Unknown";	break;
    case ABI_NONE:	abi_string = "NONE";	break;
    case ABI_AIX:	abi_string = "AIX";	break;
    case ABI_DARWIN:	abi_string = "Darwin";	break;
    case ABI_V4:	abi_string = "V.4";	break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
	     info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = "HOST_WIDE_INT_PRINT_DEC"\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
	     info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
	gen_rtx_MEM
	  (Pmode,
	   memory_address
	     (Pmode,
	      plus_constant (copy_to_reg
			     (gen_rtx_MEM (Pmode,
					   memory_address (Pmode, frame))),
			     RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
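
/* Illustration, not part of GCC: user-level code that exercises this
   hook.  A nonzero level (or PIC on non-AIX ABIs) takes the slow path
   above, which walks the back chain and sets ra_needs_full_frame;
   level 0 in the common case just reads the LR value captured at
   function entry.  */
#if 0
void *caller_pc (void)
{
  return __builtin_return_address (0);   /* fast path: LR on entry */
}

void *callers_caller_pc (void)
{
  return __builtin_return_address (1);   /* count != 0: full frame walk */
}
#endif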
/* Say whether a function is a candidate for sibcall handling or not.
   We do not allow indirect calls to be optimized into sibling calls.
   Also, we can't do it if there are any vector parameters; there's
   nowhere to put the VRsave code so it works; note that functions with
   vector parameters are required to have a prototype, so the argument
   type info must be available here.  (The tail recursion case can work
   with vector parameters, but there's no way to distinguish here.) */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  tree type;

  if (decl)
    {
      if (TARGET_ALTIVEC_VRSAVE)
	{
	  for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
	       type; type = TREE_CHAIN (type))
	    {
	      if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
		return false;
	    }
	}
      if (DEFAULT_ABI == ABI_DARWIN
	  || ((*targetm.binds_local_p) (decl)
	      && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
	{
	  tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));

	  if (!lookup_attribute ("longcall", attr_list)
	      || lookup_attribute ("shortcall", attr_list))
	    return true;
	}
    }
  return false;
}
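
/* Illustration, not part of GCC (assumes -maltivec so vector types are
   available): the kinds of calls this predicate accepts and rejects.  */
#if 0
#include <altivec.h>

extern int scalar_helper (int);
extern __vector int vec_helper (__vector int);

int tail_ok (int x)
{
  return scalar_helper (x + 1);   /* direct call, no vector args: may
				     become a sibling call */
}

__vector int tail_blocked (__vector int v)
{
  return vec_helper (v);   /* vector parameter: nowhere to put the
			      VRsave code, so no sibcall */
}
#endif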
/* Return NULL if INSN is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
	  || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
static int
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (crtl->is_thunk)
    return 0;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1,
		      gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest,
			  gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
	  emit_move_insn (dest,
			  gen_rtx_REG (Pmode, LR_REGNO));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx pat = PATTERN (insn);
	int i;

	if (GET_CODE (pat) == PARALLEL)
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      rtx sub = XVECEXP (pat, 0, i);
	      if (GET_CODE (sub) == USE)
		{
		  sub = XEXP (sub, 0);
		  if (GET_CODE (sub) == UNSPEC
		      && XINT (sub, 1) == UNSPEC_TOC)
		    return 1;
		}
	    }
      }
  return 0;
}
#endif

rtx
create_TOC_reference (rtx symbol)
{
  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);
  return gen_rtx_PLUS (Pmode,
	   gen_rtx_REG (Pmode, TOC_REGISTER),
	     gen_rtx_CONST (Pmode,
	       gen_rtx_MINUS (Pmode, symbol,
		 gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
}
/* If _Unwind_* has been called from within the same module,
   toc register is not guaranteed to be saved to 40(1) on function
   entry.  Save it there in that case.  */

void
rs6000_aix_emit_builtin_unwind_init (void)
{
  rtx mem;
  rtx stack_top = gen_reg_rtx (Pmode);
  rtx opcode_addr = gen_reg_rtx (Pmode);
  rtx opcode = gen_reg_rtx (SImode);
  rtx tocompare = gen_reg_rtx (SImode);
  rtx no_toc_save_needed = gen_label_rtx ();

  mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
  emit_move_insn (stack_top, mem);

  mem = gen_frame_mem (Pmode,
		       gen_rtx_PLUS (Pmode, stack_top,
				     GEN_INT (2 * GET_MODE_SIZE (Pmode))));
  emit_move_insn (opcode_addr, mem);
  emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
  emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
					   : 0xE8410028, SImode));

  do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
			   SImode, NULL_RTX, NULL_RTX,
			   no_toc_save_needed);

  mem = gen_frame_mem (Pmode,
		       gen_rtx_PLUS (Pmode, stack_top,
				     GEN_INT (5 * GET_MODE_SIZE (Pmode))));
  emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
  emit_label (no_toc_save_needed);
}
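
/* Illustration, not compiled into GCC: decoding the two magic opcodes
   compared above.  0x80410014 is "lwz r2,20(r1)" and 0xE8410028 is
   "ld r2,40(r1)" -- the instruction that reloads the TOC pointer from
   its ABI-mandated stack slot after a call.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void decode (uint32_t insn, unsigned *opcd, unsigned *rt,
		    unsigned *ra, int *d)
{
  *opcd = insn >> 26;		/* primary opcode */
  *rt = (insn >> 21) & 31;	/* target register */
  *ra = (insn >> 16) & 31;	/* base register */
  *d = (int16_t) (insn & 0xffff);	/* displacement field */
}

int main (void)
{
  unsigned opcd, rt, ra; int d;

  decode (0x80410014, &opcd, &rt, &ra, &d);
  assert (opcd == 32 && rt == 2 && ra == 1 && d == 20);	/* lwz r2,20(r1) */

  decode (0xE8410028, &opcd, &rt, &ra, &d);
  /* DS-form: the low two bits are the extended opcode, so the
     displacement is d with those bits masked off.  */
  assert (opcd == 58 && rt == 2 && ra == 1 && (d & ~3) == 40);	/* ld r2,40(r1) */
  return 0;
}
#endif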
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
			   gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));

  emit_insn (gen_stack_tie (mem));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_R12, make sure a copy of the old frame is left in r12.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  emit_insn (TARGET_32BIT
		     ? gen_addsi3 (tmp_reg,
				   stack_limit_rtx,
				   GEN_INT (size))
		     : gen_adddi3 (tmp_reg,
				   stack_limit_rtx,
				   GEN_INT (size)));

	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (copy_r12 || ! TARGET_UPDATE)
    emit_move_insn (gen_rtx_REG (Pmode, 12), stack_reg);

  if (TARGET_UPDATE)
    {
      if (size > 32767)
	{
	  /* Need a note here so that try_split doesn't get confused.  */
	  if (get_last_insn () == NULL_RTX)
	    emit_note (NOTE_INSN_DELETED);
	  insn = emit_move_insn (tmp_reg, todec);
	  try_split (PATTERN (insn), insn, 0);
	  todec = tmp_reg;
	}

      insn = emit_insn (TARGET_32BIT
			? gen_movsi_update (stack_reg, stack_reg,
					    todec, stack_reg)
			: gen_movdi_di_update (stack_reg, stack_reg,
					       todec, stack_reg));
    }
  else
    {
      insn = emit_insn (TARGET_32BIT
			? gen_addsi3 (stack_reg, stack_reg, todec)
			: gen_adddi3 (stack_reg, stack_reg, todec));
      emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
		      gen_rtx_REG (Pmode, 12));
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  REG_NOTES (insn) =
    gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
		       gen_rtx_SET (VOIDmode, stack_reg,
				    gen_rtx_PLUS (Pmode, stack_reg,
						  GEN_INT (-size))),
		       REG_NOTES (insn));
}
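
/* Illustration, not part of GCC: a C-level sketch of the stack-limit
   check the two branches above emit (driven by -fstack-limit-register
   or -fstack-limit-symbol, which set stack_limit_rtx and
   crtl->limit_stack).  Compute limit + size into a temporary, then
   trap if decrementing the stack pointer would drop below the limit.  */
#if 0
#include <stdlib.h>

void allocate_checked (unsigned long sp, unsigned long limit,
		       unsigned long size)
{
  unsigned long tmp = limit + size;	/* gen_addsi3/gen_adddi3 above */
  if (sp < tmp)				/* gen_cond_trap (LTU, ...) */
    abort ();				/* the trap instruction fires */
  /* otherwise sp -= size and the frame allocation proceeds */
}
#endif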
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  */

static void
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx rreg)
{
  rtx real, temp;

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  real = replace_rtx (real, reg,
		      gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
							STACK_POINTER_REGNUM),
				    GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
	SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
	SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
	{
	  temp = simplify_rtx (XEXP (SET_DEST (set), 0));
	  if (temp)
	    XEXP (SET_DEST (set), 0) = temp;
	}
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
	if (GET_CODE (XVECEXP (real, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (real, 0, i);

	    temp = simplify_rtx (SET_SRC (set));
	    if (temp)
	      SET_SRC (set) = temp;
	    temp = simplify_rtx (SET_DEST (set));
	    if (temp)
	      SET_DEST (set) = temp;
	    if (GET_CODE (SET_DEST (set)) == MEM)
	      {
		temp = simplify_rtx (XEXP (SET_DEST (set), 0));
		if (temp)
		  XEXP (SET_DEST (set), 0) = temp;
	      }
	    RTX_FRAME_RELATED_P (set) = 1;
	  }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					real,
					REG_NOTES (insn));
}
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
		   vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (VOIDmode,
			     reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
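
/* Illustration, not compiled into GCC: how a VRSAVE mask is built.
   This assumes ALTIVEC_REG_BIT uses the usual "0x80000000 >> n"
   mapping (v0 is the most significant bit, v31 the least); check
   rs6000.h for the real macro.  */
#if 0
#include <assert.h>

#define ALTIVEC_REG_BIT_N(n) (0x80000000u >> (n))

int main (void)
{
  unsigned int mask = 0;

  mask |= ALTIVEC_REG_BIT_N (20);	/* the function uses v20 */
  mask |= ALTIVEC_REG_BIT_N (31);	/* ... and v31 */

  assert (mask == 0x00000801);		/* bit 11 | bit 0 */
  return 0;
}
#endif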
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static void
emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT total_size)
{
  rtx reg, offset_rtx, insn, mem, addr, int_rtx;
  rtx replacea, replaceb;

  int_rtx = GEN_INT (offset);

  /* Some cases that need register indexed addressing.  */
  if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
      || (TARGET_E500_DOUBLE && mode == DFmode)
      || (TARGET_SPE_ABI
	  && SPE_VECTOR_MODE (mode)
	  && !SPE_CONST_OFFSET_OK (offset)))
    {
      /* Whoever calls us must make sure r11 is available in the
	 flow path of instructions in the prologue.  */
      offset_rtx = gen_rtx_REG (Pmode, 11);
      emit_move_insn (offset_rtx, int_rtx);

      replacea = offset_rtx;
      replaceb = int_rtx;
    }
  else
    {
      offset_rtx = int_rtx;
      replacea = NULL_RTX;
      replaceb = NULL_RTX;
    }

  reg = gen_rtx_REG (mode, regno);
  addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
  mem = gen_frame_mem (mode, addr);

  insn = emit_move_insn (mem, reg);

  rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
/* Look for user-defined global regs.  We should not save and restore these,
   and cannot use stmw/lmw if there are any in its range.  */

static bool
no_global_regs_above (int first_greg)
{
  int i;
  for (i = 0; i < 32 - first_greg; i++)
    if (global_regs[first_greg + i])
      return false;
  return true;
}
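
/* Illustration, not part of GCC: the kind of user code that sets
   global_regs[].  With this declaration visible, r30 belongs to the
   user for the whole program, so the prologue must not save it and an
   stmw/lmw whose range covers r30 would corrupt it.  */
#if 0
register long *heap_top __asm__ ("r30");   /* global register variable */
#endif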
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  return ((df_regs_ever_live_p (reg)
	   && (!call_used_regs[reg]
	       || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
		   && TARGET_TOC && TARGET_MINIMAL_TOC)))
	  || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
	      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
		  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int saving_FPRs_inline;
  int using_store_multiple;
  HOST_WIDE_INT sp_offset = 0;

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
	 address by modifying the first 5 instructions of the function
	 to branch to the overriding function.  This is necessary to
	 permit function pointers that point to the old function to
	 actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
			  && (!TARGET_SPE_ABI
			      || info->spe_64bit_regs_used == 0)
			  && info->first_gp_reg_save < 31
			  && no_global_regs_above (info->first_gp_reg_save));
  saving_FPRs_inline = (info->first_fp_reg_save == 64
			|| FP_SAVE_INLINE (info->first_fp_reg_save)
			|| crtl->calls_eh_return
			|| cfun->machine->ra_need_lr);

  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
	  || crtl->calls_eh_return))
    {
      if (info->total_size < 32767)
	sp_offset = info->total_size;
      else
	frame_reg_rtx = frame_ptr_rtx;
      rs6000_emit_allocate_stack (info->total_size,
				  (frame_reg_rtx != sp_reg_rtx
				   && (info->cr_save_p
				       || info->lr_save_p
				       || info->first_fp_reg_save < 64
				       || info->first_gp_reg_save < 32
				       )));
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie ();
    }

  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0. */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
	{
	  insn = emit_move_insn (reg0,
				 gen_rtx_REG (Pmode, LR_REGNO));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
	 assumptions about the offsets of various bits of the stack
	 frame.  */
      gcc_assert (info->gp_save_offset == -220
		  && info->fp_save_offset == -144
		  && info->lr_save_offset == 8
		  && info->cr_save_offset == 4
		  && info->push_p
		  && info->lr_save_p
		  && (!crtl->calls_eh_return
		      || info->ehrd_offset == -432)
		  && info->vrsave_save_offset == -224
		  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
	 in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
					    gen_rtx_REG (SImode,
							 LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_SYMBOL_REF (Pmode,
							    "*save_world"));
      /* We do floats first so that the instruction pattern matches
	 properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->fp_save_offset
					    + sp_offset + 8 * i));
	  rtx mem = gen_frame_mem (DFmode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->altivec_save_offset
					    + sp_offset + 16 * i));
	  rtx mem = gen_frame_mem (V4SImode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
	}
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + sp_offset + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
	}

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
	rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				 GEN_INT (info->cr_save_offset
					  + sp_offset));
	rtx mem = gen_frame_mem (reg_mode, addr);

	RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
      }
      /* Explain about use of R0.  */
      if (info->lr_save_p)
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->lr_save_offset
					    + sp_offset));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
	}
      /* Explain what happens to the stack pointer.  */
      {
	rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
	RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
			    treg, GEN_INT (-info->total_size));
      sp_offset = info->total_size;
    }

  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
			     gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			   GEN_INT (info->lr_save_offset + sp_offset));
      reg = gen_rtx_REG (Pmode, 0);
      mem = gen_rtx_MEM (Pmode, addr);
      /* This should not be of rs6000_sr_alias_set, because of
	 __builtin_return_address.  */

      insn = emit_move_insn (mem, reg);
      rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
			    NULL_RTX, NULL_RTX);
    }

  /* If we need to save CR, put it into r12.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
    {
      rtx set;

      cr_save_rtx = gen_rtx_REG (SImode, 12);
      insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
      /* Now, there's no way that dwarf2out_frame_debug_expr is going
	 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
	 But that's OK.  All we have to do is specify that _one_ condition
	 code register is saved in this stack slot.  The thrower's epilogue
	 will then restore all the call-saved registers.
	 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux.  */
      set = gen_rtx_SET (VOIDmode, cr_save_rtx,
			 gen_rtx_REG (SImode, CR2_REGNO));
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					    set,
					    REG_NOTES (insn));
    }

  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
    {
      int i;
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
	     && ! call_used_regs[info->first_fp_reg_save+i]))
	  emit_frame_save (frame_reg_rtx, frame_ptr_rtx, DFmode,
			   info->first_fp_reg_save + i,
			   info->fp_save_offset + sp_offset + 8 * i,
			   info->total_size);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      int i;
      char rname[30];
      const char *alloc_rname;
      rtvec p;
      p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);

      RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
					  gen_rtx_REG (Pmode,
						       LR_REGNO));
      sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
	       info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
      alloc_rname = ggc_strdup (rname);
      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
				      gen_rtx_SYMBOL_REF (Pmode,
							  alloc_rname));
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	{
	  rtx addr, reg, mem;
	  reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->fp_save_offset
					+ sp_offset + 8*i));
	  mem = gen_frame_mem (DFmode, addr);

	  RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
	}
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
			    NULL_RTX, NULL_RTX);
    }

  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info) && using_store_multiple)
    {
      rtvec p;
      int i;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx addr, reg, mem;
	  reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->gp_save_offset
					+ sp_offset
					+ reg_size * i));
	  mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
	}
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
			    NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info)
	   && TARGET_SPE_ABI
	   && info->spe_64bit_regs_used != 0
	   && info->first_gp_reg_save != 32)
    {
      int i;
      rtx spe_save_area_ptr;
      int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
				  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
				  && !call_used_regs[STATIC_CHAIN_REGNUM]);
      int spe_offset;

      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from the stack pointer that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable_via_sp
	= SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
			      + (32 - info->first_gp_reg_save - 1) * reg_size);

      if (spe_regs_addressable_via_sp)
	{
	  spe_save_area_ptr = frame_reg_rtx;
	  spe_offset = info->spe_gp_save_offset + sp_offset;
	}
      else
	{
	  /* Make r11 point to the start of the SPE save area.  We need
	     to be careful here if r11 is holding the static chain.  If
	     it is, then temporarily save it in r0.  We would use r0 as
	     our base register here, but using r0 as a base register in
	     loads and stores means something different from what we
	     would like.  */
	  if (using_static_chain_p)
	    {
	      rtx r0 = gen_rtx_REG (Pmode, 0);

	      gcc_assert (info->first_gp_reg_save > 11);

	      emit_move_insn (r0, gen_rtx_REG (Pmode, 11));
	    }

	  spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (spe_save_area_ptr, frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset + sp_offset)));

	  spe_offset = 0;
	}

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	    rtx offset, addr, mem;

	    /* We're doing all this to ensure that the offset fits into
	       the immediate offset of 'evstdd'.  */
	    gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset));

	    offset = GEN_INT (reg_size * i + spe_offset);
	    addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset);
	    mem = gen_rtx_MEM (V2SImode, addr);

	    insn = emit_move_insn (mem, reg);

	    rs6000_frame_related (insn, spe_save_area_ptr,
				  info->spe_gp_save_offset
				  + sp_offset + reg_size * i,
				  offset, const0_rtx);
	  }

      /* Move the static chain pointer back.  */
      if (using_static_chain_p && !spe_regs_addressable_via_sp)
	emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0));
    }
  else if (!WORLD_SAVE_P (info))
    {
      int i;
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  {
	    rtx addr, reg, mem;
	    reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				 GEN_INT (info->gp_save_offset
					  + sp_offset
					  + reg_size * i));
	    mem = gen_frame_mem (reg_mode, addr);

	    insn = emit_move_insn (mem, reg);
	    rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
				  NULL_RTX, NULL_RTX);
	  }
    }

  /* ??? There's no need to emit actual instructions here, but it's the
     easiest way to get the frame unwind information emitted.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      /* In AIX ABI we need to pretend we save r2 here.  */
      if (TARGET_AIX)
	{
	  rtx addr, reg, mem;

	  reg = gen_rtx_REG (reg_mode, 2);
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (sp_offset + 5 * reg_size));
	  mem = gen_frame_mem (reg_mode, addr);

	  insn = emit_move_insn (mem, reg);
	  rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
				NULL_RTX, NULL_RTX);
	  PATTERN (insn) = gen_blockage ();
	}

      for (i = 0; ; ++i)
	{
	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
			   info->ehrd_offset + sp_offset
			   + reg_size * (int) i,
			   info->total_size);
	}
    }

  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + sp_offset));
      rtx mem = gen_frame_mem (SImode, addr);
      /* See the large comment above about why CR2_REGNO is used.  */
      rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);

      /* If r12 was used to hold the original sp, copy cr into r0 now
	 that it's free.  */
      if (REGNO (frame_reg_rtx) == 12)
	{
	  rtx set;

	  cr_save_rtx = gen_rtx_REG (SImode, 0);
	  insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
						set,
						REG_NOTES (insn));
	}
      insn = emit_move_insn (mem, cr_save_rtx);

      rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
			    NULL_RTX, NULL_RTX);
    }

  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      if (info->total_size < 32767)
	sp_offset = info->total_size;
      else
	frame_reg_rtx = frame_ptr_rtx;
      rs6000_emit_allocate_stack (info->total_size,
				  (frame_reg_rtx != sp_reg_rtx
				   && ((info->altivec_size != 0)
				       || (info->vrsave_mask != 0)
				       )));
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie ();
    }

  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Save AltiVec registers if needed.  Save here because the red zone does
     not include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
    {
      int i;

      /* There should be a non inline version of this, for when we
	 are saving lots of vector registers.  */
      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = info->altivec_save_offset + sp_offset
	      + 16 * (i - info->first_altivec_reg_save);

	    savereg = gen_rtx_REG (V4SImode, i);

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
				  areg, GEN_INT (offset));
	  }
    }

  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, mem, vrsave;
      int offset;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 might be using r12
	 as frame_reg_rtx and r11 as the static chain pointer for
	 nested functions.  */
      reg = gen_rtx_REG (SImode, 0);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      if (!WORLD_SAVE_P (info))
	{
	  /* Save VRSAVE.  */
	  offset = info->vrsave_save_offset + sp_offset;
	  mem = gen_frame_mem (SImode,
			       gen_rtx_PLUS (Pmode, frame_reg_rtx,
					     GEN_INT (offset)));
	  insn = emit_move_insn (mem, reg);
	}

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }

  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
      || (DEFAULT_ABI == ABI_V4
	  && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	  && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);

	  insn = emit_move_insn (frame_ptr_rtx, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, frame_ptr_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = machopic_function_base_sym ();

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
}
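
/* Illustration, not compiled into GCC: the arithmetic behind the
   Darwin save_world layout asserted in the world-save path above.
   18 FPRs (f14-f31) sit just below the fixed area, 19 GPRs (r13-r31)
   below them, then the 4-byte VRSAVE word, then 12 quadword vector
   registers (v20-v31).  */
#if 0
#include <assert.h>

int main (void)
{
  int fp_save_offset = -(18 * 8);			/* -144 */
  int gp_save_offset = fp_save_offset - 19 * 4;		/* -220 */
  int vrsave_save_offset = gp_save_offset - 4;		/* -224 */
  int altivec_save_offset = vrsave_save_offset - 12 * 16; /* -416 */

  assert (fp_save_offset == -144);
  assert (gp_save_offset == -220);
  assert (vrsave_save_offset == -224);
  assert (altivec_save_offset == -416);
  return 0;
}
#endif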
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !FP_SAVE_INLINE (info->first_fp_reg_save))
    fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
	     SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
	     RESTORE_FP_PREFIX, info->first_fp_reg_save - 32,
	     RESTORE_FP_SUFFIX);

  /* Write .extern for AIX common mode routines, if needed.  */
  if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
    {
      fputs ("\t.extern __mulh\n", file);
      fputs ("\t.extern __mull\n", file);
      fputs ("\t.extern __divss\n", file);
      fputs ("\t.extern __divus\n", file);
      fputs ("\t.extern __quoss\n", file);
      fputs ("\t.extern __quous\n", file);
      common_mode_defined = 1;
    }

  if (! HAVE_prologue)
    {
      start_sequence ();

      /* A NOTE_INSN_DELETED is supposed to be at the start and end of
	 the "toplevel" insn chain.  */
      emit_note (NOTE_INSN_DELETED);
      rs6000_emit_prologue ();
      emit_note (NOTE_INSN_DELETED);

      /* Expand INSN_ADDRESSES so final() doesn't crash.  */
      {
	rtx insn;
	unsigned addr = 0;
	for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	  {
	    INSN_ADDRESSES_NEW (insn, addr);
	    addr += 4;
	  }
      }

      if (TARGET_DEBUG_STACK)
	debug_rtx_list (get_insns (), 100);
      final (get_insns (), file, FALSE);
      end_sequence ();
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Emit function epilogue as insns.

   At present, dwarf2out_frame_debug_expr doesn't understand
   register restores, so we don't bother setting RTX_FRAME_RELATED_P
   anywhere in the epilogue.  Most of the insns below would in any case
   need special notes to explain where r11 is in relation to the stack.  */
void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int sp_offset = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  using_load_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
			 && (!TARGET_SPE_ABI
			     || info->spe_64bit_regs_used == 0)
			 && info->first_gp_reg_save < 31
			 && no_global_regs_above (info->first_gp_reg_save));
  restoring_FPRs_inline = (sibcall
			   || crtl->calls_eh_return
			   || info->first_fp_reg_save == 64
			   || FP_SAVE_INLINE (info->first_fp_reg_save));
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment;  We don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));

  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (10
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_REG (Pmode,
						     LR_REGNO));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
	rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				 GEN_INT (info->cr_save_offset));
	rtx mem = gen_frame_mem (reg_mode, addr);

	RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->altivec_save_offset
					    + 16 * i));
	  rtx mem = gen_frame_mem (V4SImode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->fp_save_offset
					    + 8 * i));
	  rtx mem = gen_frame_mem (DFmode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return;
    }

  /* frame_reg_rtx + sp_offset points to the top of this stack frame.  */
  if (info->push_p)
    sp_offset = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && info->altivec_save_offset < (TARGET_32BIT ? -220 : -288))))
    {
      int i;

      if (use_backchain_to_restore_sp)
	{
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  sp_offset = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx addr, areg, mem;

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn
	      (areg, GEN_INT (info->altivec_save_offset
			      + sp_offset
			      + 16 * (i - info->first_altivec_reg_save)));

	    /* AltiVec addressing mode is [reg+reg].  */
	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
	    mem = gen_frame_mem (V4SImode, addr);

	    emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
	  }
    }

  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && info->vrsave_save_offset < (TARGET_32BIT ? -220 : -288))))
    {
      rtx addr, mem, reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      sp_offset = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			   GEN_INT (info->vrsave_save_offset + sp_offset));
      mem = gen_frame_mem (SImode, addr);
      reg = gen_rtx_REG (SImode, 12);
      emit_move_insn (reg, mem);

      emit_insn (generate_set_vrsave (reg, info, 1));
    }

  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  sp_offset = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);

      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (frame_reg_rtx, hard_frame_pointer_rtx,
			       GEN_INT (info->total_size))
		 : gen_adddi3 (frame_reg_rtx, hard_frame_pointer_rtx,
			       GEN_INT (info->total_size)));
      sp_offset = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
			       GEN_INT (info->total_size))
		 : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
			       GEN_INT (info->total_size)));
      sp_offset = 0;
    }

  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || info->altivec_save_offset >= (TARGET_32BIT ? -220 : -288)))
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx addr, areg, mem;

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn
	      (areg, GEN_INT (info->altivec_save_offset
			      + sp_offset
			      + 16 * (i - info->first_altivec_reg_save)));

	    /* AltiVec addressing mode is [reg+reg].  */
	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
	    mem = gen_frame_mem (V4SImode, addr);

	    emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
	  }
    }

  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || info->vrsave_save_offset >= (TARGET_32BIT ? -220 : -288)))
    {
      rtx addr, mem, reg;

      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			   GEN_INT (info->vrsave_save_offset + sp_offset));
      mem = gen_frame_mem (SImode, addr);
      reg = gen_rtx_REG (SImode, 12);
      emit_move_insn (reg, mem);

      emit_insn (generate_set_vrsave (reg, info, 1));
    }

  /* Get the old lr if we saved it.  */
  if (info->lr_save_p)
    {
      rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
				      info->lr_save_offset + sp_offset);

      emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
    }

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + sp_offset));
      rtx mem = gen_frame_mem (SImode, addr);

      emit_move_insn (gen_rtx_REG (SImode, 12), mem);
    }

  /* Set LR here to try to overlap restores below.  */
  if (info->lr_save_p)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
		    gen_rtx_REG (Pmode, 0));

  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (sp_offset + 5 * reg_size));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + sp_offset
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }

  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + sp_offset
					    + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, i) =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			 mem);
	}
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else if (TARGET_SPE_ABI
	   && info->spe_64bit_regs_used != 0
	   && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from the stack pointer that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable_via_sp
	= SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
			      + (32 - info->first_gp_reg_save - 1) * reg_size);
      int spe_offset;

      if (spe_regs_addressable_via_sp)
	spe_offset = info->spe_gp_save_offset + sp_offset;
      else
	{
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */
	  if (frame_reg_rtx == sp_reg_rtx)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset + sp_offset)));
	  /* Keep the invariant that frame_reg_rtx + sp_offset points
	     at the top of the stack frame.  */
	  sp_offset = -info->spe_gp_save_offset;

	  spe_offset = 0;
	}

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  {
	    rtx offset, addr, mem;

	    /* We're doing all this to ensure that the immediate offset
	       fits into the immediate field of 'evldd'.  */
	    gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

	    offset = GEN_INT (spe_offset + reg_size * i);
	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
	    mem = gen_rtx_MEM (V2SImode, addr);

	    emit_move_insn (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    mem);
	  }
    }
  else
    for (i = 0; i < 32 - info->first_gp_reg_save; i++)
      if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + sp_offset
					    + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  emit_move_insn (gen_rtx_REG (reg_mode,
				       info->first_gp_reg_save + i), mem);
	}

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
	   && ! call_used_regs[info->first_fp_reg_save+i]))
	{
	  rtx addr, mem;
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->fp_save_offset
					+ sp_offset
					+ 8 * i));
	  mem = gen_frame_mem (DFmode, addr);

	  emit_move_insn (gen_rtx_REG (DFmode,
				       info->first_fp_reg_save + i),
			  mem);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    {
      rtx r12_rtx = gen_rtx_REG (SImode, 12);
      int count = 0;

      if (using_mtcr_multiple)
	{
	  for (i = 0; i < 8; i++)
	    if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	      count++;
	  gcc_assert (count);
	}

      if (using_mtcr_multiple && count > 1)
	{
	  rtvec p;
	  int ndx;

	  p = rtvec_alloc (count);

	  ndx = 0;
	  for (i = 0; i < 8; i++)
	    if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	      {
		rtvec r = rtvec_alloc (2);
		RTVEC_ELT (r, 0) = r12_rtx;
		RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
		RTVEC_ELT (p, ndx) =
		  gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
			       gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
		ndx++;
	      }
	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  gcc_assert (ndx == count);
	}
      else
	for (i = 0; i < 8; i++)
	  if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	    {
	      emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
							   CR0_REGNO+i),
					      r12_rtx));
	    }
    }

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done.  */
  if (frame_reg_rtx != sp_reg_rtx)
    {
      /* This blockage is needed so that sched doesn't decide to move
	 the sp change before the register restores.  */
      rs6000_emit_stack_tie ();
      if (sp_offset != 0)
	emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
			       GEN_INT (sp_offset)));
      else
	emit_move_insn (sp_reg_rtx, frame_reg_rtx);
    }
  else if (sp_offset != 0)
    emit_insn (TARGET_32BIT
	       ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
			     GEN_INT (sp_offset))
	       : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
			     GEN_INT (sp_offset)));

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, sa)
		 : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall)
    {
      rtvec p;
      if (! restoring_FPRs_inline)
	p = rtvec_alloc (3 + 64 - info->first_fp_reg_save);
      else
	p = rtvec_alloc (2);

      RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
				      gen_rtx_REG (Pmode,
						   LR_REGNO));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  int i;
	  char rname[30];
	  const char *alloc_rname;

	  sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
		   info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
	  alloc_rname = ggc_strdup (rname);
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
					  gen_rtx_SYMBOL_REF (Pmode,
							      alloc_rname));

	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx addr, mem;
	      addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
				   GEN_INT (info->fp_save_offset + 8*i));
	      mem = gen_frame_mem (DFmode, addr);

	      RTVEC_ELT (p, i+3) =
		gen_rtx_SET (VOIDmode,
			     gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
			     mem);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (! HAVE_epilogue)
    {
      rtx insn = get_last_insn ();
      /* If the last insn was a BARRIER, we don't have to write anything except
	 the trace table.  */
      if (GET_CODE (insn) == NOTE)
	insn = prev_nonnote_insn (insn);
      if (insn == 0 || GET_CODE (insn) != BARRIER)
	{
	  /* This is slightly ugly, but at least we don't have two
	     copies of the epilogue-emitting code.  */
	  start_sequence ();

	  /* A NOTE_INSN_DELETED is supposed to be at the start
	     and end of the "toplevel" insn chain.  */
	  emit_note (NOTE_INSN_DELETED);
	  rs6000_emit_epilogue (FALSE);
	  emit_note (NOTE_INSN_DELETED);

	  /* Expand INSN_ADDRESSES so final() doesn't crash.  */
	  {
	    rtx insn;
	    int addr = 0;
	    for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	      {
		INSN_ADDRESSES_NEW (insn, addr);
		addr += 4;
	      }
	  }

	  if (TARGET_DEBUG_STACK)
	    debug_rtx_list (get_insns (), 100);
	  final (get_insns (), file, FALSE);
	  end_sequence ();
	}
    }

#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      insn = PREV_INSN (insn);
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 PowerPC's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !crtl->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  */
      if (! strcmp (language_string, "GNU C"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = TREE_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      if (TARGET_AIX)
	RS6000_OUTPUT_BASENAME (file, fname);
      else
	assemble_name (file, fname);
      putc ('-', file);
      rs6000_output_function_entry (file, fname);
      putc ('\n', file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
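
/* Illustration only (not part of GCC): a minimal sketch of how the
   single-bit traceback flags emitted above pack into one byte.  The
   mirrored expression is the fprintf above; the field values passed to it
   here are hypothetical, whereas in GCC they come from the rs6000 stack
   info computed earlier in this file.  */
#ifdef TBTAB_FLAGS_SKETCH
#include <stdio.h>

static unsigned char
pack_tbtab_flags_sketch (int optional_tbtab, int n_fp_regs_saved)
{
  /* Bit 5: offset-from-start-of-procedure stored in tbtab;
     bit 1: function uses floating point (set if any FP reg is saved).  */
  return (unsigned char) ((optional_tbtab << 5) | ((n_fp_regs_saved != 0) << 1));
}

int
main (void)
{
  /* A full traceback table with 18 FP registers saved -> 0x22.  */
  printf ("0x%02x\n", pack_tbtab_flags_sketch (1, 18));
  return 0;
}
#endif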
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ front end will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 4);
  else
    this = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (this, this, delta_rtx)
		 : gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (TARGET_32BIT
		     ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
		     : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (this, this, tmp)
		 : gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode, LR_REGNO)),
			gen_rtx_RETURN (VOIDmode))));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  free_after_compilation (cfun);

  reload_completed = 0;
  epilogue_completed = 0;
}
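
/* Illustration only (not part of GCC): the pointer adjustment the thunk
   above performs, written as plain C.  `delta' and `vcall_offset' are the
   same quantities the RTL sequence applies; the vtable layout assumed
   here (a pointer to a table of long offsets) is a simplified stand-in
   for the real C++ ABI layout.  */
#ifdef THUNK_ADJUST_SKETCH
static void *
thunk_adjust_sketch (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;	/* constant offset */
  if (vcall_offset)
    {
      /* tmp = *this; tmp = *(tmp + vcall_offset); this += tmp;  */
      char *vtbl = *(char **) p;
      long adj = *(long *) (vtbl + vcall_offset);
      p += adj;
    }
  return p;
}
#endif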
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	      contains:
				made by	 addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
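
/* Illustration only (not part of GCC): the multiplicative mixing used by
   rs6000_hash_constant above, applied to a plain string.  613 and 1231 are
   the same odd multipliers the function uses for scalar and recursive
   fields respectively; strlen is assumed available via system.h.  */
#ifdef HASH_MIX_SKETCH
static unsigned
hash_mix_sketch (const char *str)
{
  unsigned result = 0;
  size_t i, len = strlen (str);

  /* Fold the length, then each byte, exactly as the 's' case does.  */
  result = result * 613 + (unsigned) len;
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif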
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
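
/* Illustration only (not part of GCC): how VTABLE_NAME_P classifies
   mangled names.  "_ZTV" is the Itanium-ABI prefix for a vtable, so a
   name such as "_ZTV7MyClass" (hypothetical) matches, while an ordinary
   function symbol does not.  Note the macro tests the local variable
   `name' rather than its NAME argument, so a variable with that exact
   name must be in scope.  */
#ifdef VTABLE_NAME_SKETCH
static int
vtable_name_p_sketch (const char *name)
{
  return VTABLE_NAME_P (name);	/* 1 for "_ZTV7MyClass", 0 for "main" */
}
#endif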
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  const char *real_name;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     CODE_LABELs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at OVERRIDE_OPTIONS
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc (sizeof (*h));
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, 1);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
					      found)->labelno));
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    {
      ASM_OUTPUT_ALIGN (file, 3);
    }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
#if HOST_BITS_PER_WIDE_INT == 32
	  lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
			 POINTER_SIZE, &low, &high, 0);
#else
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
#endif
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  real_name = (*targetm.strip_name_encoding) (name);
  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fprintf (file, "\t.tc %s", real_name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
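
/* Illustration only (not part of GCC): the left-justification output_toc
   performs when a constant narrower than a TOC word is emitted on this
   big-endian target.  The widths used below (16-bit value, 32-bit
   pointer) are hypothetical.  */
#ifdef TOC_PAD_SKETCH
static unsigned long
toc_pad_sketch (unsigned long low, int value_bits, int pointer_bits)
{
  /* Shift the value into the most significant end of the TOC word,
     mirroring the lshift_double/`low <<=' code above.  */
  return low << (pointer_bits - value_bits);
}
/* toc_pad_sketch (0x1234, 16, 32) == 0x12340000  */
#endif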
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
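
/* Illustration only (not part of GCC): a hypothetical driver for
   output_ascii and the directives one would expect it to emit.  For the
   five bytes a b " c \n, printable characters are grouped into one quoted
   .byte string (with the quote doubled), and the non-printable byte falls
   back to a decimal .byte value on a fresh directive:

	.byte "ab""c"
	.byte 10
*/
#ifdef OUTPUT_ASCII_SKETCH
#include <stdio.h>
int
main (void)
{
  output_ascii (stdout, "ab\"c\n", 5);
  return 0;
}
#endif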
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
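
/* Illustration only (not part of GCC): expected results of the xlc-style
   naming scheme above, using hypothetical file names and a ".bss"-style
   descriptor as typically passed by the XCOFF support code:

     rs6000_gen_section_name (&buf, "src/foo.c", ".bss")  ->  "_foo.bss"
     rs6000_gen_section_name (&buf, "README",    ".bss")  ->  "_README.bss"

   Leading directories are stripped, non-alphanumeric characters are
   dropped, and the descriptor either replaces everything from the last
   period on or is appended when there is no period.  */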
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 1,
			     fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 0, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{cau|addis} %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  fputs ("\tbcl 20,31,1f\n\t.long ", file);
	  assemble_name (file, buf);
	  fputs ("-.\n1:", file);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	  asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
	{
	  /* Don't do anything, done in output_profile_hook ().  */
	}
      else
	{
	  gcc_assert (!TARGET_32BIT);

	  asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
	  asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

	  if (cfun->static_chain_decl != NULL)
	    {
	      asm_fprintf (file, "\tstd %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	      fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	      asm_fprintf (file, "\tld %s,24(%s)\n",
			   reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	    }
	  else
	    fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	}
      break;
    }
}
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
		       int verbose ATTRIBUTE_UNUSED,
		       rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if (rs6000_sched_groups
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return TARGET_POWER ? 5 : 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_cpu_attr == CPU_PPC603
		 || rs6000_cpu_attr == CPU_PPC604
		 || rs6000_cpu_attr == CPU_PPC604E
		 || rs6000_cpu_attr == CPU_PPC620
		 || rs6000_cpu_attr == CPU_PPC630
		 || rs6000_cpu_attr == CPU_PPC750
		 || rs6000_cpu_attr == CPU_PPC7400
		 || rs6000_cpu_attr == CPU_PPC7450
		 || rs6000_cpu_attr == CPU_POWER4
		 || rs6000_cpu_attr == CPU_POWER5
		 || rs6000_cpu_attr == CPU_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_COMPARE:
		case TYPE_DELAYED_COMPARE:
		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		case TYPE_DELAYED_CR:
		  return cost + 2;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_STORE_U:
	  case TYPE_STORE_UX:
	  case TYPE_FPSTORE:
	  case TYPE_FPSTORE_U:
	  case TYPE_FPSTORE_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 4;
		    break;
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 6;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_IDIV:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 45;
		    break;
		  case TYPE_LDIV:
		    if (! store_data_bypass_p (dep_insn, insn))
		      return 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	  case TYPE_LOAD_U:
	  case TYPE_LOAD_UX:
	  case TYPE_LOAD_EXT:
	  case TYPE_LOAD_EXT_U:
	  case TYPE_LOAD_EXT_UX:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x).  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		  case TYPE_LOAD_U:
		  case TYPE_LOAD_UX:
		  case TYPE_CNTLZ:
		    if (set_to_load_agen (dep_insn, insn))
		      return 4;
		    break;
		  case TYPE_LOAD_EXT:
		  case TYPE_LOAD_EXT_U:
		  case TYPE_LOAD_EXT_UX:
		  case TYPE_VAR_SHIFT_ROTATE:
		  case TYPE_VAR_DELAYED_COMPARE:
		    if (set_to_load_agen (dep_insn, insn))
		      return 6;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_COMPARE:
		  case TYPE_FAST_COMPARE:
		  case TYPE_EXTS:
		  case TYPE_SHIFT:
		  case TYPE_INSERT_WORD:
		  case TYPE_INSERT_DWORD:
		  case TYPE_FPLOAD_U:
		  case TYPE_FPLOAD_UX:
		  case TYPE_STORE_U:
		  case TYPE_STORE_UX:
		  case TYPE_FPSTORE_U:
		  case TYPE_FPSTORE_UX:
		    if (set_to_load_agen (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_IMUL:
		  case TYPE_IMUL2:
		  case TYPE_IMUL3:
		  case TYPE_LMUL:
		  case TYPE_IMUL_COMPARE:
		  case TYPE_LMUL_COMPARE:
		    if (set_to_load_agen (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_IDIV:
		    if (set_to_load_agen (dep_insn, insn))
		      return 45;
		    break;
		  case TYPE_LDIV:
		    if (set_to_load_agen (dep_insn, insn))
		      return 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_cpu == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;
	    break;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	      if (get_attr_type (dep_insn) == TYPE_FP)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
      break;

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
	  || type == TYPE_LOAD_EXT_UX
	  || type == TYPE_LOAD_UX
	  || type == TYPE_STORE_UX
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}

/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
	  || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
	  || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
	  || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
	  || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
	  || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
	  || type == TYPE_IDIV || type == TYPE_LDIV
	  || type == TYPE_INSERT_WORD)
	return true;
    }

  return false;
}

/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}

/* The function returns true if the target storage location of
   INSN1 is adjacent to the target storage location of INSN2.  */
/* Return 1 if memory locations are adjacent.  */

static bool
adjacent_mem_locations (rtx insn1, rtx insn2)
{
  rtx a = get_store_dest (PATTERN (insn1));
  rtx b = get_store_dest (PATTERN (insn2));

  if ((GET_CODE (XEXP (a, 0)) == REG
       || (GET_CODE (XEXP (a, 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
      && (GET_CODE (XEXP (b, 0)) == REG
	  || (GET_CODE (XEXP (b, 0)) == PLUS
	      && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
    {
      HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
      rtx reg0, reg1;

      if (GET_CODE (XEXP (a, 0)) == PLUS)
	{
	  reg0 = XEXP (XEXP (a, 0), 0);
	  val0 = INTVAL (XEXP (XEXP (a, 0), 1));
	}
      else
	reg0 = XEXP (a, 0);
      if (GET_CODE (XEXP (b, 0)) == PLUS)
	{
	  reg1 = XEXP (XEXP (b, 0), 0);
	  val1 = INTVAL (XEXP (XEXP (b, 0), 1));
	}
      else
	reg1 = XEXP (b, 0);

      val_diff = val1 - val0;

      return ((REGNO (reg0) == REGNO (reg1))
	      && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
		  || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
    }

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_IMUL:
      case TYPE_IDIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn))
	  || (load_store_pendulum == 2 && is_store_insn (insn))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Use issue rate of 1 for first scheduling pass to decrease degradation.  */
  if (!reload_completed)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_RIOS1:  /* ? */
  case CPU_PPC601: /* ? */
    return 3;
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
    return 2;
  default:
    return 1;
  }
}

/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  if (rs6000_cpu_attr == CPU_PPC8540)
    return 4;
  if (rs6000_cpu_attr == CPU_CELL)
    return (reload_completed ? 8 : 0);
  return 0;
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */
static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  */

static bool
is_mem_ref (rtx pat)
{
  const char *fmt;
  int i, j;
  bool ret = false;

  /* stack_tie does not produce any real memory traffic.  */
  if (GET_CODE (pat) == UNSPEC
      && XINT (pat, 1) == UNSPEC_TIE)
    return false;

  if (GET_CODE (pat) == MEM)
    return true;

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
    {
      if (fmt[i] == 'e')
	ret |= is_mem_ref (XEXP (pat, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  ret |= is_mem_ref (XVECEXP (pat, i, j));
    }

  return ret;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return is_mem_ref (SET_SRC (pat));

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i)))
	  return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn));
}

/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return is_mem_ref (SET_DEST (pat));

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i)))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn));
}

/* Return the dest of a store insn.  */

static rtx
get_store_dest (rtx pat)
{
  gcc_assert (is_store_insn1 (pat));

  if (GET_CODE (pat) == SET)
    return SET_DEST (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx inner_pat = XVECEXP (pat, 0, i);
	  if (GET_CODE (inner_pat) == SET
	      && is_mem_ref (SET_DEST (inner_pat)))
	    return inner_pat;
	}
    }
  /* We shouldn't get here, because we should have either a simple
     store insn or a store with update which are covered above.  */
  gcc_unreachable ();
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next)
      && is_store_insn (insn))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next)
      && is_store_insn (insn)
      && DEP_TYPE (dep) == REG_DEP_TRUE)
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
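
/* Illustration only (not part of GCC): the latency-threshold form of the
   check above.  With a costly-dep setting of N, a dependence whose
   remaining latency (cost minus the distance already separating the two
   insns) is at least N keeps them in separate dispatch groups.  The
   numbers in the comment are hypothetical.  */
#ifdef COSTLY_DEP_SKETCH
static int
costly_by_latency_sketch (int cost, int distance, int threshold)
{
  return (cost - distance) >= threshold;   /* e.g. (5 - 1) >= 4 -> costly */
}
#endif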
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL_RTX;

      if (CALL_P (insn)
	  || JUMP_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx *ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a non-pipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
	  && (recog_memoized (ready[n_ready - 2]) > 0))
	/* Simply swap first two insns.  */
	{
	  rtx tmp = ready[n_ready - 1];
	  ready[n_ready - 1] = ready[n_ready - 2];
	  ready[n_ready - 2] = tmp;
	}
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
		       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try to keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist
           non-load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.  */

  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp;

      if (is_store_insn (last_scheduled_insn))
	/* Issuing a store, swing the load_store_pendulum to the left.  */
	load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn))
	/* Issuing a load, swing the load_store_pendulum to the right.  */
	load_store_pendulum++;
      else
	return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
	 the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
	return cached_can_issue_more;

      if (load_store_pendulum == 1)
	{
	  /* A load has been issued in this cycle.  Scan the ready list
	     for another load to issue with it.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos]))
		{
		  /* Found a load.  Move it to the head of the ready list,
		     and adjust its priority so that it is more likely to
		     stay there.  */
		  tmp = ready[pos];
		  for (i = pos; i < *pn_ready - 1; i++)
		    ready[i] = ready[i + 1];
		  ready[*pn_ready - 1] = tmp;
		  if (INSN_PRIORITY_KNOWN (tmp))
		    INSN_PRIORITY (tmp)++;
		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -2)
	{
	  /* Two stores have been issued in this cycle.  Increase the
	     priority of the first load in the ready list to favor it for
	     issuing in the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos])
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads.  */
		  load_store_pendulum--;

		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -1)
	{
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location.  */
	  int first_store_pos = -1;

	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos]))
		{
		  /* Maintain the index of the first store found on the
		     list.  */
		  if (first_store_pos == -1)
		    first_store_pos = pos;

		  if (is_store_insn (last_scheduled_insn)
		      && adjacent_mem_locations (last_scheduled_insn,
						 ready[pos]))
		    {
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there.  */
		      tmp = ready[pos];
		      for (i = pos; i < *pn_ready - 1; i++)
			ready[i] = ready[i + 1];
		      ready[*pn_ready - 1] = tmp;
		      if (INSN_PRIORITY_KNOWN (tmp))
			INSN_PRIORITY (tmp)++;
		      first_store_pos = -1;

		      break;
		    }
		}
	      pos--;
	    }

	  if (first_store_pos >= 0)
	    {
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there.  */
	      tmp = ready[first_store_pos];
	      for (i = first_store_pos; i < *pn_ready - 1; i++)
		ready[i] = ready[i + 1];
	      ready[*pn_ready - 1] = tmp;
	      if (INSN_PRIORITY_KNOWN (tmp))
		INSN_PRIORITY (tmp)++;
	    }
	}
      else if (load_store_pendulum == 2)
	{
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos])
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a store
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple stores.  */
		  load_store_pendulum++;

		  break;
		}
	      pos--;
	    }
	}
    }

  return cached_can_issue_more;
}
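
/* Illustrative sketch (not part of the port): the pendulum update above,
   restated over plain ints so the state machine is easy to follow in
   isolation.  The helper name and flattened parameters are hypothetical
   and exist only for this example.  */

static int
pendulum_after_issue (int pendulum, int is_load, int is_store)
{
  /* Stores swing the pendulum left (negative), loads swing it right
     (positive); anything else leaves it untouched.  */
  if (is_store)
    return pendulum - 1;
  if (is_load)
    return pendulum + 1;
  return pendulum;
}

/* For example, issuing two stores in a row takes the pendulum from 0 to
   -2, the state in which the code above boosts the priority of the first
   ready load.  */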
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group
   that precedes the group to which INSN belongs).  This means that INSN
   will be the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || insn == NULL_RTX
      || GET_CODE (insn) == NOTE
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
	return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
	return true;

      if (!rs6000_sched_groups)
	return false;

      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_MFCR:
	case TYPE_MFCRF:
	case TYPE_MTCR:
	case TYPE_DELAYED_CR:
	case TYPE_CR_LOGICAL:
	case TYPE_MTJMPR:
	case TYPE_MFJMPR:
	case TYPE_IDIV:
	case TYPE_LDIV:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	  return true;
	default:
	  break;
	}
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_INSERT_DWORD:
	case TYPE_EXTS:
	case TYPE_CNTLZ:
	case TYPE_SHIFT:
	case TYPE_VAR_SHIFT_ROTATE:
	case TYPE_TRAP:
	case TYPE_IMUL:
	case TYPE_IMUL2:
	case TYPE_IMUL3:
	case TYPE_LMUL:
	case TYPE_IDIV:
	case TYPE_INSERT_WORD:
	case TYPE_DELAYED_COMPARE:
	case TYPE_IMUL_COMPARE:
	case TYPE_LMUL_COMPARE:
	case TYPE_FPCOMPARE:
	case TYPE_MFCR:
	case TYPE_MTCR:
	case TYPE_MFJMPR:
	case TYPE_MTJMPR:
	case TYPE_ISYNC:
	case TYPE_SYNC:
	case TYPE_LOAD_L:
	case TYPE_STORE_C:
	case TYPE_LOAD_U:
	case TYPE_LOAD_UX:
	case TYPE_LOAD_EXT_UX:
	case TYPE_STORE_U:
	case TYPE_STORE_UX:
	case TYPE_FPLOAD_U:
	case TYPE_FPLOAD_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  return true;
	default:
	  break;
	}
      break;
    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || insn == NULL_RTX
      || GET_CODE (insn) == NOTE
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_SHIFT:
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_TRAP:
      case TYPE_IMUL:
      case TYPE_IMUL2:
      case TYPE_IMUL3:
      case TYPE_LMUL:
      case TYPE_IDIV:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
	return true;
      default:
	break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
	{
	  rtx next = DEP_CON (dep);

	  if (next == next_insn
	      && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
	    return true;
	}
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   '-minsert-sched-nops' = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (whether a group has just ended, how many vacant issue slots
   remain in the last group, and how many dispatch groups were encountered
   so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
		 rtx next_insn, bool *group_end, int can_issue_more,
		 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX)
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
	     *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
	can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
	 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
	 a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
	 in this case the last nop will start a new group and the branch
	 will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
	can_issue_more--;

      while (can_issue_more > 0)
	{
	  nop = gen_nop ();
	  emit_insn_before (nop, next_insn);
	  can_issue_more--;
	}

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
	 issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
	{
	  can_issue_more = issue_rate - 1;
	  (*group_count)++;
	  end = true;
	  for (i = 0; i < issue_rate; i++)
	    group_insns[i] = 0;
	}

      while (n_nops > 0)
	{
	  nop = gen_nop ();
	  emit_insn_before (nop, next_insn);
	  if (can_issue_more == issue_rate - 1) /* new group begins */
	    end = false;
	  can_issue_more--;
	  if (can_issue_more == 0)
	    {
	      can_issue_more = issue_rate - 1;
	      (*group_count)++;
	      end = true;
	      for (i = 0; i < issue_rate; i++)
		group_insns[i] = 0;
	    }
	  n_nops--;
	}

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
	= (end
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
	(*group_count)--;

      if (sched_verbose > 6)
	fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
		 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
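
/* Illustrative sketch (hypothetical helper, not used by the port): how many
   nops each '-minsert-sched-nops' scheme inserts before the next insn,
   following the two cases documented above.  For the "exact" scheme the
   count is 'can_issue_more' when the next insn is a branch, and one less
   otherwise, since only a branch can occupy the last issue slot; for the
   fixed scheme the flag value X itself is the count.  */

static int
nops_to_insert (int exact_scheme, int x, int can_issue_more,
		int next_is_branch)
{
  if (exact_scheme)
    return next_is_branch ? can_issue_more
			  : (can_issue_more > 0 ? can_issue_more - 1 : 0);
  return x;
}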
/* This function tries to synchronize the dispatch groups that the compiler
   "sees" with the dispatch groups that the processor dispatcher is expected
   to form in practice.  It tries to achieve this synchronization by forcing
   the estimated processor grouping on the compiler (as opposed to the
   function 'pad_groups', which tries to force the scheduler's grouping on
   the processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops',
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows: it keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following holds (the predicate is restated in the sketch after this
   function):
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last two or fewer issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - fewer than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = alloca (issue_rate * sizeof (rtx));
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
	can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
	return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
	= (can_issue_more == 0
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate &&
	       insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
					next_insn, &group_end, can_issue_more,
					&group_count);

      if (group_end)
	{
	  group_count++;
	  can_issue_more = 0;
	  for (i = 0; i < issue_rate; i++)
	    group_insns[i] = 0;
	}

      if (GET_MODE (next_insn) == TImode && can_issue_more)
	PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
	PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
    }

  return group_count;
}
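
/* Illustrative sketch: the "does the next insn start a new dispatch group?"
   predicate used both here and in force_new_group, lifted out over plain
   values.  The helper name and flattened parameters are hypothetical.  */

static int
starts_new_group_p (int can_issue_more, int issue_rate,
		    int next_is_branch, int next_is_cracked,
		    int next_must_be_first)
{
  return (can_issue_more == 0
	  || (can_issue_more == 1 && !next_is_branch)
	  || (can_issue_more <= 2 && next_is_cracked)
	  || (can_issue_more < issue_rate && next_must_be_first));
}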
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
	break;

      if (group_end)
	{
	  /* If the scheduler had marked group termination at this location
	     (between insn and next_insn), and neither insn nor next_insn will
	     force group termination, pad the group with nops to force group
	     termination.  */
	  if (can_issue_more
	      && (rs6000_sched_insert_nops == sched_finish_pad_groups)
	      && !insn_terminates_group_p (insn, current_group)
	      && !insn_terminates_group_p (next_insn, previous_group))
	    {
	      if (!is_branch_slot_insn (next_insn))
		can_issue_more--;

	      while (can_issue_more)
		{
		  nop = gen_nop ();
		  emit_insn_before (nop, next_insn);
		  can_issue_more--;
		}
	    }

	  can_issue_more = issue_rate;
	  group_count++;
	}

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops to enforce insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

void
rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx ctx_reg = force_reg (Pmode, cxt);

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

/* Macros to shorten the code expansions below.  */
#define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
#define MEM_PLUS(addr,offset) \
  gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
	rtx fn_reg = gen_reg_rtx (Pmode);
	rtx toc_reg = gen_reg_rtx (Pmode);
	emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
	emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
	emit_move_insn (MEM_DEREF (addr), fn_reg);
	emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
			 FALSE, VOIDmode, 4,
			 addr, Pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, Pmode,
			 ctx_reg, Pmode);
      break;
    }

  return;
}
/* Table of valid machine attributes.  */

const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "altivec",    1, 1, false, true,  false, rs6000_handle_altivec_attribute },
  { "longcall",   0, 0, false, true,  true,  rs6000_handle_longcall_attribute },
  { "shortcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute },
  { "ms_struct",  0, 0, false, false, false, rs6000_handle_struct_attribute },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, NULL }
};
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

	__attribute__((altivec(vector__)))
	__attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
	__attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
				 tree name ATTRIBUTE_UNUSED,
				 tree args,
				 int flags ATTRIBUTE_UNUSED,
				 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
	&& TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_unsigned_type_node || type == long_integer_type_node)
    {
      if (TARGET_64BIT)
	error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
      else if (rs6000_warn_altivec_long)
	warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
    }
  else if (type == long_long_unsigned_type_node
	   || type == long_long_integer_type_node)
    error ("use of %<long long%> in AltiVec types is invalid");
  else if (type == double_type_node)
    error ("use of %<double%> in AltiVec types is invalid");
  else if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
	{
	case SImode:
	  result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	  break;
	case HImode:
	  result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	  break;
	case QImode:
	  result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	  break;
	case SFmode: result = V4SF_type_node; break;
	  /* If the user says 'vector int bool', we may be handed the 'bool'
	     attribute _before_ the 'vector' attribute, and so select the
	     proper type in the 'b' case below.  */
	case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
	  result = type;
	default: break;
	}
      break;
    case 'b':
      switch (mode)
	{
	case SImode: case V4SImode: result = bool_V4SI_type_node; break;
	case HImode: case V8HImode: result = bool_V8HI_type_node; break;
	case QImode: case V16QImode: result = bool_V16QI_type_node;
	default: break;
	}
      break;
    case 'p':
      switch (mode)
	{
	case V8HImode: result = pixel_V8HI_type_node;
	default: break;
	}
      break;
    default: break;
    }

  if (result && result != type && TYPE_READONLY (type))
    result = build_qualified_type (result, TYPE_QUAL_CONST);

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
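
/* Usage example for the attribute handled above (a sketch, not part of this
   file).  These are the internal forms that the <altivec.h> 'vector',
   'pixel' and 'bool' keywords expand to; user code normally spells them via
   those keywords:

     typedef int            v4si __attribute__ ((altivec (vector__)));
     typedef unsigned short pix  __attribute__ ((altivec (pixel__)));
     typedef unsigned int   b4si __attribute__ ((altivec (bool__),
						 altivec (vector__)));

   Note that 'vector bool int' arrives as two attribute applications, which
   is why the 'b' case above must also accept a vector mode already selected
   by an earlier application.  */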
/* AltiVec defines four built-in scalar types that serve as vector
   element types; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip it.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qs attribute ignored",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qs incompatible attribute ignored",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
	  !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
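
/* Usage example for the two struct-layout attributes handled above (a
   sketch, not part of this file).  When MS bit-field layout is the default
   (TARGET_USE_MS_BITFIELD_LAYOUT), "gcc_struct" opts a type back out;
   otherwise "ms_struct" opts a type in:

     struct msish  { char a; int b : 7; } __attribute__ ((ms_struct));
     struct gccish { char a; int b : 7; } __attribute__ ((gcc_struct));

   Under the MS rules the bit-field allocates a full 'int' container
   aligned to 'int', so 'struct msish' is typically larger than
   'struct gccish'.  */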
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
	{
	  toc_initialized = 1;
	  fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
	  (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
	  fprintf (asm_out_file, "\t.tc ");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, "\n");

	  fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	}
      else
	fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
	{
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
	  fprintf (asm_out_file, " = .+32768\n");
	  toc_initialized = 1;
	}
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
			   SDATA2_SECTION_ASM_OP);
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
			       unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = alloca (len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *template)
{
  int len;

  len = strlen (template);
  return (strncmp (section, template, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}

bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && (unsigned HOST_WIDE_INT) size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}
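
/* Usage example for the small-data test above (a sketch, not part of this
   file).  With -msdata and a suitable -G threshold, either placement makes
   rs6000_elf_in_small_data_p return true:

     int counter;                                            // size <= g_switch_value
     int table[4] __attribute__ ((section (".sdata.tab")));  // explicit section

   The explicit-section case matches because compare_section_name treats
   ".sdata.tab" as ".sdata" followed by a '.'-separated suffix.  */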
#endif /* USING_ELFOS_H */

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

static tree branch_island_list = 0;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
			    int line_number)
{
  tree branch_island = build_tree_list (function_name, label_name);
  TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
  TREE_CHAIN (branch_island) = branch_island_list;
  branch_island_list = branch_island;
}

#define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND)     TREE_VALUE (BRANCH_ISLAND)
#define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND)  TREE_PURPOSE (BRANCH_ISLAND)
#define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND)    \
		TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
/* Generate far-jump branch islands for everything on the
   branch_island_list.  Invoked immediately after the last instruction
   of the epilogue has been emitted; the branch-islands must be
   appended to, and contiguous with, the function body.  Mach-O stubs
   are generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];
  tree branch_island;

  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    {
      const char *label =
	IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
      const char *name =
	IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
	strcpy (name_buf, name+1);
      else
	{
	  name_buf[0] = '_';
	  strcpy (name_buf+1, name);
	}
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
	{
	  strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic\n");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic:\n\tmflr r11\n");

	  strcat (tmp_buf, "\taddis r11,r11,ha16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtlr r0\n");

	  strcat (tmp_buf, "\taddi r12,r11,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
	}
      else
	{
	  strcat (tmp_buf, ":\nlis r12,hi16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	}
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
    }

  branch_island_list = 0;
}
/* NO_PREVIOUS_DEF checks in the linked list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  tree branch_island;
  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  tree branch_island;
  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
      return BRANCH_ISLAND_LABEL_NAME (branch_island);
  return 0;
}
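
/* Illustrative sketch of the bookkeeping above, using a plain linked list
   instead of TREE_LIST nodes.  All names here are hypothetical; the real
   code keys entries by IDENTIFIER node as shown above.  */

struct island_entry
{
  const char *function_name;	/* analogue of BRANCH_ISLAND_FUNCTION_NAME */
  const char *label_name;	/* analogue of BRANCH_ISLAND_LABEL_NAME */
  int line_number;		/* analogue of BRANCH_ISLAND_LINE_NUMBER */
  struct island_entry *next;
};

/* Return the island label recorded for NAME, or 0 if this is the first
   far call to NAME (the analogue of no_previous_def/get_prev_label).  */
static const char *
island_lookup (struct island_entry *list, const char *name)
{
  for (; list; list = list->next)
    if (strcmp (list->function_name, name) == 0)
      return list->label_name;
  return 0;
}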
#ifndef DARWIN_LINKER_GENERATES_ISLANDS
#define DARWIN_LINKER_GENERATES_ISLANDS 0
#endif

/* KEXTs still need branch islands.  */
#define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
				 || flag_mkernel || flag_apple_kext)

/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
	     int cookie_operand_number)
{
  static char buf[256];
  if (DARWIN_GENERATE_ISLANDS
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
	{
	  rtx label_rtx = gen_label_rtx ();
	  char *label_buf, temp_buf[256];
	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
				       CODE_LABEL_NUMBER (label_rtx));
	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
	  labelname = get_identifier (label_buf);
	  add_compiler_branch_island (labelname, funname, insn_line (insn));
	}
      else
	labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
	 instruction will reach 'foo', otherwise link as 'bl L42'".
	 "L42" should be a 'branch island', that will do a far jump to
	 'foo'.  Branch islands are generated in
	 macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
	       dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);

  symbol_name = alloca (length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = alloca (length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = alloca (sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
      fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
	   (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
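
/* Illustrative note on the SMALL_INT test above: biasing by 0x8000 folds
   the two-sided signed-16-bit range check into a single unsigned compare.
   A standalone restatement (hypothetical helper name):  */

static int
fits_signed_16 (long v)
{
  /* -0x8000 <= v < 0x8000  <=>  (unsigned) (v + 0x8000) < 0x10000.  */
  return ((unsigned long) v + 0x8000) < 0x10000;
}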
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
					rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
	 it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
						     Pmode, reg_temp);
      offset =
	rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
						Pmode, reg);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    {
	      rtx mem = force_const_mem (Pmode, orig);
	      return machopic_legitimize_pic_address (mem, Pmode, reg);
	    }
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
    if (rs6000_select[i].set_arch_p && rs6000_select[i].string
	&& rs6000_select[i].string[0] != '\0')
      cpu_id = rs6000_select[i].string;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
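
/* Illustrative sketch of the priority inversion above (hypothetical
   helper).  Assuming MAX_INIT_PRIORITY is 65535, a constructor with
   priority 101 lands in ".ctors.65434"; higher priorities produce
   smaller numbers, so the linker, which sorts section names in
   increasing order, places them first.  BUF must have room for the
   longest name, e.g. 16 bytes.  */

static void
ctor_section_name (char *buf, int priority)
{
  sprintf (buf, ".ctors.%.5u", (unsigned) (65535 - priority));
}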
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
	{
	  fputs ("\t.size\t", file);
	  assemble_name (file, name);
	  fputs (",24\n\t.type\t.", file);
	  assemble_name (file, name);
	  fputs (",@function\n", file);
	  if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
	    {
	      fputs ("\t.globl\t.", file);
	      assemble_name (file, name);
	      putc ('\n', file);
	    }
	}
      else
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
	desc_name++;

      if (TREE_PUBLIC (decl))
	fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
	fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}

static void
rs6000_elf_end_indicate_exec_stack (void)
{
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
}
#endif
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[3] = { "PR", "RO", "RW" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
	return data_section;
      else
	return private_data_section;
    }
}

static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}

/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}

/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
#endif /* TARGET_XCOFF */
21002 /* Compute a (partial) cost for rtx X. Return true if the complete
21003 cost has been computed, and false if subexpressions should be
21004 scanned. In either case, *TOTAL contains the cost result. */
21007 rs6000_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
21009 enum machine_mode mode
= GET_MODE (x
);
21013 /* On the RS/6000, if it is valid in the insn, it is free. */
21015 if (((outer_code
== SET
21016 || outer_code
== PLUS
21017 || outer_code
== MINUS
)
21018 && (satisfies_constraint_I (x
)
21019 || satisfies_constraint_L (x
)))
21020 || (outer_code
== AND
21021 && (satisfies_constraint_K (x
)
21023 ? satisfies_constraint_L (x
)
21024 : satisfies_constraint_J (x
))
21025 || mask_operand (x
, mode
)
21027 && mask64_operand (x
, DImode
))))
21028 || ((outer_code
== IOR
|| outer_code
== XOR
)
21029 && (satisfies_constraint_K (x
)
21031 ? satisfies_constraint_L (x
)
21032 : satisfies_constraint_J (x
))))
21033 || outer_code
== ASHIFT
21034 || outer_code
== ASHIFTRT
21035 || outer_code
== LSHIFTRT
21036 || outer_code
== ROTATE
21037 || outer_code
== ROTATERT
21038 || outer_code
== ZERO_EXTRACT
21039 || (outer_code
== MULT
21040 && satisfies_constraint_I (x
))
21041 || ((outer_code
== DIV
|| outer_code
== UDIV
21042 || outer_code
== MOD
|| outer_code
== UMOD
)
21043 && exact_log2 (INTVAL (x
)) >= 0)
21044 || (outer_code
== COMPARE
21045 && (satisfies_constraint_I (x
)
21046 || satisfies_constraint_K (x
)))
21047 || (outer_code
== EQ
21048 && (satisfies_constraint_I (x
)
21049 || satisfies_constraint_K (x
)
21051 ? satisfies_constraint_L (x
)
21052 : satisfies_constraint_J (x
))))
21053 || (outer_code
== GTU
21054 && satisfies_constraint_I (x
))
21055 || (outer_code
== LTU
21056 && satisfies_constraint_P (x
)))
21061 else if ((outer_code
== PLUS
21062 && reg_or_add_cint_operand (x
, VOIDmode
))
21063 || (outer_code
== MINUS
21064 && reg_or_sub_cint_operand (x
, VOIDmode
))
21065 || ((outer_code
== SET
21066 || outer_code
== IOR
21067 || outer_code
== XOR
)
21069 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
21071 *total
= COSTS_N_INSNS (1);
21077 if (mode
== DImode
&& code
== CONST_DOUBLE
)
21079 if ((outer_code
== IOR
|| outer_code
== XOR
)
21080 && CONST_DOUBLE_HIGH (x
) == 0
21081 && (CONST_DOUBLE_LOW (x
)
21082 & ~ (unsigned HOST_WIDE_INT
) 0xffff) == 0)
21087 else if ((outer_code
== AND
&& and64_2_operand (x
, DImode
))
21088 || ((outer_code
== SET
21089 || outer_code
== IOR
21090 || outer_code
== XOR
)
21091 && CONST_DOUBLE_HIGH (x
) == 0))
21093 *total
= COSTS_N_INSNS (1);
21103 /* When optimizing for size, MEM should be slightly more expensive
21104 than generating address, e.g., (plus (reg) (const)).
21105 L1 cache latency is about two instructions. */
21106 *total
= optimize_size
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
21114 if (mode
== DFmode
)
21116 if (GET_CODE (XEXP (x
, 0)) == MULT
)
21118 /* FNMA accounted in outer NEG. */
21119 if (outer_code
== NEG
)
21120 *total
= rs6000_cost
->dmul
- rs6000_cost
->fp
;
21122 *total
= rs6000_cost
->dmul
;
21125 *total
= rs6000_cost
->fp
;
21127 else if (mode
== SFmode
)
21129 /* FNMA accounted in outer NEG. */
21130 if (outer_code
== NEG
&& GET_CODE (XEXP (x
, 0)) == MULT
)
21133 *total
= rs6000_cost
->fp
;
21136 *total
= COSTS_N_INSNS (1);
21140 if (mode
== DFmode
)
21142 if (GET_CODE (XEXP (x
, 0)) == MULT
21143 || GET_CODE (XEXP (x
, 1)) == MULT
)
21145 /* FNMA accounted in outer NEG. */
21146 if (outer_code
== NEG
)
21147 *total
= rs6000_cost
->dmul
- rs6000_cost
->fp
;
21149 *total
= rs6000_cost
->dmul
;
21152 *total
= rs6000_cost
->fp
;
21154 else if (mode
== SFmode
)
21156 /* FNMA accounted in outer NEG. */
21157 if (outer_code
== NEG
&& GET_CODE (XEXP (x
, 0)) == MULT
)
21160 *total
= rs6000_cost
->fp
;
21163 *total
= COSTS_N_INSNS (1);
21167 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
21168 && satisfies_constraint_I (XEXP (x
, 1)))
21170 if (INTVAL (XEXP (x
, 1)) >= -256
21171 && INTVAL (XEXP (x
, 1)) <= 255)
21172 *total
= rs6000_cost
->mulsi_const9
;
21174 *total
= rs6000_cost
->mulsi_const
;
21176 /* FMA accounted in outer PLUS/MINUS. */
21177 else if ((mode
== DFmode
|| mode
== SFmode
)
21178 && (outer_code
== PLUS
|| outer_code
== MINUS
))
21180 else if (mode
== DFmode
)
21181 *total
= rs6000_cost
->dmul
;
21182 else if (mode
== SFmode
)
21183 *total
= rs6000_cost
->fp
;
21184 else if (mode
== DImode
)
21185 *total
= rs6000_cost
->muldi
;
21187 *total
= rs6000_cost
->mulsi
;
21192 if (FLOAT_MODE_P (mode
))
21194 *total
= mode
== DFmode
? rs6000_cost
->ddiv
21195 : rs6000_cost
->sdiv
;
21202 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
21203 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
21205 if (code
== DIV
|| code
== MOD
)
21207 *total
= COSTS_N_INSNS (2);
21210 *total
= COSTS_N_INSNS (1);
21214 if (GET_MODE (XEXP (x
, 1)) == DImode
)
21215 *total
= rs6000_cost
->divdi
;
21217 *total
= rs6000_cost
->divsi
;
21219 /* Add in shift and subtract for MOD. */
21220 if (code
== MOD
|| code
== UMOD
)
21221 *total
+= COSTS_N_INSNS (2);
21226 *total
= COSTS_N_INSNS (4);
21230 *total
= COSTS_N_INSNS (6);
21234 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
21246 *total
= COSTS_N_INSNS (1);
21254 /* Handle mul_highpart. */
21255 if (outer_code
== TRUNCATE
21256 && GET_CODE (XEXP (x
, 0)) == MULT
)
21258 if (mode
== DImode
)
21259 *total
= rs6000_cost
->muldi
;
21261 *total
= rs6000_cost
->mulsi
;
21264 else if (outer_code
== AND
)
21267 *total
= COSTS_N_INSNS (1);
    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = 0;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (optimize_size)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode)
	       && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;
    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      *total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (mode == Pmode)
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* A C expression returning the cost of moving data from a register of
   class FROM to one of class TO.  */

int
rs6000_register_move_cost (enum machine_mode mode,
			   enum reg_class from, enum reg_class to)
{
  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	from = to;

      if (from == FLOAT_REGS || from == ALTIVEC_REGS)
	return (rs6000_memory_move_cost (mode, from, 0)
		+ rs6000_memory_move_cost (mode, GENERAL_REGS, 0));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (from == CR_REGS)
	return 4;

      /* Power6 has slower LR/CTR moves so make them more expensive than
	 memory in order to bias spills to memory.  */
      else if (rs6000_cpu == PROCESSOR_POWER6
	       && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
	return 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	return 2 * hard_regno_nregs[0][mode];
    }

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    return (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	    + rs6000_register_move_cost (mode, from, GENERAL_REGS));
}
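
/* Worked example (illustrative): on a 32-bit target, moving DFmode from
   FLOAT_REGS to GENERAL_REGS takes the memory path above and costs
   rs6000_memory_move_cost (DFmode, FLOAT_REGS) = 4 * 1 (one 64-bit FPR)
   plus rs6000_memory_move_cost (DFmode, GENERAL_REGS) = 4 * 2 (two
   32-bit GPRs), i.e. 12 in total, against 2 for a plain GPR-to-GPR
   copy.  That gap is what steers reload away from cross-file moves.  */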
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

int
rs6000_memory_move_cost (enum machine_mode mode, enum reg_class class,
			 int in ATTRIBUTE_UNUSED)
{
  if (reg_classes_intersect_p (class, GENERAL_REGS))
    return 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (class, FLOAT_REGS))
    return 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (class, ALTIVEC_REGS))
    return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    return 4 + rs6000_register_move_cost (mode, class, GENERAL_REGS);
}
/* Return a code for a target-specific builtin that implements the
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
			   bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
	 && flag_finite_math_only && !flag_trapping_math
	 && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    return NULL_TREE;
  else
    switch (fn)
      {
      case BUILT_IN_SQRTF:
	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Newton-Raphson approximation of single-precision floating-point
   divide n/d.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
{
  rtx x0, e0, e1, y1, u0, v0, one;

  x0 = gen_reg_rtx (SFmode);
  e0 = gen_reg_rtx (SFmode);
  e1 = gen_reg_rtx (SFmode);
  y1 = gen_reg_rtx (SFmode);
  u0 = gen_reg_rtx (SFmode);
  v0 = gen_reg_rtx (SFmode);
  one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));

  /* x0 = 1./d estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
					  UNSPEC_FRES)));
  /* e0 = 1. - d * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MINUS (SFmode, one,
					 gen_rtx_MULT (SFmode, d, x0))));
  /* e1 = e0 + e0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, e0, e0), e0)));
  /* y1 = x0 + e1 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, e1, x0), x0)));
  /* u0 = n * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (SFmode, n, y1)));
  /* v0 = n - d * u0 */
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (SFmode, n,
					 gen_rtx_MULT (SFmode, d, u0))));
  /* dst = u0 + v0 * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, v0, y1), u0)));
}
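
/* A plain-C model of the sequence just emitted (illustrative only; it is
   not part of the compiler, and fres_estimate is a hypothetical stand-in
   for the fres instruction's low-precision 1/d estimate).  Each line
   corresponds to one emitted FP operation, all fusable into
   multiply-adds.  */
#if 0
static float fres_estimate (float d);	/* assumed accurate to ~2^-8 */

static float
swdivsf_model (float n, float d)
{
  float x0 = fres_estimate (d);	/* x0 = (1 - e)/d for small e */
  float e0 = 1.0f - d * x0;	/* e0 == e, the relative error */
  float e1 = e0 + e0 * e0;	/* 1 + e1 == 1 + e + e^2 */
  float y1 = x0 + e1 * x0;	/* y1 == (1 - e^3)/d: ~24 good bits */
  float u0 = n * y1;		/* first quotient approximation */
  float v0 = n - d * u0;	/* residual */
  return u0 + v0 * y1;		/* corrected quotient */
}
#endif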
/* Newton-Raphson approximation of double-precision floating-point
   divide n/d.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
{
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;

  x0 = gen_reg_rtx (DFmode);
  e0 = gen_reg_rtx (DFmode);
  e1 = gen_reg_rtx (DFmode);
  e2 = gen_reg_rtx (DFmode);
  y1 = gen_reg_rtx (DFmode);
  y2 = gen_reg_rtx (DFmode);
  y3 = gen_reg_rtx (DFmode);
  u0 = gen_reg_rtx (DFmode);
  v0 = gen_reg_rtx (DFmode);
  one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));

  /* x0 = 1./d estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
					  UNSPEC_FRES)));
  /* e0 = 1. - d * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MINUS (DFmode, one,
					 gen_rtx_MULT (DFmode, d, x0))));
  /* y1 = x0 + e0 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e0, x0), x0)));
  /* e1 = e0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_MULT (DFmode, e0, e0)));
  /* y2 = y1 + e1 * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, y2,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e1, y1), y1)));
  /* e2 = e1 * e1 */
  emit_insn (gen_rtx_SET (VOIDmode, e2,
			  gen_rtx_MULT (DFmode, e1, e1)));
  /* y3 = y2 + e2 * y2 */
  emit_insn (gen_rtx_SET (VOIDmode, y3,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e2, y2), y2)));
  /* u0 = n * y3 */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (DFmode, n, y3)));
  /* v0 = n - d * u0 */
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (DFmode, n,
					 gen_rtx_MULT (DFmode, d, u0))));
  /* dst = u0 + v0 * y3 */
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, v0, y3), u0)));
}
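
/* Why three refinement steps: if the initial estimate carries relative
   error e, each y(i+1) = y(i) + e(i)*y(i) step above squares it, so the
   error runs e -> e^2 -> e^4 -> e^8.  Assuming the hardware estimate is
   good to roughly 2^-8, e^8 is about 2^-64, comfortably past the 53-bit
   DFmode significand, whereas the single fused step of the SFmode
   version (error e^3) already suffices for 24 bits.  */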
/* Newton-Raphson approximation of single-precision floating-point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrtsf (rtx dst, rtx src)
{
  rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
    half, one, halfthree, c1, cond, label;

  x0 = gen_reg_rtx (SFmode);
  x1 = gen_reg_rtx (SFmode);
  x2 = gen_reg_rtx (SFmode);
  y1 = gen_reg_rtx (SFmode);
  u0 = gen_reg_rtx (SFmode);
  u1 = gen_reg_rtx (SFmode);
  u2 = gen_reg_rtx (SFmode);
  v0 = gen_reg_rtx (SFmode);
  v1 = gen_reg_rtx (SFmode);
  v2 = gen_reg_rtx (SFmode);
  t0 = gen_reg_rtx (SFmode);
  halfthree = gen_reg_rtx (SFmode);
  cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  /* check 0.0, 1.0, NaN, Inf by testing src * src = src */
  emit_insn (gen_rtx_SET (VOIDmode, t0,
			  gen_rtx_MULT (SFmode, src, src)));

  emit_insn (gen_rtx_SET (VOIDmode, cond,
			  gen_rtx_COMPARE (CCFPmode, t0, src)));
  c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (c1, label);

  half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
  one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));

  /* halfthree = 1.5 = 1.0 + 0.5 */
  emit_insn (gen_rtx_SET (VOIDmode, halfthree,
			  gen_rtx_PLUS (SFmode, one, half)));

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
					  UNSPEC_RSQRT)));

  /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_MINUS (SFmode,
					 gen_rtx_MULT (SFmode, src, halfthree),
					 src)));

  /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (SFmode, x0, x0)));
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u0))));
  emit_insn (gen_rtx_SET (VOIDmode, x1,
			  gen_rtx_MULT (SFmode, x0, v0)));

  /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
  emit_insn (gen_rtx_SET (VOIDmode, u1,
			  gen_rtx_MULT (SFmode, x1, x1)));
  emit_insn (gen_rtx_SET (VOIDmode, v1,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u1))));
  emit_insn (gen_rtx_SET (VOIDmode, x2,
			  gen_rtx_MULT (SFmode, x1, v1)));

  /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
  emit_insn (gen_rtx_SET (VOIDmode, u2,
			  gen_rtx_MULT (SFmode, x2, x2)));
  emit_insn (gen_rtx_SET (VOIDmode, v2,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u2))));
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_MULT (SFmode, x2, v2)));

  emit_label (XEXP (label, 0));
}
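
/* The underlying iteration is the classic Newton step for 1/sqrt(s),
   x' = x * (1.5 - 0.5*s * x*x), with y1 caching the invariant 0.5*src.
   Assuming the frsqrte estimate is good to roughly 2^-5, the error
   roughly squares per step (2^-5 -> ~2^-10 -> ~2^-20 -> ~2^-40), so two
   steps would fall short of the 24-bit SFmode significand and three are
   needed.  The early exit guards the special inputs 0.0, 1.0, NaN and
   Inf (for 0.0 and Inf the refinement would form 0 * Inf products).  */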
/* Emit popcount intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
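
/* A plain-C model of the SImode path (illustrative; popcntb () is a
   hypothetical stand-in for the popcntb instruction, which counts the
   set bits of each byte independently).  Multiplying the per-byte
   counts by 0x01010101 makes the top byte the sum of all four counts
   (each at most 8, so no carries between bytes), and the final shift
   extracts it.  */
#if 0
static unsigned int popcntb (unsigned int x);

static unsigned int
popcount_model (unsigned int src)
{
  unsigned int counts = popcntb (src);	/* per-byte popcounts */
  return (counts * 0x01010101u) >> 24;	/* sum into top byte, extract */
}
#endif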
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);
  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
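
/* The shift/xor fallback in plain C (illustrative; popcntb () is again
   a hypothetical stand-in for the per-byte popcount instruction).  XOR
   has no carries, so folding the count bytes onto each other preserves
   bit 0 of their sum, which is the parity of the whole word.  */
#if 0
static unsigned int popcntb (unsigned int x);

static unsigned int
parity_model (unsigned int src)
{
  unsigned int t = popcntb (src);	/* per-byte popcounts */
  t ^= t >> 16;				/* fold upper half onto lower */
  t ^= t >> 8;				/* fold remaining byte */
  return t & 1;				/* bit 0: parity of the word */
}
#endif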
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
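
/* For instance, with hard float a `float _Complex' return (SCmode,
   inner_bytes == 4) comes back as a PARALLEL of two SFmode pieces in
   fp1 and fp2 at byte offsets 0 and 4, while a `double _Complex'
   (inner_bytes == 8) is returned directly as a DCmode REG starting at
   fp1.  */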
/* Define how to find the value returned by a function.
   VALTYPE is the data type of the value (as a tree).
   If the precise function being called is known, FUNC is its FUNCTION_DECL;
   otherwise, FUNC is 0.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

rtx
rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (rs6000_darwin64_abi
      && TYPE_MODE (valtype) == BLKmode
      && TREE_CODE (valtype) == RECORD_TYPE
      && int_size_in_bytes (valtype) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return values must be split in the 32-bit -mpowerpc64
	 ABI.  */
      return gen_rtx_PARALLEL
	(DImode,
	 gen_rtvec (2,
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode, GP_ARG_RETURN),
				       const0_rtx),
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode,
						    GP_ARG_RETURN + 1),
				       GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL
	(DCmode,
	 gen_rtvec (4,
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode, GP_ARG_RETURN),
				       const0_rtx),
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode,
						    GP_ARG_RETURN + 1),
				       GEN_INT (4)),
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode,
						    GP_ARG_RETURN + 2),
				       GEN_INT (8)),
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode,
						    GP_ARG_RETURN + 3),
				       GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TREE_CODE (valtype) == VECTOR_TYPE
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return values must be split in the 32-bit -mpowerpc64
	 ABI.  */
      return gen_rtx_PARALLEL
	(DImode,
	 gen_rtvec (2,
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode, GP_ARG_RETURN),
				       const0_rtx),
		    gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (SImode,
						    GP_ARG_RETURN + 1),
				       GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
	   && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else if (ALTIVEC_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
	offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
	     ? info->fixed_size + info->vars_size + info->parm_size
	     : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
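
/* Reading the table: eliminating the soft frame pointer to the stack
   pointer in a pushed frame yields the distance from the stack pointer
   up to the base of the local-variable area (fixed_size + vars_size
   + parm_size when the frame grows downward), while eliminating the
   argument pointer to the stack pointer yields the whole frame size,
   since incoming arguments live just above the caller's frame.  */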
/* Return true if TYPE is a SPE or AltiVec opaque type.  */

static bool
rs6000_is_opaque_type (const_tree type)
{
  return (type == opaque_V2SI_type_node
	  || type == opaque_V2SF_type_node
	  || type == opaque_p_V2SI_type_node
	  || type == opaque_V4SI_type_node);
}
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  unsigned regno;

  if (TARGET_SPE
      && (SPE_VECTOR_MODE (GET_MODE (reg))
	  || (TARGET_E500_DOUBLE
	      && (GET_MODE (reg) == DFmode || GET_MODE (reg) == DDmode))))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  return
    gen_rtx_PARALLEL (VOIDmode,
		      BYTES_BIG_ENDIAN
		      ? gen_rtvec (2,
				   gen_rtx_REG (SImode, regno + 1200),
				   gen_rtx_REG (SImode, regno))
		      : gen_rtvec (2,
				   gen_rtx_REG (SImode, regno),
				   gen_rtx_REG (SImode, regno + 1200)));
}
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, 0);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
	{
	  int column = DWARF_REG_TO_UNWIND_COLUMN (i);
	  HOST_WIDE_INT offset
	    = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

	  emit_move_insn (adjust_address (mem, mode, offset), value);
	}
    }
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == MQ_REGNO)
    return 100;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == XER_REGNO)
    return 101;
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
/* Target hook eh_return_filter_mode.  */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}

/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
    return true;

  else
    return false;
}
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl,
				 const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	 ? N_("AltiVec argument passed to unprototyped function")
	 : NULL;
}

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using the __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
#include "gt-rs6000.h"