/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
   2012 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfglayout.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "gimple.h"
#include "tree-flow.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "opts.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
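
/* Note: rs6000_stack_info () (declared further below) fills in one of
   these per function; every *_offset field above is measured from the
   stack pointer value on entry to the function, i.e. the "initial SP"
   the member comments refer to.  */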
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call to
   obtain the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Reload functions based on the type and the vector unit.  */
static enum insn_code rs6000_vector_reload[NUM_MACHINE_MODES][2];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* 2 argument gen function typedef.  */
typedef rtx (*gen_2arg_fn_t) (rtx, rtx, rtx);

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, int, unsigned);


/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add).  */
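/* COSTS_N_INSNS (N) scales N by the cost the middle end assigns to a
   single fast integer instruction, so an entry such as
   COSTS_N_INSNS (19) for divsi below models an SImode divide as roughly
   nineteen add-equivalents when the rtx cost hooks consult these
   tables.  */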
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
};
/* Instruction costs on RIOS1 processors.  */
static const
struct processor_costs rios1_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (19),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  512,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RIOS2 processors.  */
static const
struct processor_costs rios2_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (13),   /* divsi */
  COSTS_N_INSNS (13),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  256,			/* cache line size */
  256,			/* l1 cache */
  1024,			/* l2 cache */
  0,			/* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const unsigned mask;
  const unsigned attr;
};
static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
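
/* Each RS6000_BUILTIN_* macro defined above turns one line of
   rs6000-builtin.def into a { name, icode, mask, attr } initializer, so
   this table stays index-for-index in sync with the builtin enumeration
   generated from the same .def file.  */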
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);


static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   enum machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
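
/* So, for example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000
   (%v0) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001
   (%v31), matching the bit layout of the VRSAVE register itself.  */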
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#if !TARGET_MACHO
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
#endif

/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif

/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
/* Simplifications for entries below.  */

enum {
  POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
  POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
};

/* Some OSs don't support saving the high part of 64-bit registers on context
   switch.  Other OSs don't support saving Altivec registers.  On those OSs, we
   don't touch the MASK_POWERPC64 or MASK_ALTIVEC settings; if the user wants
   either, the user must explicitly specify them and we won't interfere with
   the user's specification.  */

enum {
  POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
  POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		   | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		   | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		   | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
		   | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
		   | MASK_RECIP_PRECISION)
};
/* Masks for instructions set at various powerpc ISAs.  */
enum {
  ISA_2_1_MASKS = MASK_MFCRF,
  ISA_2_2_MASKS = (ISA_2_1_MASKS | MASK_POPCNTB),
  ISA_2_4_MASKS = (ISA_2_2_MASKS | MASK_FPRND),

  /* For ISA 2.05, do not add MFPGPR, since it isn't in ISA 2.06, and don't add
     ALTIVEC, since in general it isn't a win on power6.  In ISA 2.04, fsel,
     fre, fsqrt, etc. were no longer documented as optional.  Group masks by
     server and embedded.  */
  ISA_2_5_MASKS_EMBEDDED = (ISA_2_2_MASKS | MASK_CMPB | MASK_RECIP_PRECISION
			    | MASK_PPC_GFXOPT | MASK_PPC_GPOPT),
  ISA_2_5_MASKS_SERVER = (ISA_2_5_MASKS_EMBEDDED | MASK_DFP),

  /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but
     altivec is a win so enable it.  */
  ISA_2_6_MASKS_EMBEDDED = (ISA_2_5_MASKS_EMBEDDED | MASK_POPCNTD),
  ISA_2_6_MASKS_SERVER = (ISA_2_5_MASKS_SERVER | MASK_POPCNTD | MASK_ALTIVEC
			  | MASK_VSX)
};
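
/* For example, a server-class ISA 2.06 cpu such as power7 gets
   ISA_2_6_MASKS_SERVER: the ISA 2.05 server set plus MASK_POPCNTD,
   MASK_ALTIVEC and MASK_VSX.  */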
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const int target_enable;		/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
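
/* rs6000-cpus.def holds one RS6000_CPU (NAME, CPU, FLAGS) entry per
   supported -mcpu=/-mtune= name; the #include above expands each entry
   into a { NAME, CPU, FLAGS } initializer in this table.  */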
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
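
/* A non-negative result is an index into processor_target_table; e.g.
   rs6000_cpu_name_lookup ("power7") returns the slot whose processor and
   target_enable fields drive the handling of -mcpu=power7.  */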
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
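
/* Example: V2DFmode is 16 bytes, so without VSX it needs 16/8 == 2 FPRs
   (UNITS_PER_FP_WORD is 8), while with VSX it fits in a single 16-byte
   VSX register; the return expression is just a round-up division of the
   mode size by the register size.  */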
1575 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1576 MODE. */
1577 static int
1578 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1580 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1582 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1583 implementations. Don't allow an item to be split between a FP register
1584 and an Altivec register. */
1585 if (VECTOR_MEM_VSX_P (mode))
1587 if (FP_REGNO_P (regno))
1588 return FP_REGNO_P (last_regno);
1590 if (ALTIVEC_REGNO_P (regno))
1591 return ALTIVEC_REGNO_P (last_regno);
1594 /* The GPRs can hold any mode, but values bigger than one register
1595 cannot go past R31. */
1596 if (INT_REGNO_P (regno))
1597 return INT_REGNO_P (last_regno);
1599 /* The float registers (except for VSX vector modes) can only hold floating
1600 modes and DImode. This excludes the 32-bit decimal float mode for
1601 now. */
1602 if (FP_REGNO_P (regno))
1604 if (SCALAR_FLOAT_MODE_P (mode)
1605 && (mode != TDmode || (regno % 2) == 0)
1606 && FP_REGNO_P (last_regno))
1607 return 1;
1609 if (GET_MODE_CLASS (mode) == MODE_INT
1610 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1611 return 1;
1613 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1614 && PAIRED_VECTOR_MODE (mode))
1615 return 1;
1617 return 0;
1620 /* The CR register can only hold CC modes. */
1621 if (CR_REGNO_P (regno))
1622 return GET_MODE_CLASS (mode) == MODE_CC;
1624 if (CA_REGNO_P (regno))
1625 return mode == BImode;
1627 /* AltiVec modes go only in AltiVec registers. */
1628 if (ALTIVEC_REGNO_P (regno))
1629 return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);
1631 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1632 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1633 return 1;
1635 /* We cannot put TImode anywhere except the general registers, and it must
1636 be able to fit within the register set. In the future, allow TImode in
1637 the Altivec or VSX registers. */
1639 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
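/* For example, under VSX a V2DFmode value may live entirely in the FPRs
   or entirely in the AltiVec registers, but the checks above reject any
   placement that would straddle the boundary between the two files.  */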
1642 /* Print interesting facts about registers. */
1643 static void
1644 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1646 int r, m;
1648 for (r = first_regno; r <= last_regno; ++r)
1650 const char *comma = "";
1651 int len;
1653 if (first_regno == last_regno)
1654 fprintf (stderr, "%s:\t", reg_name);
1655 else
1656 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1658 len = 8;
1659 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1660 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1662 if (len > 70)
1664 fprintf (stderr, ",\n\t");
1665 len = 8;
1666 comma = "";
1669 if (rs6000_hard_regno_nregs[m][r] > 1)
1670 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1671 rs6000_hard_regno_nregs[m][r]);
1672 else
1673 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1675 comma = ", ";
1678 if (call_used_regs[r])
1680 if (len > 70)
1682 fprintf (stderr, ",\n\t");
1683 len = 8;
1684 comma = "";
1687 len += fprintf (stderr, "%s%s", comma, "call-used");
1688 comma = ", ";
1691 if (fixed_regs[r])
1693 if (len > 70)
1695 fprintf (stderr, ",\n\t");
1696 len = 8;
1697 comma = "";
1700 len += fprintf (stderr, "%s%s", comma, "fixed");
1701 comma = ", ";
1704 if (len > 70)
1706 fprintf (stderr, ",\n\t");
1707 comma = "";
1710 fprintf (stderr, "%sregno = %d\n", comma, r);
1714 #define DEBUG_FMT_D "%-32s= %d\n"
1715 #define DEBUG_FMT_X "%-32s= 0x%x\n"
1716 #define DEBUG_FMT_S "%-32s= %s\n"
1718 /* Print various interesting information with -mdebug=reg. */
1719 static void
1720 rs6000_debug_reg_global (void)
1722 static const char *const tf[2] = { "false", "true" };
1723 const char *nl = (const char *)0;
1724 int m;
1725 char costly_num[20];
1726 char nop_num[20];
1727 const char *costly_str;
1728 const char *nop_str;
1729 const char *trace_str;
1730 const char *abi_str;
1731 const char *cmodel_str;
1733 /* Map enum rs6000_vector to string. */
1734 static const char *rs6000_debug_vector_unit[] = {
1735 "none",
1736 "altivec",
1737 "vsx",
1738 "paired",
1739 "spe",
1740 "other"
1743 fprintf (stderr, "Register information: (last virtual reg = %d)\n",
1744 LAST_VIRTUAL_REGISTER);
1745 rs6000_debug_reg_print (0, 31, "gr");
1746 rs6000_debug_reg_print (32, 63, "fp");
1747 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
1748 LAST_ALTIVEC_REGNO,
1749 "vs");
1750 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
1751 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
1752 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
1753 rs6000_debug_reg_print (MQ_REGNO, MQ_REGNO, "mq");
1754 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
1755 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
1756 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
1757 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
1758 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
1760 fprintf (stderr,
1761 "\n"
1762 "d reg_class = %s\n"
1763 "f reg_class = %s\n"
1764 "v reg_class = %s\n"
1765 "wa reg_class = %s\n"
1766 "wd reg_class = %s\n"
1767 "wf reg_class = %s\n"
1768 "ws reg_class = %s\n\n",
1769 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
1770 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
1771 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
1772 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
1773 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
1774 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
1775 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]]);
1777 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1778 if (rs6000_vector_unit[m] || rs6000_vector_mem[m])
1780 nl = "\n";
1781 fprintf (stderr, "Vector mode: %-5s arithmetic: %-8s move: %-8s\n",
1782 GET_MODE_NAME (m),
1783 rs6000_debug_vector_unit[ rs6000_vector_unit[m] ],
1784 rs6000_debug_vector_unit[ rs6000_vector_mem[m] ]);
1787 if (nl)
1788 fputs (nl, stderr);
1790 if (rs6000_recip_control)
1792 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
1794 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1795 if (rs6000_recip_bits[m])
1797 fprintf (stderr,
1798 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
1799 GET_MODE_NAME (m),
1800 (RS6000_RECIP_AUTO_RE_P (m)
1801 ? "auto"
1802 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
1803 (RS6000_RECIP_AUTO_RSQRTE_P (m)
1804 ? "auto"
1805 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
1808 fputs ("\n", stderr);
1811 if (rs6000_cpu_index >= 0)
1812 fprintf (stderr, DEBUG_FMT_S, "cpu",
1813 processor_target_table[rs6000_cpu_index].name);
1815 if (rs6000_tune_index >= 0)
1816 fprintf (stderr, DEBUG_FMT_S, "tune",
1817 processor_target_table[rs6000_tune_index].name);
1819 switch (rs6000_sched_costly_dep)
1821 case max_dep_latency:
1822 costly_str = "max_dep_latency";
1823 break;
1825 case no_dep_costly:
1826 costly_str = "no_dep_costly";
1827 break;
1829 case all_deps_costly:
1830 costly_str = "all_deps_costly";
1831 break;
1833 case true_store_to_load_dep_costly:
1834 costly_str = "true_store_to_load_dep_costly";
1835 break;
1837 case store_to_load_dep_costly:
1838 costly_str = "store_to_load_dep_costly";
1839 break;
1841 default:
1842 costly_str = costly_num;
1843 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
1844 break;
1847 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
1849 switch (rs6000_sched_insert_nops)
1851 case sched_finish_regroup_exact:
1852 nop_str = "sched_finish_regroup_exact";
1853 break;
1855 case sched_finish_pad_groups:
1856 nop_str = "sched_finish_pad_groups";
1857 break;
1859 case sched_finish_none:
1860 nop_str = "sched_finish_none";
1861 break;
1863 default:
1864 nop_str = nop_num;
1865 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
1866 break;
1869 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
1871 switch (rs6000_sdata)
1873 default:
1874 case SDATA_NONE:
1875 break;
1877 case SDATA_DATA:
1878 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
1879 break;
1881 case SDATA_SYSV:
1882 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
1883 break;
1885 case SDATA_EABI:
1886 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
1887 break;
1891 switch (rs6000_traceback)
1893 case traceback_default: trace_str = "default"; break;
1894 case traceback_none: trace_str = "none"; break;
1895 case traceback_part: trace_str = "part"; break;
1896 case traceback_full: trace_str = "full"; break;
1897 default: trace_str = "unknown"; break;
1900 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
1902 switch (rs6000_current_cmodel)
1904 case CMODEL_SMALL: cmodel_str = "small"; break;
1905 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
1906 case CMODEL_LARGE: cmodel_str = "large"; break;
1907 default: cmodel_str = "unknown"; break;
1910 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
1912 switch (rs6000_current_abi)
1914 case ABI_NONE: abi_str = "none"; break;
1915 case ABI_AIX: abi_str = "aix"; break;
1916 case ABI_V4: abi_str = "V4"; break;
1917 case ABI_DARWIN: abi_str = "darwin"; break;
1918 default: abi_str = "unknown"; break;
1921 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
1923 if (rs6000_altivec_abi)
1924 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
1926 if (rs6000_spe_abi)
1927 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
1929 if (rs6000_darwin64_abi)
1930 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
1932 if (rs6000_float_gprs)
1933 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
1935 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
1936 fprintf (stderr, DEBUG_FMT_S, "align_branch",
1937 tf[!!rs6000_align_branch_targets]);
1938 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
1939 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
1940 rs6000_long_double_type_size);
1941 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
1942 (int)rs6000_sched_restricted_insns_priority);
1943 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
1944 (int)END_BUILTINS);
1945 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
1946 (int)RS6000_BUILTIN_COUNT);
1947 fprintf (stderr, DEBUG_FMT_X, "Builtin mask", rs6000_builtin_mask);
1950 /* Initialize the various global tables that are based on register size. */
1951 static void
1952 rs6000_init_hard_regno_mode_ok (bool global_init_p)
1954 int r, m, c;
1955 int align64;
1956 int align32;
1958 /* Precalculate REGNO_REG_CLASS. */
1959 rs6000_regno_regclass[0] = GENERAL_REGS;
1960 for (r = 1; r < 32; ++r)
1961 rs6000_regno_regclass[r] = BASE_REGS;
1963 for (r = 32; r < 64; ++r)
1964 rs6000_regno_regclass[r] = FLOAT_REGS;
1966 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
1967 rs6000_regno_regclass[r] = NO_REGS;
1969 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
1970 rs6000_regno_regclass[r] = ALTIVEC_REGS;
1972 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
1973 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
1974 rs6000_regno_regclass[r] = CR_REGS;
1976 rs6000_regno_regclass[MQ_REGNO] = MQ_REGS;
1977 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
1978 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
1979 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
1980 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
1981 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
1982 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
1983 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
1984 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
1985 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
1987 /* Precalculate vector information; this must be set up before the
1988 calls to rs6000_hard_regno_nregs_internal below. */
1989 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1991 rs6000_vector_unit[m] = rs6000_vector_mem[m] = VECTOR_NONE;
1992 rs6000_vector_reload[m][0] = CODE_FOR_nothing;
1993 rs6000_vector_reload[m][1] = CODE_FOR_nothing;
1996 for (c = 0; c < (int)RS6000_CONSTRAINT_MAX; c++)
1997 rs6000_constraints[c] = NO_REGS;
1999 /* The VSX hardware allows native alignment for vectors, but we control whether
2000 the compiler believes it can use native alignment or still uses 128-bit alignment. */
2001 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2003 align64 = 64;
2004 align32 = 32;
2006 else
2008 align64 = 128;
2009 align32 = 128;
2012 /* V2DF mode, VSX only. */
2013 if (TARGET_VSX)
2015 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2016 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2017 rs6000_vector_align[V2DFmode] = align64;
2020 /* V4SF mode, either VSX or Altivec. */
2021 if (TARGET_VSX)
2023 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2024 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2025 rs6000_vector_align[V4SFmode] = align32;
2027 else if (TARGET_ALTIVEC)
2029 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2030 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2031 rs6000_vector_align[V4SFmode] = align32;
2034 /* V16QImode, V8HImode, V4SImode are Altivec only for arithmetic, but may
2035 use VSX for loads and stores. */
2036 if (TARGET_ALTIVEC)
2038 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2039 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2040 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2041 rs6000_vector_align[V4SImode] = align32;
2042 rs6000_vector_align[V8HImode] = align32;
2043 rs6000_vector_align[V16QImode] = align32;
2045 if (TARGET_VSX)
2047 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2048 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2049 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2051 else
2053 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2054 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2055 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2059 /* V2DImode, only allow under VSX, which can do V2DI insert/splat/extract.
2060 Altivec doesn't have 64-bit support. */
2061 if (TARGET_VSX)
2063 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2064 rs6000_vector_unit[V2DImode] = VECTOR_NONE;
2065 rs6000_vector_align[V2DImode] = align64;
2068 /* DFmode, see if we want to use the VSX unit. */
2069 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2071 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2072 rs6000_vector_mem[DFmode]
2073 = (TARGET_VSX_SCALAR_MEMORY ? VECTOR_VSX : VECTOR_NONE);
2074 rs6000_vector_align[DFmode] = align64;
2077 /* TODO add SPE and paired floating point vector support. */
2079 /* Register class constraints for the constraints that depend on compile
2080 switches. */
2081 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2082 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
2084 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2085 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;
2087 if (TARGET_VSX)
2089 /* At present, we just use VSX_REGS, but we have different constraints
2090 based on the use, in case we want to fine tune the default register
2091 class used. wa = any VSX register, wf = register class to use for
2092 V4SF, wd = register class to use for V2DF, and ws = register class to
2093 use for DF scalars. */
2094 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2095 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;
2096 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
2097 rs6000_constraints[RS6000_CONSTRAINT_ws] = (TARGET_VSX_SCALAR_MEMORY
2098 ? VSX_REGS
2099 : FLOAT_REGS);
2102 if (TARGET_ALTIVEC)
2103 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2105 /* Set up the reload helper functions. */
2106 if (TARGET_VSX || TARGET_ALTIVEC)
2108 if (TARGET_64BIT)
2110 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_di_store;
2111 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_di_load;
2112 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_di_store;
2113 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_di_load;
2114 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_di_store;
2115 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_di_load;
2116 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_di_store;
2117 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_di_load;
2118 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_di_store;
2119 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_di_load;
2120 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_di_store;
2121 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_di_load;
2122 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2124 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_di_store;
2125 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_di_load;
2128 else
2130 rs6000_vector_reload[V16QImode][0] = CODE_FOR_reload_v16qi_si_store;
2131 rs6000_vector_reload[V16QImode][1] = CODE_FOR_reload_v16qi_si_load;
2132 rs6000_vector_reload[V8HImode][0] = CODE_FOR_reload_v8hi_si_store;
2133 rs6000_vector_reload[V8HImode][1] = CODE_FOR_reload_v8hi_si_load;
2134 rs6000_vector_reload[V4SImode][0] = CODE_FOR_reload_v4si_si_store;
2135 rs6000_vector_reload[V4SImode][1] = CODE_FOR_reload_v4si_si_load;
2136 rs6000_vector_reload[V2DImode][0] = CODE_FOR_reload_v2di_si_store;
2137 rs6000_vector_reload[V2DImode][1] = CODE_FOR_reload_v2di_si_load;
2138 rs6000_vector_reload[V4SFmode][0] = CODE_FOR_reload_v4sf_si_store;
2139 rs6000_vector_reload[V4SFmode][1] = CODE_FOR_reload_v4sf_si_load;
2140 rs6000_vector_reload[V2DFmode][0] = CODE_FOR_reload_v2df_si_store;
2141 rs6000_vector_reload[V2DFmode][1] = CODE_FOR_reload_v2df_si_load;
2142 if (TARGET_VSX && TARGET_VSX_SCALAR_MEMORY)
2144 rs6000_vector_reload[DFmode][0] = CODE_FOR_reload_df_si_store;
2145 rs6000_vector_reload[DFmode][1] = CODE_FOR_reload_df_si_load;
2150 /* Precalculate HARD_REGNO_NREGS. */
2151 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2152 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2153 rs6000_hard_regno_nregs[m][r]
2154 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2156 /* Precalculate HARD_REGNO_MODE_OK. */
2157 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2158 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2159 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2160 rs6000_hard_regno_mode_ok_p[m][r] = true;
2162 /* Precalculate CLASS_MAX_NREGS sizes. */
2163 for (c = 0; c < LIM_REG_CLASSES; ++c)
2165 int reg_size;
2167 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2168 reg_size = UNITS_PER_VSX_WORD;
2170 else if (c == ALTIVEC_REGS)
2171 reg_size = UNITS_PER_ALTIVEC_WORD;
2173 else if (c == FLOAT_REGS)
2174 reg_size = UNITS_PER_FP_WORD;
2176 else
2177 reg_size = UNITS_PER_WORD;
2179 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2180 rs6000_class_max_nregs[m][c]
2181 = (GET_MODE_SIZE (m) + reg_size - 1) / reg_size;
2184 if (TARGET_E500_DOUBLE)
2185 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2187 /* Calculate which modes to automatically generate code to use the
2188 reciprocal divide and square root instructions. In the future, possibly
2189 automatically generate the instructions even if the user did not specify
2190 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
2191 not accurate enough. */
2192 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2193 if (TARGET_FRES)
2194 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2195 if (TARGET_FRE)
2196 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2197 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2198 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2199 if (VECTOR_UNIT_VSX_P (V2DFmode))
2200 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2202 if (TARGET_FRSQRTES)
2203 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2204 if (TARGET_FRSQRTE)
2205 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2206 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2207 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2208 if (VECTOR_UNIT_VSX_P (V2DFmode))
2209 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2211 if (rs6000_recip_control)
2213 if (!flag_finite_math_only)
2214 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2215 if (flag_trapping_math)
2216 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2217 if (!flag_reciprocal_math)
2218 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2219 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2221 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2222 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2223 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2225 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2226 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2227 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2229 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2230 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2231 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2233 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2234 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2235 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2237 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2238 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2239 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2241 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2242 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2243 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2245 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2246 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2247 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2249 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2250 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2251 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2255 if (global_init_p || TARGET_DEBUG_TARGET)
2257 if (TARGET_DEBUG_REG)
2258 rs6000_debug_reg_global ();
2260 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2261 fprintf (stderr,
2262 "SImode variable mult cost = %d\n"
2263 "SImode constant mult cost = %d\n"
2264 "SImode short constant mult cost = %d\n"
2265 "DImode multipliciation cost = %d\n"
2266 "SImode division cost = %d\n"
2267 "DImode division cost = %d\n"
2268 "Simple fp operation cost = %d\n"
2269 "DFmode multiplication cost = %d\n"
2270 "SFmode division cost = %d\n"
2271 "DFmode division cost = %d\n"
2272 "cache line size = %d\n"
2273 "l1 cache size = %d\n"
2274 "l2 cache size = %d\n"
2275 "simultaneous prefetches = %d\n"
2276 "\n",
2277 rs6000_cost->mulsi,
2278 rs6000_cost->mulsi_const,
2279 rs6000_cost->mulsi_const9,
2280 rs6000_cost->muldi,
2281 rs6000_cost->divsi,
2282 rs6000_cost->divdi,
2283 rs6000_cost->fp,
2284 rs6000_cost->dmul,
2285 rs6000_cost->sdiv,
2286 rs6000_cost->ddiv,
2287 rs6000_cost->cache_line_size,
2288 rs6000_cost->l1_cache_size,
2289 rs6000_cost->l2_cache_size,
2290 rs6000_cost->simultaneous_prefetches);
2294 #if TARGET_MACHO
2295 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
2297 static void
2298 darwin_rs6000_override_options (void)
2300 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
2301 off. */
2302 rs6000_altivec_abi = 1;
2303 TARGET_ALTIVEC_VRSAVE = 1;
2304 rs6000_current_abi = ABI_DARWIN;
2306 if (DEFAULT_ABI == ABI_DARWIN
2307 && TARGET_64BIT)
2308 darwin_one_byte_bool = 1;
2310 if (TARGET_64BIT && ! TARGET_POWERPC64)
2312 target_flags |= MASK_POWERPC64;
2313 warning (0, "-m64 requires PowerPC64 architecture, enabling");
2315 if (flag_mkernel)
2317 rs6000_default_long_calls = 1;
2318 target_flags |= MASK_SOFT_FLOAT;
2321 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
2322 Altivec. */
2323 if (!flag_mkernel && !flag_apple_kext
2324 && TARGET_64BIT
2325 && ! (target_flags_explicit & MASK_ALTIVEC))
2326 target_flags |= MASK_ALTIVEC;
2328 /* Unless the user (not the configurer) has explicitly overridden
2329 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
2330 G4 unless targeting the kernel. */
2331 if (!flag_mkernel
2332 && !flag_apple_kext
2333 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
2334 && ! (target_flags_explicit & MASK_ALTIVEC)
2335 && ! global_options_set.x_rs6000_cpu_index)
2337 target_flags |= MASK_ALTIVEC;
2340 #endif
2342 /* If not otherwise specified by a target, make 'long double' equivalent to
2343 'double'. */
2345 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
2346 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
2347 #endif
2349 /* Return the builtin mask of the various options used that could affect which
2350 builtins were used. In the past we used target_flags, but we've run out of
2351 bits, and some options like SPE and PAIRED are no longer in
2352 target_flags. */
2354 unsigned
2355 rs6000_builtin_mask_calculate (void)
2357 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
2358 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
2359 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
2360 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
2361 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
2362 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
2363 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
2364 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
2365 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
2366 | ((TARGET_POWERPC) ? RS6000_BTM_POWERPC : 0)
2367 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0));
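/* For example, -mcpu=power7 enables TARGET_ALTIVEC, TARGET_VSX and
   TARGET_POPCNTD, so the mask computed here contains at least
   RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_POPCNTD on top of
   RS6000_BTM_POWERPC.  */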
2370 /* Override command line options. Mostly we process the processor type and
2371 sometimes adjust other TARGET_ options. */
2373 static bool
2374 rs6000_option_override_internal (bool global_init_p)
2376 bool ret = true;
2377 bool have_cpu = false;
2379 /* The default cpu requested at configure time, if any. */
2380 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
2382 int set_masks;
2383 int cpu_index;
2384 int tune_index;
2385 struct cl_target_option *main_target_opt
2386 = ((global_init_p || target_option_default_node == NULL)
2387 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
2389 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
2390 library functions, so warn about it. The flag may be useful for
2391 performance studies from time to time though, so don't disable it
2392 entirely. */
2393 if (global_options_set.x_rs6000_alignment_flags
2394 && rs6000_alignment_flags == MASK_ALIGN_POWER
2395 && DEFAULT_ABI == ABI_DARWIN
2396 && TARGET_64BIT)
2397 warning (0, "-malign-power is not supported for 64-bit Darwin;"
2398 " it is incompatible with the installed C and C++ libraries");
2400 /* Numerous experiments show that IRA-based loop pressure
2401 calculation works better for RTL loop invariant motion on targets
2402 with enough (>= 32) registers. It is an expensive optimization,
2403 so it is enabled only when optimizing for peak performance. */
2404 if (optimize >= 3 && global_init_p)
2405 flag_ira_loop_pressure = 1;
2407 /* Set the pointer size. */
2408 if (TARGET_64BIT)
2410 rs6000_pmode = (int)DImode;
2411 rs6000_pointer_size = 64;
2413 else
2415 rs6000_pmode = (int)SImode;
2416 rs6000_pointer_size = 32;
2419 set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
2420 #ifdef OS_MISSING_POWERPC64
2421 if (OS_MISSING_POWERPC64)
2422 set_masks &= ~MASK_POWERPC64;
2423 #endif
2424 #ifdef OS_MISSING_ALTIVEC
2425 if (OS_MISSING_ALTIVEC)
2426 set_masks &= ~MASK_ALTIVEC;
2427 #endif
2429 /* Don't override by the processor default if given explicitly. */
2430 set_masks &= ~target_flags_explicit;
2432 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
2433 the cpu in a target attribute or pragma, but did not specify a tuning
2434 option, use the cpu for the tuning option rather than the option specified
2435 with -mtune on the command line. Process a '--with-cpu' configuration
2436 request as an implicit -mcpu.
2437 if (rs6000_cpu_index >= 0)
2439 cpu_index = rs6000_cpu_index;
2440 have_cpu = true;
2442 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
2444 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
2445 have_cpu = true;
2447 else
2449 const char *default_cpu =
2450 (implicit_cpu ? implicit_cpu
2451 : (TARGET_POWERPC64 ? "powerpc64" : "powerpc"));
2453 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
2454 have_cpu = implicit_cpu != 0;
2457 gcc_assert (cpu_index >= 0);
2459 target_flags &= ~set_masks;
2460 target_flags |= (processor_target_table[cpu_index].target_enable
2461 & set_masks);
2463 if (rs6000_tune_index >= 0)
2464 tune_index = rs6000_tune_index;
2465 else if (have_cpu)
2466 rs6000_tune_index = tune_index = cpu_index;
2467 else
2469 size_t i;
2470 enum processor_type tune_proc
2471 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
2473 tune_index = -1;
2474 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2475 if (processor_target_table[i].processor == tune_proc)
2477 rs6000_tune_index = tune_index = i;
2478 break;
2482 gcc_assert (tune_index >= 0);
2483 rs6000_cpu = processor_target_table[tune_index].processor;
2485 /* Pick defaults for the SPE-related control flags. Do this early to make
2486 sure that the TARGET_ macros are representative ASAP. */
2488 int spe_capable_cpu =
2489 (rs6000_cpu == PROCESSOR_PPC8540
2490 || rs6000_cpu == PROCESSOR_PPC8548);
2492 if (!global_options_set.x_rs6000_spe_abi)
2493 rs6000_spe_abi = spe_capable_cpu;
2495 if (!global_options_set.x_rs6000_spe)
2496 rs6000_spe = spe_capable_cpu;
2498 if (!global_options_set.x_rs6000_float_gprs)
2499 rs6000_float_gprs =
2500 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
2501 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
2502 : 0);
2505 if (global_options_set.x_rs6000_spe_abi
2506 && rs6000_spe_abi
2507 && !TARGET_SPE_ABI)
2508 error ("not configured for SPE ABI");
2510 if (global_options_set.x_rs6000_spe
2511 && rs6000_spe
2512 && !TARGET_SPE)
2513 error ("not configured for SPE instruction set");
2515 if (main_target_opt != NULL
2516 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
2517 || (main_target_opt->x_rs6000_spe != rs6000_spe)
2518 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
2519 error ("target attribute or pragma changes SPE ABI");
2521 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
2522 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64)
2524 if (TARGET_ALTIVEC)
2525 error ("AltiVec not supported in this target");
2526 if (TARGET_SPE)
2527 error ("SPE not supported in this target");
2530 /* Disable Cell microcode if we are optimizing for the Cell
2531 and not optimizing for size. */
2532 if (rs6000_gen_cell_microcode == -1)
2533 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
2534 && !optimize_size);
2536 /* If we are optimizing big endian systems for space and it's OK to
2537 use instructions that would be microcoded on the Cell, use the
2538 load/store multiple and string instructions. */
2539 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
2540 target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
2542 /* Don't allow -mmultiple or -mstring on little endian systems
2543 unless the cpu is a 750, because the hardware doesn't support the
2544 instructions used in little endian mode, and using them causes an
2545 alignment trap. The 750 does not cause an alignment trap (except
2546 when the target is unaligned). */
2548 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
2550 if (TARGET_MULTIPLE)
2552 target_flags &= ~MASK_MULTIPLE;
2553 if ((target_flags_explicit & MASK_MULTIPLE) != 0)
2554 warning (0, "-mmultiple is not supported on little endian systems");
2557 if (TARGET_STRING)
2559 target_flags &= ~MASK_STRING;
2560 if ((target_flags_explicit & MASK_STRING) != 0)
2561 warning (0, "-mstring is not supported on little endian systems");
2565 /* Add some warnings for VSX. */
2566 if (TARGET_VSX)
2568 const char *msg = NULL;
2569 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
2570 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
2572 if (target_flags_explicit & MASK_VSX)
2573 msg = N_("-mvsx requires hardware floating point");
2574 else
2575 target_flags &= ~ MASK_VSX;
2577 else if (TARGET_PAIRED_FLOAT)
2578 msg = N_("-mvsx and -mpaired are incompatible");
2579 /* The hardware will allow VSX and little endian, but until we make sure
2580 things like vector select, etc. work, don't allow VSX on little endian
2581 systems at this point. */
2582 else if (!BYTES_BIG_ENDIAN)
2583 msg = N_("-mvsx used with little endian code");
2584 else if (TARGET_AVOID_XFORM > 0)
2585 msg = N_("-mvsx needs indexed addressing");
2586 else if (!TARGET_ALTIVEC && (target_flags_explicit & MASK_ALTIVEC))
2588 if (target_flags_explicit & MASK_VSX)
2589 msg = N_("-mvsx and -mno-altivec are incompatible");
2590 else
2591 msg = N_("-mno-altivec disables vsx");
2594 if (msg)
2596 warning (0, msg);
2597 target_flags &= ~ MASK_VSX;
2598 target_flags_explicit |= MASK_VSX;
2602 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
2603 unless the user explicitly used -mno-<option> to disable the code. */
2604 if (TARGET_VSX)
2605 target_flags |= (ISA_2_6_MASKS_SERVER & ~target_flags_explicit);
2606 else if (TARGET_POPCNTD)
2607 target_flags |= (ISA_2_6_MASKS_EMBEDDED & ~target_flags_explicit);
2608 else if (TARGET_DFP)
2609 target_flags |= (ISA_2_5_MASKS_SERVER & ~target_flags_explicit);
2610 else if (TARGET_CMPB)
2611 target_flags |= (ISA_2_5_MASKS_EMBEDDED & ~target_flags_explicit);
2612 else if (TARGET_FPRND)
2613 target_flags |= (ISA_2_4_MASKS & ~target_flags_explicit);
2614 else if (TARGET_POPCNTB)
2615 target_flags |= (ISA_2_2_MASKS & ~target_flags_explicit);
2616 else if (TARGET_ALTIVEC)
2617 target_flags |= (MASK_PPC_GFXOPT & ~target_flags_explicit);
2619 /* E500mc does "better" if we inline more aggressively. Respect the
2620 user's opinion, though. */
2621 if (rs6000_block_move_inline_limit == 0
2622 && (rs6000_cpu == PROCESSOR_PPCE500MC
2623 || rs6000_cpu == PROCESSOR_PPCE500MC64))
2624 rs6000_block_move_inline_limit = 128;
2626 /* store_one_arg depends on expand_block_move to handle at least the
2627 size of reg_parm_stack_space. */
2628 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
2629 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
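/* As a consequence, even an explicit -mblock-move-inline-limit=16 is
   raised to 64 on a 64-bit target (32 on 32-bit), because store_one_arg
   relies on inline expansion up to reg_parm_stack_space bytes.  */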
2631 if (global_init_p)
2633 /* If the appropriate debug option is enabled, replace the target hooks
2634 with debug versions that call the real version and then print
2635 debugging information. */
2636 if (TARGET_DEBUG_COST)
2638 targetm.rtx_costs = rs6000_debug_rtx_costs;
2639 targetm.address_cost = rs6000_debug_address_cost;
2640 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
2643 if (TARGET_DEBUG_ADDR)
2645 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
2646 targetm.legitimize_address = rs6000_debug_legitimize_address;
2647 rs6000_secondary_reload_class_ptr
2648 = rs6000_debug_secondary_reload_class;
2649 rs6000_secondary_memory_needed_ptr
2650 = rs6000_debug_secondary_memory_needed;
2651 rs6000_cannot_change_mode_class_ptr
2652 = rs6000_debug_cannot_change_mode_class;
2653 rs6000_preferred_reload_class_ptr
2654 = rs6000_debug_preferred_reload_class;
2655 rs6000_legitimize_reload_address_ptr
2656 = rs6000_debug_legitimize_reload_address;
2657 rs6000_mode_dependent_address_ptr
2658 = rs6000_debug_mode_dependent_address;
2661 if (rs6000_veclibabi_name)
2663 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
2664 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
2665 else
2667 error ("unknown vectorization library ABI type (%s) for "
2668 "-mveclibabi= switch", rs6000_veclibabi_name);
2669 ret = false;
2674 if (!global_options_set.x_rs6000_long_double_type_size)
2676 if (main_target_opt != NULL
2677 && (main_target_opt->x_rs6000_long_double_type_size
2678 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
2679 error ("target attribute or pragma changes long double size");
2680 else
2681 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
2684 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
2685 if (!global_options_set.x_rs6000_ieeequad)
2686 rs6000_ieeequad = 1;
2687 #endif
2689 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
2690 target attribute or pragma, which automatically enables both options,
2691 unless the altivec ABI was set. The altivec ABI is set by default for
2692 64-bit, but not for 32-bit. */
2693 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2694 target_flags &= ~((MASK_VSX | MASK_ALTIVEC) & ~target_flags_explicit);
2696 /* Enable Altivec ABI for AIX -maltivec. */
2697 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
2699 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
2700 error ("target attribute or pragma changes AltiVec ABI");
2701 else
2702 rs6000_altivec_abi = 1;
2705 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
2706 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
2707 be explicitly overridden in either case. */
2708 if (TARGET_ELF)
2710 if (!global_options_set.x_rs6000_altivec_abi
2711 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
2713 if (main_target_opt != NULL &&
2714 !main_target_opt->x_rs6000_altivec_abi)
2715 error ("target attribute or pragma changes AltiVec ABI");
2716 else
2717 rs6000_altivec_abi = 1;
2720 /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
2721 if (!global_options_set.x_TARGET_ALTIVEC_VRSAVE)
2722 TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
2725 /* Set the Darwin64 ABI as default for 64-bit Darwin.
2726 So far, the only darwin64 targets are also MACH-O. */
2727 if (TARGET_MACHO
2728 && DEFAULT_ABI == ABI_DARWIN
2729 && TARGET_64BIT)
2731 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
2732 error ("target attribute or pragma changes darwin64 ABI");
2733 else
2735 rs6000_darwin64_abi = 1;
2736 /* Default to natural alignment, for better performance. */
2737 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
2741 /* Place FP constants in the constant pool instead of the TOC
2742 if section anchors are enabled. */
2743 if (flag_section_anchors)
2744 TARGET_NO_FP_IN_TOC = 1;
2746 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2747 SUBTARGET_OVERRIDE_OPTIONS;
2748 #endif
2749 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2750 SUBSUBTARGET_OVERRIDE_OPTIONS;
2751 #endif
2752 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
2753 SUB3TARGET_OVERRIDE_OPTIONS;
2754 #endif
2756 /* For the E500 family of cores, reset the single/double FP flags to let us
2757 check that they remain constant across attributes or pragmas. Also,
2758 clear a possible request for string instructions, which are not supported
2759 and which we might have silently enabled above for -Os.
2761 For other families, clear ISEL in case it was set implicitly. */
2764 switch (rs6000_cpu)
2766 case PROCESSOR_PPC8540:
2767 case PROCESSOR_PPC8548:
2768 case PROCESSOR_PPCE500MC:
2769 case PROCESSOR_PPCE500MC64:
2771 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
2772 rs6000_double_float = TARGET_E500_DOUBLE;
2774 target_flags &= ~MASK_STRING;
2776 break;
2778 default:
2780 if (have_cpu && !(target_flags_explicit & MASK_ISEL))
2781 target_flags &= ~MASK_ISEL;
2783 break;
2786 if (main_target_opt)
2788 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
2789 error ("target attribute or pragma changes single precision floating "
2790 "point");
2791 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
2792 error ("target attribute or pragma changes double precision floating "
2793 "point");
2796 /* Detect invalid option combinations with E500. */
2797 CHECK_E500_OPTIONS;
2799 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
2800 && rs6000_cpu != PROCESSOR_POWER5
2801 && rs6000_cpu != PROCESSOR_POWER6
2802 && rs6000_cpu != PROCESSOR_POWER7
2803 && rs6000_cpu != PROCESSOR_PPCA2
2804 && rs6000_cpu != PROCESSOR_CELL
2805 && rs6000_cpu != PROCESSOR_PPC476);
2806 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
2807 || rs6000_cpu == PROCESSOR_POWER5
2808 || rs6000_cpu == PROCESSOR_POWER7);
2809 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
2810 || rs6000_cpu == PROCESSOR_POWER5
2811 || rs6000_cpu == PROCESSOR_POWER6
2812 || rs6000_cpu == PROCESSOR_POWER7
2813 || rs6000_cpu == PROCESSOR_PPCE500MC
2814 || rs6000_cpu == PROCESSOR_PPCE500MC64);
2816 /* Allow debug switches to override the above settings. These are set to -1
2817 in rs6000.opt to indicate the user hasn't directly set the switch. */
2818 if (TARGET_ALWAYS_HINT >= 0)
2819 rs6000_always_hint = TARGET_ALWAYS_HINT;
2821 if (TARGET_SCHED_GROUPS >= 0)
2822 rs6000_sched_groups = TARGET_SCHED_GROUPS;
2824 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
2825 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
2827 rs6000_sched_restricted_insns_priority
2828 = (rs6000_sched_groups ? 1 : 0);
2830 /* Handle -msched-costly-dep option. */
2831 rs6000_sched_costly_dep
2832 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
2834 if (rs6000_sched_costly_dep_str)
2836 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
2837 rs6000_sched_costly_dep = no_dep_costly;
2838 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
2839 rs6000_sched_costly_dep = all_deps_costly;
2840 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
2841 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
2842 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
2843 rs6000_sched_costly_dep = store_to_load_dep_costly;
2844 else
2845 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
2846 atoi (rs6000_sched_costly_dep_str));
2849 /* Handle -minsert-sched-nops option. */
2850 rs6000_sched_insert_nops
2851 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
2853 if (rs6000_sched_insert_nops_str)
2855 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
2856 rs6000_sched_insert_nops = sched_finish_none;
2857 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
2858 rs6000_sched_insert_nops = sched_finish_pad_groups;
2859 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
2860 rs6000_sched_insert_nops = sched_finish_regroup_exact;
2861 else
2862 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
2863 atoi (rs6000_sched_insert_nops_str));
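/* For example, -minsert-sched-nops=pad selects sched_finish_pad_groups,
   while any other argument (e.g. a bare number) falls through to atoi
   above and is used directly as the strategy value.  */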
2866 if (global_init_p)
2868 #ifdef TARGET_REGNAMES
2869 /* If the user desires alternate register names, copy in the
2870 alternate names now. */
2871 if (TARGET_REGNAMES)
2872 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
2873 #endif
2875 /* Set aix_struct_return last, after the ABI is determined.
2876 If -maix-struct-return or -msvr4-struct-return was explicitly
2877 used, don't override with the ABI default. */
2878 if (!global_options_set.x_aix_struct_return)
2879 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
2881 #if 0
2882 /* IBM XL compiler defaults to unsigned bitfields. */
2883 if (TARGET_XL_COMPAT)
2884 flag_signed_bitfields = 0;
2885 #endif
2887 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
2888 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
2890 if (TARGET_TOC)
2891 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
2893 /* We can only guarantee the availability of DI pseudo-ops when
2894 assembling for 64-bit targets. */
2895 if (!TARGET_64BIT)
2897 targetm.asm_out.aligned_op.di = NULL;
2898 targetm.asm_out.unaligned_op.di = NULL;
2902 /* Set branch target alignment, if not optimizing for size. */
2903 if (!optimize_size)
2905 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
2906 8-byte aligned to avoid misprediction by the branch predictor. */
2907 if (rs6000_cpu == PROCESSOR_TITAN
2908 || rs6000_cpu == PROCESSOR_CELL)
2910 if (align_functions <= 0)
2911 align_functions = 8;
2912 if (align_jumps <= 0)
2913 align_jumps = 8;
2914 if (align_loops <= 0)
2915 align_loops = 8;
2917 if (rs6000_align_branch_targets)
2919 if (align_functions <= 0)
2920 align_functions = 16;
2921 if (align_jumps <= 0)
2922 align_jumps = 16;
2923 if (align_loops <= 0)
2925 can_override_loop_align = 1;
2926 align_loops = 16;
2929 if (align_jumps_max_skip <= 0)
2930 align_jumps_max_skip = 15;
2931 if (align_loops_max_skip <= 0)
2932 align_loops_max_skip = 15;
2935 /* Arrange to save and restore machine status around nested functions. */
2936 init_machine_status = rs6000_init_machine_status;
2938 /* We should always be splitting complex arguments, but we can't break
2939 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
2940 if (DEFAULT_ABI != ABI_AIX)
2941 targetm.calls.split_complex_arg = NULL;
2944 /* Initialize rs6000_cost with the appropriate target costs. */
2945 if (optimize_size)
2946 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
2947 else
2948 switch (rs6000_cpu)
2950 case PROCESSOR_RIOS1:
2951 rs6000_cost = &rios1_cost;
2952 break;
2954 case PROCESSOR_RIOS2:
2955 rs6000_cost = &rios2_cost;
2956 break;
2958 case PROCESSOR_RS64A:
2959 rs6000_cost = &rs64a_cost;
2960 break;
2962 case PROCESSOR_MPCCORE:
2963 rs6000_cost = &mpccore_cost;
2964 break;
2966 case PROCESSOR_PPC403:
2967 rs6000_cost = &ppc403_cost;
2968 break;
2970 case PROCESSOR_PPC405:
2971 rs6000_cost = &ppc405_cost;
2972 break;
2974 case PROCESSOR_PPC440:
2975 rs6000_cost = &ppc440_cost;
2976 break;
2978 case PROCESSOR_PPC476:
2979 rs6000_cost = &ppc476_cost;
2980 break;
2982 case PROCESSOR_PPC601:
2983 rs6000_cost = &ppc601_cost;
2984 break;
2986 case PROCESSOR_PPC603:
2987 rs6000_cost = &ppc603_cost;
2988 break;
2990 case PROCESSOR_PPC604:
2991 rs6000_cost = &ppc604_cost;
2992 break;
2994 case PROCESSOR_PPC604e:
2995 rs6000_cost = &ppc604e_cost;
2996 break;
2998 case PROCESSOR_PPC620:
2999 rs6000_cost = &ppc620_cost;
3000 break;
3002 case PROCESSOR_PPC630:
3003 rs6000_cost = &ppc630_cost;
3004 break;
3006 case PROCESSOR_CELL:
3007 rs6000_cost = &ppccell_cost;
3008 break;
3010 case PROCESSOR_PPC750:
3011 case PROCESSOR_PPC7400:
3012 rs6000_cost = &ppc750_cost;
3013 break;
3015 case PROCESSOR_PPC7450:
3016 rs6000_cost = &ppc7450_cost;
3017 break;
3019 case PROCESSOR_PPC8540:
3020 case PROCESSOR_PPC8548:
3021 rs6000_cost = &ppc8540_cost;
3022 break;
3024 case PROCESSOR_PPCE300C2:
3025 case PROCESSOR_PPCE300C3:
3026 rs6000_cost = &ppce300c2c3_cost;
3027 break;
3029 case PROCESSOR_PPCE500MC:
3030 rs6000_cost = &ppce500mc_cost;
3031 break;
3033 case PROCESSOR_PPCE500MC64:
3034 rs6000_cost = &ppce500mc64_cost;
3035 break;
3037 case PROCESSOR_TITAN:
3038 rs6000_cost = &titan_cost;
3039 break;
3041 case PROCESSOR_POWER4:
3042 case PROCESSOR_POWER5:
3043 rs6000_cost = &power4_cost;
3044 break;
3046 case PROCESSOR_POWER6:
3047 rs6000_cost = &power6_cost;
3048 break;
3050 case PROCESSOR_POWER7:
3051 rs6000_cost = &power7_cost;
3052 break;
3054 case PROCESSOR_PPCA2:
3055 rs6000_cost = &ppca2_cost;
3056 break;
3058 default:
3059 gcc_unreachable ();
3062 if (global_init_p)
3064 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3065 rs6000_cost->simultaneous_prefetches,
3066 global_options.x_param_values,
3067 global_options_set.x_param_values);
3068 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3069 global_options.x_param_values,
3070 global_options_set.x_param_values);
3071 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3072 rs6000_cost->cache_line_size,
3073 global_options.x_param_values,
3074 global_options_set.x_param_values);
3075 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3076 global_options.x_param_values,
3077 global_options_set.x_param_values);
3079 /* If using typedef char *va_list, signal that
3080 __builtin_va_start (&ap, 0) can be optimized to
3081 ap = __builtin_next_arg (0). */
3082 if (DEFAULT_ABI != ABI_V4)
3083 targetm.expand_builtin_va_start = NULL;
3086 /* Set up single/double float flags.
3087 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3088 then set both flags. */
3089 if (TARGET_HARD_FLOAT && TARGET_FPRS
3090 && rs6000_single_float == 0 && rs6000_double_float == 0)
3091 rs6000_single_float = rs6000_double_float = 1;
3093 /* If not explicitly specified via option, decide whether to generate indexed
3094 load/store instructions. */
3095 if (TARGET_AVOID_XFORM == -1)
3096 /* Avoid indexed addressing when targeting Power6 in order to avoid the
3097 DERAT mispredict penalty. However the LVE and STVE altivec instructions
3098 need indexed accesses and the type used is the scalar type of the element
3099 being loaded or stored. */
3100 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
3101 && !TARGET_ALTIVEC);
3103 /* Set the -mrecip options. */
3104 if (rs6000_recip_name)
3106 char *p = ASTRDUP (rs6000_recip_name);
3107 char *q;
3108 unsigned int mask, i;
3109 bool invert;
3111 while ((q = strtok (p, ",")) != NULL)
3113 p = NULL;
3114 if (*q == '!')
3116 invert = true;
3117 q++;
3119 else
3120 invert = false;
3122 if (!strcmp (q, "default"))
3123 mask = ((TARGET_RECIP_PRECISION)
3124 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
3125 else
3127 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
3128 if (!strcmp (q, recip_options[i].string))
3130 mask = recip_options[i].mask;
3131 break;
3134 if (i == ARRAY_SIZE (recip_options))
3136 error ("unknown option for -mrecip=%s", q);
3137 invert = false;
3138 mask = 0;
3139 ret = false;
3143 if (invert)
3144 rs6000_recip_control &= ~mask;
3145 else
3146 rs6000_recip_control |= mask;
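/* As an illustration, -mrecip=all,!rsqrtd makes two passes through this
   loop: the first token ORs in the mask for every estimate listed in the
   recip_options table, and the "!"-prefixed token then clears the bits
   for the double-precision reciprocal square root.  (Token names here
   assume the entries in recip_options, e.g. "all", "div", "rsqrt" and
   their f/d variants.)  */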
3150 /* Set the builtin mask of the various options used that could affect which
3151 builtins were used. In the past we used target_flags, but we've run out
3152 of bits, and some options like SPE and PAIRED are no longer in
3153 target_flags. */
3154 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
3155 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
3156 fprintf (stderr, "new builtin mask = 0x%x%s%s%s%s\n", rs6000_builtin_mask,
3157 (rs6000_builtin_mask & RS6000_BTM_ALTIVEC) ? ", altivec" : "",
3158 (rs6000_builtin_mask & RS6000_BTM_VSX) ? ", vsx" : "",
3159 (rs6000_builtin_mask & RS6000_BTM_PAIRED) ? ", paired" : "",
3160 (rs6000_builtin_mask & RS6000_BTM_SPE) ? ", spe" : "");
3162 /* Initialize all of the registers. */
3163 rs6000_init_hard_regno_mode_ok (global_init_p);
3165 /* Save the initial options in case the user does function specific options. */
3166 if (global_init_p)
3167 target_option_default_node = target_option_current_node
3168 = build_target_option_node ();
3170 /* If not explicitly specified via option, decide whether to generate the
3171 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
3172 if (TARGET_LINK_STACK == -1)
3173 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
3175 return ret;
3178 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
3179 define the target cpu type. */
3181 static void
3182 rs6000_option_override (void)
3184 (void) rs6000_option_override_internal (true);
3188 /* Implement targetm.vectorize.builtin_mask_for_load. */
3189 static tree
3190 rs6000_builtin_mask_for_load (void)
3192 if (TARGET_ALTIVEC || TARGET_VSX)
3193 return altivec_builtin_mask_for_load;
3194 else
3195 return 0;
3198 /* Implement LOOP_ALIGN. */
3199 int
3200 rs6000_loop_align (rtx label)
3202 basic_block bb;
3203 int ninsns;
3205 /* Don't override loop alignment if -falign-loops was specified. */
3206 if (!can_override_loop_align)
3207 return align_loops_log;
3209 bb = BLOCK_FOR_INSN (label);
3210 ninsns = num_loop_insns (bb->loop_father);
3212 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
3213 if (ninsns > 4 && ninsns <= 8
3214 && (rs6000_cpu == PROCESSOR_POWER4
3215 || rs6000_cpu == PROCESSOR_POWER5
3216 || rs6000_cpu == PROCESSOR_POWER6
3217 || rs6000_cpu == PROCESSOR_POWER7))
3218 return 5;
3219 else
3220 return align_loops_log;
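/* For example, on POWER7 an inner loop of 6 insns returns 5 here, i.e.
   2**5 = 32-byte alignment, so the loop fits in a single icache sector;
   loops outside the 5..8 insn window keep the default align_loops_log.  */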
3223 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
3224 static int
3225 rs6000_loop_align_max_skip (rtx label)
3227 return (1 << rs6000_loop_align (label)) - 1;
3230 /* Implement targetm.vectorize.builtin_mul_widen_even. */
3231 static tree
3232 rs6000_builtin_mul_widen_even (tree type)
3234 if (!TARGET_ALTIVEC)
3235 return NULL_TREE;
3237 switch (TYPE_MODE (type))
3239 case V8HImode:
3240 return TYPE_UNSIGNED (type)
3241 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH_UNS]
3242 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
3244 case V16QImode:
3245 return TYPE_UNSIGNED (type)
3246 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB_UNS]
3247 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
3248 default:
3249 return NULL_TREE;
3253 /* Implement targetm.vectorize.builtin_mul_widen_odd. */
3254 static tree
3255 rs6000_builtin_mul_widen_odd (tree type)
3257 if (!TARGET_ALTIVEC)
3258 return NULL_TREE;
3260 switch (TYPE_MODE (type))
3262 case V8HImode:
3263 return TYPE_UNSIGNED (type)
3264 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH_UNS]
3265 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
3267 case V16QImode:
3268 return TYPE_UNSIGNED (type)
3269 ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB_UNS]
3270 : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
3271 default:
3272 return NULL_TREE;
3277 /* Return true iff a data reference of TYPE can reach vector alignment (16)
3278 after applying N iterations. This routine does not determine
3279 how many iterations are required to reach the desired alignment. */
3281 static bool
3282 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
3284 if (is_packed)
3285 return false;
3287 if (TARGET_32BIT)
3289 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
3290 return true;
3292 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
3293 return true;
3295 return false;
3297 else
3299 if (TARGET_MACHO)
3300 return false;
3302 /* Assuming that all other types are naturally aligned. CHECKME! */
3303 return true;
3307 /* Return true if the vector misalignment factor is supported by the
3308 target. */
3309 static bool
3310 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
3311 const_tree type,
3312 int misalignment,
3313 bool is_packed)
3315 if (TARGET_VSX)
3317 /* Return false if the movmisalign pattern is not supported for this mode. */
3318 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
3319 return false;
3321 if (misalignment == -1)
3323 /* Misalignment factor is unknown at compile time but we know
3324 it's word aligned. */
3325 if (rs6000_vector_alignment_reachable (type, is_packed))
3327 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
3329 if (element_size == 64 || element_size == 32)
3330 return true;
3333 return false;
3336 /* VSX supports word-aligned vectors. */
3337 if (misalignment % 4 == 0)
3338 return true;
3340 return false;
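/* For example, a V4SFmode access known to be 4, 8 or 12 bytes off a
   16-byte boundary is accepted under VSX (it is still word aligned),
   while a misalignment that is not a multiple of 4 is rejected and the
   vectorizer must use another strategy.  */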
3343 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3344 static int
3345 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3346 tree vectype, int misalign)
3348 unsigned elements;
3350 switch (type_of_cost)
3352 case scalar_stmt:
3353 case scalar_load:
3354 case scalar_store:
3355 case vector_stmt:
3356 case vector_load:
3357 case vector_store:
3358 case vec_to_scalar:
3359 case scalar_to_vec:
3360 case cond_branch_not_taken:
3361 return 1;
3363 case vec_perm:
3364 if (TARGET_VSX)
3365 return 4;
3366 else
3367 return 1;
3369 case vec_promote_demote:
3370 if (TARGET_VSX)
3371 return 5;
3372 else
3373 return 1;
3375 case cond_branch_taken:
3376 return 3;
3378 case unaligned_load:
3379 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3381 elements = TYPE_VECTOR_SUBPARTS (vectype);
3382 if (elements == 2)
3383 /* Double word aligned. */
3384 return 2;
3386 if (elements == 4)
3388 switch (misalign)
3390 case 8:
3391 /* Double word aligned. */
3392 return 2;
3394 case -1:
3395 /* Unknown misalignment. */
3396 case 4:
3397 case 12:
3398 /* Word aligned. */
3399 return 22;
3401 default:
3402 gcc_unreachable ();
3407 if (TARGET_ALTIVEC)
3408 /* Misaligned loads are not supported. */
3409 gcc_unreachable ();
3411 return 2;
3413 case unaligned_store:
3414 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
3416 elements = TYPE_VECTOR_SUBPARTS (vectype);
3417 if (elements == 2)
3418 /* Double word aligned. */
3419 return 2;
3421 if (elements == 4)
3423 switch (misalign)
3425 case 8:
3426 /* Double word aligned. */
3427 return 2;
3429 case -1:
3430 /* Unknown misalignment. */
3431 case 4:
3432 case 12:
3433 /* Word aligned. */
3434 return 23;
3436 default:
3437 gcc_unreachable ();
3442 if (TARGET_ALTIVEC)
3443 /* Misaligned stores are not supported. */
3444 gcc_unreachable ();
3446 return 2;
3448 default:
3449 gcc_unreachable ();
3453 /* Implement targetm.vectorize.preferred_simd_mode. */
3455 static enum machine_mode
3456 rs6000_preferred_simd_mode (enum machine_mode mode)
3458 if (TARGET_VSX)
3459 switch (mode)
3461 case DFmode:
3462 return V2DFmode;
3463 default:;
3465 if (TARGET_ALTIVEC || TARGET_VSX)
3466 switch (mode)
3468 case SFmode:
3469 return V4SFmode;
3470 case DImode:
3471 return V2DImode;
3472 case SImode:
3473 return V4SImode;
3474 case HImode:
3475 return V8HImode;
3476 case QImode:
3477 return V16QImode;
3478 default:;
3480 if (TARGET_SPE)
3481 switch (mode)
3483 case SFmode:
3484 return V2SFmode;
3485 case SImode:
3486 return V2SImode;
3487 default:;
3489 if (TARGET_PAIRED_FLOAT
3490 && mode == SFmode)
3491 return V2SFmode;
3492 return word_mode;
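/* For example, a loop over 32-bit ints is vectorized with V4SImode under
   AltiVec or VSX and with V2SImode under SPE; returning word_mode tells
   the vectorizer that no vector mode is available for MODE.  */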
3495 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
3496 library with vectorized intrinsics. */
3498 static tree
3499 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
3501 char name[32];
3502 const char *suffix = NULL;
3503 tree fntype, new_fndecl, bdecl = NULL_TREE;
3504 int n_args = 1;
3505 const char *bname;
3506 enum machine_mode el_mode, in_mode;
3507 int n, in_n;
3509 /* Libmass is suitable for unsafe math only, as it does not correctly support
3510 parts of IEEE with the required precision, such as denormals. Only support
3511 it if we have VSX to use the simd d2 or f4 functions.
3512 XXX: Add variable length support. */
3513 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
3514 return NULL_TREE;
3516 el_mode = TYPE_MODE (TREE_TYPE (type_out));
3517 n = TYPE_VECTOR_SUBPARTS (type_out);
3518 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3519 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3520 if (el_mode != in_mode
3521 || n != in_n)
3522 return NULL_TREE;
3524 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3526 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3527 switch (fn)
3529 case BUILT_IN_ATAN2:
3530 case BUILT_IN_HYPOT:
3531 case BUILT_IN_POW:
3532 n_args = 2;
3533 /* fall through */
3535 case BUILT_IN_ACOS:
3536 case BUILT_IN_ACOSH:
3537 case BUILT_IN_ASIN:
3538 case BUILT_IN_ASINH:
3539 case BUILT_IN_ATAN:
3540 case BUILT_IN_ATANH:
3541 case BUILT_IN_CBRT:
3542 case BUILT_IN_COS:
3543 case BUILT_IN_COSH:
3544 case BUILT_IN_ERF:
3545 case BUILT_IN_ERFC:
3546 case BUILT_IN_EXP2:
3547 case BUILT_IN_EXP:
3548 case BUILT_IN_EXPM1:
3549 case BUILT_IN_LGAMMA:
3550 case BUILT_IN_LOG10:
3551 case BUILT_IN_LOG1P:
3552 case BUILT_IN_LOG2:
3553 case BUILT_IN_LOG:
3554 case BUILT_IN_SIN:
3555 case BUILT_IN_SINH:
3556 case BUILT_IN_SQRT:
3557 case BUILT_IN_TAN:
3558 case BUILT_IN_TANH:
3559 bdecl = builtin_decl_implicit (fn);
3560 suffix = "d2"; /* pow -> powd2 */
3561 if (el_mode != DFmode
3562 || n != 2)
3563 return NULL_TREE;
3564 break;
3566 case BUILT_IN_ATAN2F:
3567 case BUILT_IN_HYPOTF:
3568 case BUILT_IN_POWF:
3569 n_args = 2;
3570 /* fall through */
3572 case BUILT_IN_ACOSF:
3573 case BUILT_IN_ACOSHF:
3574 case BUILT_IN_ASINF:
3575 case BUILT_IN_ASINHF:
3576 case BUILT_IN_ATANF:
3577 case BUILT_IN_ATANHF:
3578 case BUILT_IN_CBRTF:
3579 case BUILT_IN_COSF:
3580 case BUILT_IN_COSHF:
3581 case BUILT_IN_ERFF:
3582 case BUILT_IN_ERFCF:
3583 case BUILT_IN_EXP2F:
3584 case BUILT_IN_EXPF:
3585 case BUILT_IN_EXPM1F:
3586 case BUILT_IN_LGAMMAF:
3587 case BUILT_IN_LOG10F:
3588 case BUILT_IN_LOG1PF:
3589 case BUILT_IN_LOG2F:
3590 case BUILT_IN_LOGF:
3591 case BUILT_IN_SINF:
3592 case BUILT_IN_SINHF:
3593 case BUILT_IN_SQRTF:
3594 case BUILT_IN_TANF:
3595 case BUILT_IN_TANHF:
3596 bdecl = builtin_decl_implicit (fn);
3597 suffix = "4"; /* powf -> powf4 */
3598 if (el_mode != SFmode
3599 || n != 4)
3600 return NULL_TREE;
3601 break;
3603 default:
3604 return NULL_TREE;
3607 else
3608 return NULL_TREE;
3610 gcc_assert (suffix != NULL);
3611 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
3612 strcpy (name, bname + sizeof ("__builtin_") - 1);
3613 strcat (name, suffix);
3615 if (n_args == 1)
3616 fntype = build_function_type_list (type_out, type_in, NULL);
3617 else if (n_args == 2)
3618 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
3619 else
3620 gcc_unreachable ();
3622 /* Build a function declaration for the vectorized function. */
3623 new_fndecl = build_decl (BUILTINS_LOCATION,
3624 FUNCTION_DECL, get_identifier (name), fntype);
3625 TREE_PUBLIC (new_fndecl) = 1;
3626 DECL_EXTERNAL (new_fndecl) = 1;
3627 DECL_IS_NOVOPS (new_fndecl) = 1;
3628 TREE_READONLY (new_fndecl) = 1;
3630 return new_fndecl;
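/* Name construction example (illustrative): for BUILT_IN_POW the builtin
   name is "__builtin_pow"; dropping the "__builtin_" prefix and adding the
   "d2" suffix yields "powd2", the MASS routine for V2DF operands.
   BUILT_IN_POWF with suffix "4" likewise yields "powf4" for V4SF.  */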
3633 /* Returns a function decl for a vectorized version of the builtin function
3634 with builtin function code FN and the result vector type TYPE, or NULL_TREE
3635 if it is not available. */
3637 static tree
3638 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
3639 tree type_in)
3641 enum machine_mode in_mode, out_mode;
3642 int in_n, out_n;
3644 if (TARGET_DEBUG_BUILTIN)
3645 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
3646 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
3647 GET_MODE_NAME (TYPE_MODE (type_out)),
3648 GET_MODE_NAME (TYPE_MODE (type_in)));
3650 if (TREE_CODE (type_out) != VECTOR_TYPE
3651 || TREE_CODE (type_in) != VECTOR_TYPE
3652 || !TARGET_VECTORIZE_BUILTINS)
3653 return NULL_TREE;
3655 out_mode = TYPE_MODE (TREE_TYPE (type_out));
3656 out_n = TYPE_VECTOR_SUBPARTS (type_out);
3657 in_mode = TYPE_MODE (TREE_TYPE (type_in));
3658 in_n = TYPE_VECTOR_SUBPARTS (type_in);
3660 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3662 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
3663 switch (fn)
3665 case BUILT_IN_COPYSIGN:
3666 if (VECTOR_UNIT_VSX_P (V2DFmode)
3667 && out_mode == DFmode && out_n == 2
3668 && in_mode == DFmode && in_n == 2)
3669 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
3670 break;
3671 case BUILT_IN_COPYSIGNF:
3672 if (out_mode != SFmode || out_n != 4
3673 || in_mode != SFmode || in_n != 4)
3674 break;
3675 if (VECTOR_UNIT_VSX_P (V4SFmode))
3676 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
3677 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3678 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
3679 break;
3680 case BUILT_IN_SQRT:
3681 if (VECTOR_UNIT_VSX_P (V2DFmode)
3682 && out_mode == DFmode && out_n == 2
3683 && in_mode == DFmode && in_n == 2)
3684 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
3685 break;
3686 case BUILT_IN_SQRTF:
3687 if (VECTOR_UNIT_VSX_P (V4SFmode)
3688 && out_mode == SFmode && out_n == 4
3689 && in_mode == SFmode && in_n == 4)
3690 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
3691 break;
3692 case BUILT_IN_CEIL:
3693 if (VECTOR_UNIT_VSX_P (V2DFmode)
3694 && out_mode == DFmode && out_n == 2
3695 && in_mode == DFmode && in_n == 2)
3696 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
3697 break;
3698 case BUILT_IN_CEILF:
3699 if (out_mode != SFmode || out_n != 4
3700 || in_mode != SFmode || in_n != 4)
3701 break;
3702 if (VECTOR_UNIT_VSX_P (V4SFmode))
3703 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
3704 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3705 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
3706 break;
3707 case BUILT_IN_FLOOR:
3708 if (VECTOR_UNIT_VSX_P (V2DFmode)
3709 && out_mode == DFmode && out_n == 2
3710 && in_mode == DFmode && in_n == 2)
3711 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
3712 break;
3713 case BUILT_IN_FLOORF:
3714 if (out_mode != SFmode || out_n != 4
3715 || in_mode != SFmode || in_n != 4)
3716 break;
3717 if (VECTOR_UNIT_VSX_P (V4SFmode))
3718 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
3719 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3720 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
3721 break;
3722 case BUILT_IN_FMA:
3723 if (VECTOR_UNIT_VSX_P (V2DFmode)
3724 && out_mode == DFmode && out_n == 2
3725 && in_mode == DFmode && in_n == 2)
3726 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
3727 break;
3728 case BUILT_IN_FMAF:
3729 if (VECTOR_UNIT_VSX_P (V4SFmode)
3730 && out_mode == SFmode && out_n == 4
3731 && in_mode == SFmode && in_n == 4)
3732 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
3733 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
3734 && out_mode == SFmode && out_n == 4
3735 && in_mode == SFmode && in_n == 4)
3736 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
3737 break;
3738 case BUILT_IN_TRUNC:
3739 if (VECTOR_UNIT_VSX_P (V2DFmode)
3740 && out_mode == DFmode && out_n == 2
3741 && in_mode == DFmode && in_n == 2)
3742 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
3743 break;
3744 case BUILT_IN_TRUNCF:
3745 if (out_mode != SFmode || out_n != 4
3746 || in_mode != SFmode || in_n != 4)
3747 break;
3748 if (VECTOR_UNIT_VSX_P (V4SFmode))
3749 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
3750 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
3751 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
3752 break;
3753 case BUILT_IN_NEARBYINT:
3754 if (VECTOR_UNIT_VSX_P (V2DFmode)
3755 && flag_unsafe_math_optimizations
3756 && out_mode == DFmode && out_n == 2
3757 && in_mode == DFmode && in_n == 2)
3758 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
3759 break;
3760 case BUILT_IN_NEARBYINTF:
3761 if (VECTOR_UNIT_VSX_P (V4SFmode)
3762 && flag_unsafe_math_optimizations
3763 && out_mode == SFmode && out_n == 4
3764 && in_mode == SFmode && in_n == 4)
3765 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
3766 break;
3767 case BUILT_IN_RINT:
3768 if (VECTOR_UNIT_VSX_P (V2DFmode)
3769 && !flag_trapping_math
3770 && out_mode == DFmode && out_n == 2
3771 && in_mode == DFmode && in_n == 2)
3772 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
3773 break;
3774 case BUILT_IN_RINTF:
3775 if (VECTOR_UNIT_VSX_P (V4SFmode)
3776 && !flag_trapping_math
3777 && out_mode == SFmode && out_n == 4
3778 && in_mode == SFmode && in_n == 4)
3779 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
3780 break;
3781 default:
3782 break;
3786 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
3788 enum rs6000_builtins fn
3789 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
3790 switch (fn)
3792 case RS6000_BUILTIN_RSQRTF:
3793 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3794 && out_mode == SFmode && out_n == 4
3795 && in_mode == SFmode && in_n == 4)
3796 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
3797 break;
3798 case RS6000_BUILTIN_RSQRT:
3799 if (VECTOR_UNIT_VSX_P (V2DFmode)
3800 && out_mode == DFmode && out_n == 2
3801 && in_mode == DFmode && in_n == 2)
3802 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
3803 break;
3804 case RS6000_BUILTIN_RECIPF:
3805 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
3806 && out_mode == SFmode && out_n == 4
3807 && in_mode == SFmode && in_n == 4)
3808 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
3809 break;
3810 case RS6000_BUILTIN_RECIP:
3811 if (VECTOR_UNIT_VSX_P (V2DFmode)
3812 && out_mode == DFmode && out_n == 2
3813 && in_mode == DFmode && in_n == 2)
3814 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
3815 break;
3816 default:
3817 break;
3821 /* Generate calls to libmass if appropriate. */
3822 if (rs6000_veclib_handler)
3823 return rs6000_veclib_handler (fndecl, type_out, type_in);
3825 return NULL_TREE;
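/* Worked example (illustrative): when the vectorizer widens a loop of
   sqrt () calls on doubles to V2DF and VSX is enabled, the switch above
   returns the decl for VSX_BUILTIN_XVSQRTDP, so the loop body uses the
   xvsqrtdp instruction instead of one scalar sqrt call per element.  If no
   entry matches, rs6000_veclib_handler may still map the call to a MASS
   routine.  */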
3828 /* Default CPU string for rs6000*_file_start functions. */
3829 static const char *rs6000_default_cpu;
3831 /* Do anything needed at the start of the asm file. */
3833 static void
3834 rs6000_file_start (void)
3836 char buffer[80];
3837 const char *start = buffer;
3838 FILE *file = asm_out_file;
3840 rs6000_default_cpu = TARGET_CPU_DEFAULT;
3842 default_file_start ();
3844 if (flag_verbose_asm)
3846 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
3848 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
3850 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
3851 start = "";
3854 if (global_options_set.x_rs6000_cpu_index)
3856 fprintf (file, "%s -mcpu=%s", start,
3857 processor_target_table[rs6000_cpu_index].name);
3858 start = "";
3861 if (global_options_set.x_rs6000_tune_index)
3863 fprintf (file, "%s -mtune=%s", start,
3864 processor_target_table[rs6000_tune_index].name);
3865 start = "";
3868 if (PPC405_ERRATUM77)
3870 fprintf (file, "%s PPC405CR_ERRATUM77", start);
3871 start = "";
3874 #ifdef USING_ELFOS_H
3875 switch (rs6000_sdata)
3877 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
3878 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
3879 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
3880 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
3883 if (rs6000_sdata && g_switch_value)
3885 fprintf (file, "%s -G %d", start,
3886 g_switch_value);
3887 start = "";
3889 #endif
3891 if (*start == '\0')
3892 putc ('\n', file);
3895 if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
3897 switch_to_section (toc_section);
3898 switch_to_section (text_section);
3903 /* Return nonzero if this function is known to have a null epilogue. */
3906 direct_return (void)
3908 if (reload_completed)
3910 rs6000_stack_t *info = rs6000_stack_info ();
3912 if (info->first_gp_reg_save == 32
3913 && info->first_fp_reg_save == 64
3914 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
3915 && ! info->lr_save_p
3916 && ! info->cr_save_p
3917 && info->vrsave_mask == 0
3918 && ! info->push_p)
3919 return 1;
3922 return 0;
3925 /* Return the number of instructions it takes to form a constant in an
3926 integer register. */
3929 num_insns_constant_wide (HOST_WIDE_INT value)
3931 /* signed constant loadable with {cal|addi} */
3932 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
3933 return 1;
3935 /* constant loadable with {cau|addis} */
3936 else if ((value & 0xffff) == 0
3937 && (value >> 31 == -1 || value >> 31 == 0))
3938 return 1;
3940 #if HOST_BITS_PER_WIDE_INT == 64
3941 else if (TARGET_POWERPC64)
3943 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
3944 HOST_WIDE_INT high = value >> 31;
3946 if (high == 0 || high == -1)
3947 return 2;
3949 high >>= 1;
3951 if (low == 0)
3952 return num_insns_constant_wide (high) + 1;
3953 else if (high == 0)
3954 return num_insns_constant_wide (low) + 1;
3955 else
3956 return (num_insns_constant_wide (high)
3957 + num_insns_constant_wide (low) + 1);
3959 #endif
3961 else
3962 return 2;
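/* Worked examples for num_insns_constant_wide (illustrative): 0x7fff fits
   the addi test and costs 1; 0x12340000 has zero low bits and a
   sign-extendable high part, so the addis test also gives 1.  On
   TARGET_POWERPC64, 0x1234567800000000 has low == 0, so the cost is
   num_insns_constant_wide (0x12345678) + 1 == 3, e.g. lis, ori, then a
   shift to position the high word.  */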
3966 num_insns_constant (rtx op, enum machine_mode mode)
3968 HOST_WIDE_INT low, high;
3970 switch (GET_CODE (op))
3972 case CONST_INT:
3973 #if HOST_BITS_PER_WIDE_INT == 64
3974 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
3975 && mask64_operand (op, mode))
3976 return 2;
3977 else
3978 #endif
3979 return num_insns_constant_wide (INTVAL (op));
3981 case CONST_DOUBLE:
3982 if (mode == SFmode || mode == SDmode)
3984 long l;
3985 REAL_VALUE_TYPE rv;
3987 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
3988 if (DECIMAL_FLOAT_MODE_P (mode))
3989 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
3990 else
3991 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
3992 return num_insns_constant_wide ((HOST_WIDE_INT) l);
3995 if (mode == VOIDmode || mode == DImode)
3997 high = CONST_DOUBLE_HIGH (op);
3998 low = CONST_DOUBLE_LOW (op);
4000 else
4002 long l[2];
4003 REAL_VALUE_TYPE rv;
4005 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
4006 if (DECIMAL_FLOAT_MODE_P (mode))
4007 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
4008 else
4009 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
4010 high = l[WORDS_BIG_ENDIAN == 0];
4011 low = l[WORDS_BIG_ENDIAN != 0];
4014 if (TARGET_32BIT)
4015 return (num_insns_constant_wide (low)
4016 + num_insns_constant_wide (high));
4017 else
4019 if ((high == 0 && low >= 0)
4020 || (high == -1 && low < 0))
4021 return num_insns_constant_wide (low);
4023 else if (mask64_operand (op, mode))
4024 return 2;
4026 else if (low == 0)
4027 return num_insns_constant_wide (high) + 1;
4029 else
4030 return (num_insns_constant_wide (high)
4031 + num_insns_constant_wide (low) + 1);
4034 default:
4035 gcc_unreachable ();
4039 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
4040 If the mode of OP is MODE_VECTOR_INT, this simply returns the
4041 corresponding element of the vector, but for V4SFmode and V2SFmode,
4042 the corresponding "float" is interpreted as an SImode integer. */
4044 HOST_WIDE_INT
4045 const_vector_elt_as_int (rtx op, unsigned int elt)
4047 rtx tmp;
4049 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
4050 gcc_assert (GET_MODE (op) != V2DImode
4051 && GET_MODE (op) != V2DFmode);
4053 tmp = CONST_VECTOR_ELT (op, elt);
4054 if (GET_MODE (op) == V4SFmode
4055 || GET_MODE (op) == V2SFmode)
4056 tmp = gen_lowpart (SImode, tmp);
4057 return INTVAL (tmp);
4060 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4061 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4062 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4063 all items are set to the same value and contain COPIES replicas of the
4064 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4065 operand and the others are set to the value of the operand's msb. */
4067 static bool
4068 vspltis_constant (rtx op, unsigned step, unsigned copies)
4070 enum machine_mode mode = GET_MODE (op);
4071 enum machine_mode inner = GET_MODE_INNER (mode);
4073 unsigned i;
4074 unsigned nunits;
4075 unsigned bitsize;
4076 unsigned mask;
4078 HOST_WIDE_INT val;
4079 HOST_WIDE_INT splat_val;
4080 HOST_WIDE_INT msb_val;
4082 if (mode == V2DImode || mode == V2DFmode)
4083 return false;
4085 nunits = GET_MODE_NUNITS (mode);
4086 bitsize = GET_MODE_BITSIZE (inner);
4087 mask = GET_MODE_MASK (inner);
4089 val = const_vector_elt_as_int (op, nunits - 1);
4090 splat_val = val;
4091 msb_val = val > 0 ? 0 : -1;
4093 /* Construct the value to be splatted, if possible. If not, return 0. */
4094 for (i = 2; i <= copies; i *= 2)
4096 HOST_WIDE_INT small_val;
4097 bitsize /= 2;
4098 small_val = splat_val >> bitsize;
4099 mask >>= bitsize;
4100 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
4101 return false;
4102 splat_val = small_val;
4105 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4106 if (EASY_VECTOR_15 (splat_val))
4109 /* Also check if we can splat, and then add the result to itself. Do so if
4110 the value is positive, or if the splat instruction is using OP's mode;
4111 for splat_val < 0, the splat and the add should use the same mode. */
4112 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
4113 && (splat_val >= 0 || (step == 1 && copies == 1)))
4116 /* Also check if we are loading up the most significant bit, which can be
4117 done by loading up -1 and shifting the value left by -1. */
4118 else if (EASY_VECTOR_MSB (splat_val, inner))
4121 else
4122 return false;
4124 /* Check if VAL is present in every STEP-th element, and the
4125 other elements are filled with its most significant bit. */
4126 for (i = 0; i < nunits - 1; ++i)
4128 HOST_WIDE_INT desired_val;
4129 if (((i + 1) & (step - 1)) == 0)
4130 desired_val = val;
4131 else
4132 desired_val = msb_val;
4134 if (desired_val != const_vector_elt_as_int (op, i))
4135 return false;
4138 return true;
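/* Worked example for vspltis_constant (illustrative): the V8HImode vector
   whose elements are all 0x0505 is reached with step == 1, copies == 2;
   the folding loop reduces splat_val from 0x0505 to 0x05, which passes
   EASY_VECTOR_15, so the whole constant can be generated with a single
   vspltisb 5.  */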
4142 /* Return true if OP is of the given MODE and can be synthesized
4143 with a vspltisb, vspltish or vspltisw. */
4145 bool
4146 easy_altivec_constant (rtx op, enum machine_mode mode)
4148 unsigned step, copies;
4150 if (mode == VOIDmode)
4151 mode = GET_MODE (op);
4152 else if (mode != GET_MODE (op))
4153 return false;
4155 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
4156 constants. */
4157 if (mode == V2DFmode)
4158 return zero_constant (op, mode);
4160 if (mode == V2DImode)
4162 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
4163 easy. */
4164 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
4165 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
4166 return false;
4168 if (zero_constant (op, mode))
4169 return true;
4171 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
4172 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
4173 return true;
4175 return false;
4178 /* Start with a vspltisw. */
4179 step = GET_MODE_NUNITS (mode) / 4;
4180 copies = 1;
4182 if (vspltis_constant (op, step, copies))
4183 return true;
4185 /* Then try with a vspltish. */
4186 if (step == 1)
4187 copies <<= 1;
4188 else
4189 step >>= 1;
4191 if (vspltis_constant (op, step, copies))
4192 return true;
4194 /* And finally a vspltisb. */
4195 if (step == 1)
4196 copies <<= 1;
4197 else
4198 step >>= 1;
4200 if (vspltis_constant (op, step, copies))
4201 return true;
4203 return false;
4206 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
4207 result is OP. Abort if it is not possible. */
4210 gen_easy_altivec_constant (rtx op)
4212 enum machine_mode mode = GET_MODE (op);
4213 int nunits = GET_MODE_NUNITS (mode);
4214 rtx last = CONST_VECTOR_ELT (op, nunits - 1);
4215 unsigned step = nunits / 4;
4216 unsigned copies = 1;
4218 /* Start with a vspltisw. */
4219 if (vspltis_constant (op, step, copies))
4220 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
4222 /* Then try with a vspltish. */
4223 if (step == 1)
4224 copies <<= 1;
4225 else
4226 step >>= 1;
4228 if (vspltis_constant (op, step, copies))
4229 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
4231 /* And finally a vspltisb. */
4232 if (step == 1)
4233 copies <<= 1;
4234 else
4235 step >>= 1;
4237 if (vspltis_constant (op, step, copies))
4238 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
4240 gcc_unreachable ();
4243 const char *
4244 output_vec_const_move (rtx *operands)
4246 int cst, cst2;
4247 enum machine_mode mode;
4248 rtx dest, vec;
4250 dest = operands[0];
4251 vec = operands[1];
4252 mode = GET_MODE (dest);
4254 if (TARGET_VSX)
4256 if (zero_constant (vec, mode))
4257 return "xxlxor %x0,%x0,%x0";
4259 if (mode == V2DImode
4260 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
4261 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
4262 return "vspltisw %0,-1";
4265 if (TARGET_ALTIVEC)
4267 rtx splat_vec;
4268 if (zero_constant (vec, mode))
4269 return "vxor %0,%0,%0";
4271 splat_vec = gen_easy_altivec_constant (vec);
4272 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
4273 operands[1] = XEXP (splat_vec, 0);
4274 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
4275 return "#";
4277 switch (GET_MODE (splat_vec))
4279 case V4SImode:
4280 return "vspltisw %0,%1";
4282 case V8HImode:
4283 return "vspltish %0,%1";
4285 case V16QImode:
4286 return "vspltisb %0,%1";
4288 default:
4289 gcc_unreachable ();
4293 gcc_assert (TARGET_SPE);
4295 /* Vector constant 0 is handled as a splitter of V2SI, and in the
4296 patterns for V1DI, V4HI, and V2SF.
4298 FIXME: We should probably return # and add post-reload
4299 splitters for these, but this way is so easy ;-). */
4300 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
4301 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
4302 operands[1] = CONST_VECTOR_ELT (vec, 0);
4303 operands[2] = CONST_VECTOR_ELT (vec, 1);
4304 if (cst == cst2)
4305 return "li %0,%1\n\tevmergelo %0,%0,%0";
4306 else
4307 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
4310 /* Initialize the paired float vector TARGET to VALS. */
4312 void
4313 paired_expand_vector_init (rtx target, rtx vals)
4315 enum machine_mode mode = GET_MODE (target);
4316 int n_elts = GET_MODE_NUNITS (mode);
4317 int n_var = 0;
4318 rtx x, new_rtx, tmp, constant_op, op1, op2;
4319 int i;
4321 for (i = 0; i < n_elts; ++i)
4323 x = XVECEXP (vals, 0, i);
4324 if (!(CONST_INT_P (x)
4325 || GET_CODE (x) == CONST_DOUBLE
4326 || GET_CODE (x) == CONST_FIXED))
4327 ++n_var;
4329 if (n_var == 0)
4331 /* Load from constant pool. */
4332 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
4333 return;
4336 if (n_var == 2)
4338 /* The vector is initialized only with non-constants. */
4339 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
4340 XVECEXP (vals, 0, 1));
4342 emit_move_insn (target, new_rtx);
4343 return;
4346 /* One field is non-constant and the other one is a constant. Load the
4347 constant from the constant pool and use ps_merge instruction to
4348 construct the whole vector. */
4349 op1 = XVECEXP (vals, 0, 0);
4350 op2 = XVECEXP (vals, 0, 1);
4352 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
4354 tmp = gen_reg_rtx (GET_MODE (constant_op));
4355 emit_move_insn (tmp, constant_op);
4357 if (CONSTANT_P (op1))
4358 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
4359 else
4360 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
4362 emit_move_insn (target, new_rtx);
4365 void
4366 paired_expand_vector_move (rtx operands[])
4368 rtx op0 = operands[0], op1 = operands[1];
4370 emit_move_insn (op0, op1);
4373 /* Emit vector compare for code RCODE. DEST is the destination, OP1 and
4374 OP2 are two VEC_COND_EXPR operands, and CC_OP0 and CC_OP1 are the two
4375 operands for the comparison RCODE. This is a recursive
4376 function. */
4378 static void
4379 paired_emit_vector_compare (enum rtx_code rcode,
4380 rtx dest, rtx op0, rtx op1,
4381 rtx cc_op0, rtx cc_op1)
4383 rtx tmp = gen_reg_rtx (V2SFmode);
4384 rtx tmp1, max, min;
4386 gcc_assert (TARGET_PAIRED_FLOAT);
4387 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
4389 switch (rcode)
4391 case LT:
4392 case LTU:
4393 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4394 return;
4395 case GE:
4396 case GEU:
4397 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4398 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
4399 return;
4400 case LE:
4401 case LEU:
4402 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
4403 return;
4404 case GT:
4405 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4406 return;
4407 case EQ:
4408 tmp1 = gen_reg_rtx (V2SFmode);
4409 max = gen_reg_rtx (V2SFmode);
4410 min = gen_reg_rtx (V2SFmode);
4413 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
4414 emit_insn (gen_selv2sf4
4415 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4416 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
4417 emit_insn (gen_selv2sf4
4418 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
4419 emit_insn (gen_subv2sf3 (tmp1, min, max));
4420 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
4421 return;
4422 case NE:
4423 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
4424 return;
4425 case UNLE:
4426 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
4427 return;
4428 case UNLT:
4429 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
4430 return;
4431 case UNGE:
4432 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
4433 return;
4434 case UNGT:
4435 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
4436 return;
4437 default:
4438 gcc_unreachable ();
4441 return;
4444 /* Emit vector conditional expression.
4445 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
4446 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
4449 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
4450 rtx cond, rtx cc_op0, rtx cc_op1)
4452 enum rtx_code rcode = GET_CODE (cond);
4454 if (!TARGET_PAIRED_FLOAT)
4455 return 0;
4457 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
4459 return 1;
4462 /* Initialize vector TARGET to VALS. */
4464 void
4465 rs6000_expand_vector_init (rtx target, rtx vals)
4467 enum machine_mode mode = GET_MODE (target);
4468 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4469 int n_elts = GET_MODE_NUNITS (mode);
4470 int n_var = 0, one_var = -1;
4471 bool all_same = true, all_const_zero = true;
4472 rtx x, mem;
4473 int i;
4475 for (i = 0; i < n_elts; ++i)
4477 x = XVECEXP (vals, 0, i);
4478 if (!(CONST_INT_P (x)
4479 || GET_CODE (x) == CONST_DOUBLE
4480 || GET_CODE (x) == CONST_FIXED))
4481 ++n_var, one_var = i;
4482 else if (x != CONST0_RTX (inner_mode))
4483 all_const_zero = false;
4485 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
4486 all_same = false;
4489 if (n_var == 0)
4491 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
4492 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
4493 if ((int_vector_p || TARGET_VSX) && all_const_zero)
4495 /* Zero register. */
4496 emit_insn (gen_rtx_SET (VOIDmode, target,
4497 gen_rtx_XOR (mode, target, target)));
4498 return;
4500 else if (int_vector_p && easy_vector_constant (const_vec, mode))
4502 /* Splat immediate. */
4503 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
4504 return;
4506 else
4508 /* Load from constant pool. */
4509 emit_move_insn (target, const_vec);
4510 return;
4514 /* Double word values on VSX can use xxpermdi or lxvdsx. */
4515 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4517 rtx op0 = XVECEXP (vals, 0, 0);
4518 rtx op1 = XVECEXP (vals, 0, 1);
4519 if (all_same)
4521 if (!MEM_P (op0) && !REG_P (op0))
4522 op0 = force_reg (inner_mode, op0);
4523 if (mode == V2DFmode)
4524 emit_insn (gen_vsx_splat_v2df (target, op0));
4525 else
4526 emit_insn (gen_vsx_splat_v2di (target, op0));
4528 else
4530 op0 = force_reg (inner_mode, op0);
4531 op1 = force_reg (inner_mode, op1);
4532 if (mode == V2DFmode)
4533 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
4534 else
4535 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
4537 return;
4540 /* With single precision floating point on VSX, we know that internally
4541 single precision is actually represented as a double, so either make two
4542 V2DF vectors and convert those vectors to single precision, or do one
4543 conversion and splat the result to the other elements. */
4544 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
4546 if (all_same)
4548 rtx freg = gen_reg_rtx (V4SFmode);
4549 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
4551 emit_insn (gen_vsx_xscvdpsp_scalar (freg, sreg));
4552 emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
4554 else
4556 rtx dbl_even = gen_reg_rtx (V2DFmode);
4557 rtx dbl_odd = gen_reg_rtx (V2DFmode);
4558 rtx flt_even = gen_reg_rtx (V4SFmode);
4559 rtx flt_odd = gen_reg_rtx (V4SFmode);
4560 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
4561 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
4562 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
4563 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
4565 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
4566 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
4567 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
4568 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
4569 rs6000_expand_extract_even (target, flt_even, flt_odd);
4571 return;
4574 /* Store value to stack temp. Load vector element. Splat. However, splat
4575 of 64-bit items is not supported on Altivec. */
4576 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
4578 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4579 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
4580 XVECEXP (vals, 0, 0));
4581 x = gen_rtx_UNSPEC (VOIDmode,
4582 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4583 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4584 gen_rtvec (2,
4585 gen_rtx_SET (VOIDmode,
4586 target, mem),
4587 x)));
4588 x = gen_rtx_VEC_SELECT (inner_mode, target,
4589 gen_rtx_PARALLEL (VOIDmode,
4590 gen_rtvec (1, const0_rtx)));
4591 emit_insn (gen_rtx_SET (VOIDmode, target,
4592 gen_rtx_VEC_DUPLICATE (mode, x)));
4593 return;
4596 /* One field is non-constant. Load constant then overwrite
4597 varying field. */
4598 if (n_var == 1)
4600 rtx copy = copy_rtx (vals);
4602 /* Load constant part of vector, substitute neighboring value for
4603 varying element. */
4604 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
4605 rs6000_expand_vector_init (target, copy);
4607 /* Insert variable. */
4608 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
4609 return;
4612 /* Construct the vector in memory one field at a time
4613 and load the whole vector. */
4614 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4615 for (i = 0; i < n_elts; i++)
4616 emit_move_insn (adjust_address_nv (mem, inner_mode,
4617 i * GET_MODE_SIZE (inner_mode)),
4618 XVECEXP (vals, 0, i));
4619 emit_move_insn (target, mem);
4622 /* Set field ELT of TARGET to VAL. */
4624 void
4625 rs6000_expand_vector_set (rtx target, rtx val, int elt)
4627 enum machine_mode mode = GET_MODE (target);
4628 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4629 rtx reg = gen_reg_rtx (mode);
4630 rtx mask, mem, x;
4631 int width = GET_MODE_SIZE (inner_mode);
4632 int i;
4634 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
4636 rtx (*set_func) (rtx, rtx, rtx, rtx)
4637 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
4638 emit_insn (set_func (target, target, val, GEN_INT (elt)));
4639 return;
4642 /* Load single variable value. */
4643 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
4644 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
4645 x = gen_rtx_UNSPEC (VOIDmode,
4646 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
4647 emit_insn (gen_rtx_PARALLEL (VOIDmode,
4648 gen_rtvec (2,
4649 gen_rtx_SET (VOIDmode,
4650 reg, mem),
4651 x)));
4653 /* Linear sequence. */
4654 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
4655 for (i = 0; i < 16; ++i)
4656 XVECEXP (mask, 0, i) = GEN_INT (i);
4658 /* Set permute mask to insert element into target. */
4659 for (i = 0; i < width; ++i)
4660 XVECEXP (mask, 0, elt*width + i)
4661 = GEN_INT (i + 0x10);
4662 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
4663 x = gen_rtx_UNSPEC (mode,
4664 gen_rtvec (3, target, reg,
4665 force_reg (V16QImode, x)),
4666 UNSPEC_VPERM);
4667 emit_insn (gen_rtx_SET (VOIDmode, target, x));
4670 /* Extract field ELT from VEC into TARGET. */
4672 void
4673 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
4675 enum machine_mode mode = GET_MODE (vec);
4676 enum machine_mode inner_mode = GET_MODE_INNER (mode);
4677 rtx mem;
4679 if (VECTOR_MEM_VSX_P (mode))
4681 switch (mode)
4683 default:
4684 break;
4685 case V2DFmode:
4686 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
4687 return;
4688 case V2DImode:
4689 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
4690 return;
4691 case V4SFmode:
4692 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
4693 return;
4697 /* Allocate mode-sized buffer. */
4698 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
4700 emit_move_insn (mem, vec);
4702 /* Add offset to field within buffer matching vector element. */
4703 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
4705 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
4708 /* Generate the shifts and masks for a pair of rldicl or rldicr insns to
4709 implement ANDing by the mask IN. */
4710 void
4711 build_mask64_2_operands (rtx in, rtx *out)
4713 #if HOST_BITS_PER_WIDE_INT >= 64
4714 unsigned HOST_WIDE_INT c, lsb, m1, m2;
4715 int shift;
4717 gcc_assert (GET_CODE (in) == CONST_INT);
4719 c = INTVAL (in);
4720 if (c & 1)
4722 /* Assume c is initially something like 0x00fff000000fffff. The idea
4723 is to rotate the word so that the middle ^^^^^^ group of zeros
4724 is at the MS end and can be cleared with an rldicl mask. We then
4725 rotate back and clear off the MS ^^ group of zeros with a
4726 second rldicl. */
4727 c = ~c; /* c == 0xff000ffffff00000 */
4728 lsb = c & -c; /* lsb == 0x0000000000100000 */
4729 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
4730 c = ~c; /* c == 0x00fff000000fffff */
4731 c &= -lsb; /* c == 0x00fff00000000000 */
4732 lsb = c & -c; /* lsb == 0x0000100000000000 */
4733 c = ~c; /* c == 0xff000fffffffffff */
4734 c &= -lsb; /* c == 0xff00000000000000 */
4735 shift = 0;
4736 while ((lsb >>= 1) != 0)
4737 shift++; /* shift == 44 on exit from loop */
4738 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
4739 m1 = ~m1; /* m1 == 0x000000ffffffffff */
4740 m2 = ~c; /* m2 == 0x00ffffffffffffff */
4742 else
4744 /* Assume c is initially something like 0xff000f0000000000. The idea
4745 is to rotate the word so that the ^^^ middle group of zeros
4746 is at the LS end and can be cleared with an rldicr mask. We then
4747 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
4748 a second rldicr. */
4749 lsb = c & -c; /* lsb == 0x0000010000000000 */
4750 m2 = -lsb; /* m2 == 0xffffff0000000000 */
4751 c = ~c; /* c == 0x00fff0ffffffffff */
4752 c &= -lsb; /* c == 0x00fff00000000000 */
4753 lsb = c & -c; /* lsb == 0x0000100000000000 */
4754 c = ~c; /* c == 0xff000fffffffffff */
4755 c &= -lsb; /* c == 0xff00000000000000 */
4756 shift = 0;
4757 while ((lsb >>= 1) != 0)
4758 shift++; /* shift == 44 on exit from loop */
4759 m1 = ~c; /* m1 == 0x00ffffffffffffff */
4760 m1 >>= shift; /* m1 == 0x0000000000000fff */
4761 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
4764 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
4765 masks will be all 1's. We are guaranteed more than one transition. */
4766 out[0] = GEN_INT (64 - shift);
4767 out[1] = GEN_INT (m1);
4768 out[2] = GEN_INT (shift);
4769 out[3] = GEN_INT (m2);
4770 #else
4771 (void)in;
4772 (void)out;
4773 gcc_unreachable ();
4774 #endif
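/* Worked example (illustrative), following the trace in the c & 1 arm
   above: for IN == 0x00fff000000fffff the loop exits with shift == 44,
   m1 == 0x000000ffffffffff and m2 == 0x00ffffffffffffff, so OUT holds
   {20, m1, 44, m2}: rotate left by 20 and AND with m1 to clear the middle
   zeros, then rotate back by 44 and AND with m2 to clear the most
   significant zeros.  */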
4777 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
4779 bool
4780 invalid_e500_subreg (rtx op, enum machine_mode mode)
4782 if (TARGET_E500_DOUBLE)
4784 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
4785 subreg:TI and reg:TF. Decimal float modes are like integer
4786 modes (only low part of each register used) for this
4787 purpose. */
4788 if (GET_CODE (op) == SUBREG
4789 && (mode == SImode || mode == DImode || mode == TImode
4790 || mode == DDmode || mode == TDmode)
4791 && REG_P (SUBREG_REG (op))
4792 && (GET_MODE (SUBREG_REG (op)) == DFmode
4793 || GET_MODE (SUBREG_REG (op)) == TFmode))
4794 return true;
4796 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
4797 reg:TI. */
4798 if (GET_CODE (op) == SUBREG
4799 && (mode == DFmode || mode == TFmode)
4800 && REG_P (SUBREG_REG (op))
4801 && (GET_MODE (SUBREG_REG (op)) == DImode
4802 || GET_MODE (SUBREG_REG (op)) == TImode
4803 || GET_MODE (SUBREG_REG (op)) == DDmode
4804 || GET_MODE (SUBREG_REG (op)) == TDmode))
4805 return true;
4808 if (TARGET_SPE
4809 && GET_CODE (op) == SUBREG
4810 && mode == SImode
4811 && REG_P (SUBREG_REG (op))
4812 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
4813 return true;
4815 return false;
4818 /* AIX increases natural record alignment to doubleword if the first
4819 field is an FP double, while the FP fields themselves remain word aligned. */
4821 unsigned int
4822 rs6000_special_round_type_align (tree type, unsigned int computed,
4823 unsigned int specified)
4825 unsigned int align = MAX (computed, specified);
4826 tree field = TYPE_FIELDS (type);
4828 /* Skip all non-field decls. */
4829 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4830 field = DECL_CHAIN (field);
4832 if (field != NULL && field != type)
4834 type = TREE_TYPE (field);
4835 while (TREE_CODE (type) == ARRAY_TYPE)
4836 type = TREE_TYPE (type);
4838 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
4839 align = MAX (align, 64);
4842 return align;
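/* Example (illustrative): on AIX, a hypothetical struct { double d;
   int i; } has its record alignment raised to 64 bits because its first
   field has DFmode, even though the double field itself stays word aligned
   within the record, as the comment above notes.  */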
4845 /* Darwin increases record alignment to the natural alignment of
4846 the first field. */
4848 unsigned int
4849 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
4850 unsigned int specified)
4852 unsigned int align = MAX (computed, specified);
4854 if (TYPE_PACKED (type))
4855 return align;
4857 /* Find the first field, looking down into aggregates. */
4858 do {
4859 tree field = TYPE_FIELDS (type);
4861 /* Skip all non-field decls. */
4861 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
4862 field = DECL_CHAIN (field);
4863 if (! field)
4864 break;
4865 /* A packed field does not contribute any extra alignment. */
4866 if (DECL_PACKED (field))
4867 return align;
4868 type = TREE_TYPE (field);
4869 while (TREE_CODE (type) == ARRAY_TYPE)
4870 type = TREE_TYPE (type);
4871 } while (AGGREGATE_TYPE_P (type));
4873 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
4874 align = MAX (align, TYPE_ALIGN (type));
4876 return align;
4879 /* Return 1 for an operand in small memory on V.4/eabi. */
4882 small_data_operand (rtx op ATTRIBUTE_UNUSED,
4883 enum machine_mode mode ATTRIBUTE_UNUSED)
4885 #if TARGET_ELF
4886 rtx sym_ref;
4888 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
4889 return 0;
4891 if (DEFAULT_ABI != ABI_V4)
4892 return 0;
4894 /* Vector and float memory instructions have a limited offset on the
4895 SPE, so using a vector or float variable directly as an operand is
4896 not useful. */
4897 if (TARGET_SPE
4898 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
4899 return 0;
4901 if (GET_CODE (op) == SYMBOL_REF)
4902 sym_ref = op;
4904 else if (GET_CODE (op) != CONST
4905 || GET_CODE (XEXP (op, 0)) != PLUS
4906 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
4907 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
4908 return 0;
4910 else
4912 rtx sum = XEXP (op, 0);
4913 HOST_WIDE_INT summand;
4915 /* We have to be careful here, because it is the referenced address
4916 that must be 32k from _SDA_BASE_, not just the symbol. */
4917 summand = INTVAL (XEXP (sum, 1));
4918 if (summand < 0 || summand > g_switch_value)
4919 return 0;
4921 sym_ref = XEXP (sum, 0);
4924 return SYMBOL_REF_SMALL_P (sym_ref);
4925 #else
4926 return 0;
4927 #endif
4930 /* Return true if either operand is a general purpose register. */
4932 bool
4933 gpr_or_gpr_p (rtx op0, rtx op1)
4935 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
4936 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
4940 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
4942 static bool
4943 reg_offset_addressing_ok_p (enum machine_mode mode)
4945 switch (mode)
4947 case V16QImode:
4948 case V8HImode:
4949 case V4SFmode:
4950 case V4SImode:
4951 case V2DFmode:
4952 case V2DImode:
4953 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. */
4954 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
4955 return false;
4956 break;
4958 case V4HImode:
4959 case V2SImode:
4960 case V1DImode:
4961 case V2SFmode:
4962 /* Paired vector modes. Only reg+reg addressing is valid. */
4963 if (TARGET_PAIRED_FLOAT)
4964 return false;
4965 break;
4967 default:
4968 break;
4971 return true;
4974 static bool
4975 virtual_stack_registers_memory_p (rtx op)
4977 int regnum;
4979 if (GET_CODE (op) == REG)
4980 regnum = REGNO (op);
4982 else if (GET_CODE (op) == PLUS
4983 && GET_CODE (XEXP (op, 0)) == REG
4984 && GET_CODE (XEXP (op, 1)) == CONST_INT)
4985 regnum = REGNO (XEXP (op, 0));
4987 else
4988 return false;
4990 return (regnum >= FIRST_VIRTUAL_REGISTER
4991 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
4994 /* Return true if memory accesses to OP are known to never straddle
4995 a 32k boundary. */
4997 static bool
4998 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
4999 enum machine_mode mode)
5001 tree decl, type;
5002 unsigned HOST_WIDE_INT dsize, dalign;
5004 if (GET_CODE (op) != SYMBOL_REF)
5005 return false;
5007 decl = SYMBOL_REF_DECL (op);
5008 if (!decl)
5010 if (GET_MODE_SIZE (mode) == 0)
5011 return false;
5013 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
5014 replacing memory addresses with an anchor plus offset. We
5015 could find the decl by rummaging around in the block->objects
5016 VEC for the given offset but that seems like too much work. */
5017 dalign = 1;
5018 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
5019 && SYMBOL_REF_ANCHOR_P (op)
5020 && SYMBOL_REF_BLOCK (op) != NULL)
5022 struct object_block *block = SYMBOL_REF_BLOCK (op);
5023 HOST_WIDE_INT lsb, mask;
5025 /* Given the alignment of the block.. */
5026 dalign = block->alignment;
5027 mask = dalign / BITS_PER_UNIT - 1;
5029 /* ..and the combined offset of the anchor and any offset
5030 to this block object.. */
5031 offset += SYMBOL_REF_BLOCK_OFFSET (op);
5032 lsb = offset & -offset;
5034 /* ..find how many bits of the alignment we know for the
5035 object. */
5036 mask &= lsb - 1;
5037 dalign = mask + 1;
5039 return dalign >= GET_MODE_SIZE (mode);
5042 if (DECL_P (decl))
5044 if (TREE_CODE (decl) == FUNCTION_DECL)
5045 return true;
5047 if (!DECL_SIZE_UNIT (decl))
5048 return false;
5050 if (!host_integerp (DECL_SIZE_UNIT (decl), 1))
5051 return false;
5053 dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
5054 if (dsize > 32768)
5055 return false;
5057 dalign = DECL_ALIGN_UNIT (decl);
5058 return dalign >= dsize;
5061 type = TREE_TYPE (decl);
5063 if (TREE_CODE (decl) == STRING_CST)
5064 dsize = TREE_STRING_LENGTH (decl);
5065 else if (TYPE_SIZE_UNIT (type)
5066 && host_integerp (TYPE_SIZE_UNIT (type), 1))
5067 dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5068 else
5069 return false;
5070 if (dsize > 32768)
5071 return false;
5073 dalign = TYPE_ALIGN (type);
5074 if (CONSTANT_CLASS_P (decl))
5075 dalign = CONSTANT_ALIGNMENT (decl, dalign);
5076 else
5077 dalign = DATA_ALIGNMENT (decl, dalign);
5078 dalign /= BITS_PER_UNIT;
5079 return dalign >= dsize;
5082 static bool
5083 constant_pool_expr_p (rtx op)
5085 rtx base, offset;
5087 split_const (op, &base, &offset);
5088 return (GET_CODE (base) == SYMBOL_REF
5089 && CONSTANT_POOL_ADDRESS_P (base)
5090 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
5093 static const_rtx tocrel_base, tocrel_offset;
5095 /* Return true if OP is a toc pointer relative address (the output
5096 of create_TOC_reference). If STRICT, do not match high part or
5097 non-split -mcmodel=large/medium toc pointer relative addresses. */
5099 bool
5100 toc_relative_expr_p (const_rtx op, bool strict)
5102 if (!TARGET_TOC)
5103 return false;
5105 if (TARGET_CMODEL != CMODEL_SMALL)
5107 /* Only match the low part. */
5108 if (GET_CODE (op) == LO_SUM
5109 && REG_P (XEXP (op, 0))
5110 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
5111 op = XEXP (op, 1);
5112 else if (strict)
5113 return false;
5116 tocrel_base = op;
5117 tocrel_offset = const0_rtx;
5118 if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
5120 tocrel_base = XEXP (op, 0);
5121 tocrel_offset = XEXP (op, 1);
5124 return (GET_CODE (tocrel_base) == UNSPEC
5125 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
5128 /* Return true if X is a constant pool address, and also for cmodel=medium
5129 if X is a toc-relative address known to be offsettable within MODE. */
5131 bool
5132 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
5133 bool strict)
5135 return (toc_relative_expr_p (x, strict)
5136 && (TARGET_CMODEL != CMODEL_MEDIUM
5137 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
5138 || mode == QImode
5139 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
5140 INTVAL (tocrel_offset), mode)));
5143 static bool
5144 legitimate_small_data_p (enum machine_mode mode, rtx x)
5146 return (DEFAULT_ABI == ABI_V4
5147 && !flag_pic && !TARGET_TOC
5148 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
5149 && small_data_operand (x, mode));
5152 /* SPE offset addressing is limited to 5 bits' worth of double words. */
5153 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
5155 bool
5156 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
5158 unsigned HOST_WIDE_INT offset, extra;
5160 if (GET_CODE (x) != PLUS)
5161 return false;
5162 if (GET_CODE (XEXP (x, 0)) != REG)
5163 return false;
5164 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5165 return false;
5166 if (!reg_offset_addressing_ok_p (mode))
5167 return virtual_stack_registers_memory_p (x);
5168 if (legitimate_constant_pool_address_p (x, mode, strict))
5169 return true;
5170 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5171 return false;
5173 offset = INTVAL (XEXP (x, 1));
5174 extra = 0;
5175 switch (mode)
5177 case V4HImode:
5178 case V2SImode:
5179 case V1DImode:
5180 case V2SFmode:
5181 /* SPE vector modes. */
5182 return SPE_CONST_OFFSET_OK (offset);
5184 case DFmode:
5185 if (TARGET_E500_DOUBLE)
5186 return SPE_CONST_OFFSET_OK (offset);
5188 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
5189 addressing. */
5190 if (VECTOR_MEM_VSX_P (DFmode))
5191 return false;
5193 case DDmode:
5194 case DImode:
5195 /* On e500v2, we may have:
5197 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
5199 Which gets addressed with evldd instructions. */
5200 if (TARGET_E500_DOUBLE)
5201 return SPE_CONST_OFFSET_OK (offset);
5203 if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
5204 extra = 4;
5205 else if (offset & 3)
5206 return false;
5207 break;
5209 case TFmode:
5210 if (TARGET_E500_DOUBLE)
5211 return (SPE_CONST_OFFSET_OK (offset)
5212 && SPE_CONST_OFFSET_OK (offset + 8));
5214 case TDmode:
5215 case TImode:
5216 if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
5217 extra = 12;
5218 else if (offset & 3)
5219 return false;
5220 else
5221 extra = 8;
5222 break;
5224 default:
5225 break;
5228 offset += 0x8000;
5229 return offset < 0x10000 - extra;
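/* Worked example (illustrative): a DImode access on a 32-bit target gets
   extra == 4, since the second word is loaded at offset+4; an offset of
   0x7ffb still passes (0x7ffb + 0x8000 < 0x10000 - 4) but 0x7ffc does not,
   because offset+4 would overflow the signed 16-bit displacement field of
   the D-form instruction.  */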
5232 bool
5233 legitimate_indexed_address_p (rtx x, int strict)
5235 rtx op0, op1;
5237 if (GET_CODE (x) != PLUS)
5238 return false;
5240 op0 = XEXP (x, 0);
5241 op1 = XEXP (x, 1);
5243 /* Recognize the rtl generated by reload which we know will later be
5244 replaced with proper base and index regs. */
5245 if (!strict
5246 && reload_in_progress
5247 && (REG_P (op0) || GET_CODE (op0) == PLUS)
5248 && REG_P (op1))
5249 return true;
5251 return (REG_P (op0) && REG_P (op1)
5252 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
5253 && INT_REG_OK_FOR_INDEX_P (op1, strict))
5254 || (INT_REG_OK_FOR_BASE_P (op1, strict)
5255 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
5258 bool
5259 avoiding_indexed_address_p (enum machine_mode mode)
5261 /* Avoid indexed addressing for modes that have non-indexed
5262 load/store instruction forms. */
5263 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
5266 inline bool
5267 legitimate_indirect_address_p (rtx x, int strict)
5269 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
5272 bool
5273 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
5275 if (!TARGET_MACHO || !flag_pic
5276 || mode != SImode || GET_CODE (x) != MEM)
5277 return false;
5278 x = XEXP (x, 0);
5280 if (GET_CODE (x) != LO_SUM)
5281 return false;
5282 if (GET_CODE (XEXP (x, 0)) != REG)
5283 return false;
5284 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
5285 return false;
5286 x = XEXP (x, 1);
5288 return CONSTANT_P (x);
5291 static bool
5292 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
5294 if (GET_CODE (x) != LO_SUM)
5295 return false;
5296 if (GET_CODE (XEXP (x, 0)) != REG)
5297 return false;
5298 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
5299 return false;
5300 /* Restrict addressing for DI because of our SUBREG hackery. */
5301 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5302 || mode == DDmode || mode == TDmode
5303 || mode == DImode))
5304 return false;
5305 x = XEXP (x, 1);
5307 if (TARGET_ELF || TARGET_MACHO)
5309 if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
5310 return false;
5311 if (TARGET_TOC)
5312 return false;
5313 if (GET_MODE_NUNITS (mode) != 1)
5314 return false;
5315 if (GET_MODE_BITSIZE (mode) > 64
5316 || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
5317 && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
5318 && (mode == DFmode || mode == DDmode))))
5319 return false;
5321 return CONSTANT_P (x);
5324 return false;
5328 /* Try machine-dependent ways of modifying an illegitimate address
5329 to be legitimate. If we find one, return the new, valid address.
5330 This is used from only one place: `memory_address' in explow.c.
5332 OLDX is the address as it was before break_out_memory_refs was
5333 called. In some cases it is useful to look at this to decide what
5334 needs to be done.
5336 It is always safe for this function to do nothing. It exists to
5337 recognize opportunities to optimize the output.
5339 On RS/6000, first check for the sum of a register with a constant
5340 integer that is out of range. If so, generate code to add the
5341 constant with the low-order 16 bits masked to the register and force
5342 this result into another register (this can be done with `cau').
5343 Then generate an address of REG+(CONST&0xffff), allowing for the
5344 possibility of bit 16 being a one.
5346 Then check for the sum of a register and something that is not constant;
5347 try to load the non-constant part into a register and return the sum. */
5349 static rtx
5350 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5351 enum machine_mode mode)
5353 unsigned int extra = 0;
5355 if (!reg_offset_addressing_ok_p (mode))
5357 if (virtual_stack_registers_memory_p (x))
5358 return x;
5360 /* In theory we should not be seeing addresses of the form reg+0,
5361 but just in case it is generated, optimize it away. */
5362 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
5363 return force_reg (Pmode, XEXP (x, 0));
5365 /* Make sure both operands are registers. */
5366 else if (GET_CODE (x) == PLUS)
5367 return gen_rtx_PLUS (Pmode,
5368 force_reg (Pmode, XEXP (x, 0)),
5369 force_reg (Pmode, XEXP (x, 1)));
5370 else
5371 return force_reg (Pmode, x);
5373 if (GET_CODE (x) == SYMBOL_REF)
5375 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
5376 if (model != 0)
5377 return rs6000_legitimize_tls_address (x, model);
5380 switch (mode)
5382 case DFmode:
5383 case DDmode:
5384 extra = 4;
5385 break;
5386 case DImode:
5387 if (!TARGET_POWERPC64)
5388 extra = 4;
5389 break;
5390 case TFmode:
5391 case TDmode:
5392 extra = 12;
5393 break;
5394 case TImode:
5395 extra = TARGET_POWERPC64 ? 8 : 12;
5396 break;
5397 default:
5398 break;
5401 if (GET_CODE (x) == PLUS
5402 && GET_CODE (XEXP (x, 0)) == REG
5403 && GET_CODE (XEXP (x, 1)) == CONST_INT
5404 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
5405 >= 0x10000 - extra)
5406 && !((TARGET_POWERPC64
5407 && (mode == DImode || mode == TImode)
5408 && (INTVAL (XEXP (x, 1)) & 3) != 0)
5409 || SPE_VECTOR_MODE (mode)
5410 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5411 || mode == DImode || mode == DDmode
5412 || mode == TDmode))))
5414 HOST_WIDE_INT high_int, low_int;
5415 rtx sum;
5416 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
5417 if (low_int >= 0x8000 - extra)
5418 low_int = 0;
5419 high_int = INTVAL (XEXP (x, 1)) - low_int;
5420 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
5421 GEN_INT (high_int)), 0);
5422 return plus_constant (Pmode, sum, low_int);
5424 else if (GET_CODE (x) == PLUS
5425 && GET_CODE (XEXP (x, 0)) == REG
5426 && GET_CODE (XEXP (x, 1)) != CONST_INT
5427 && GET_MODE_NUNITS (mode) == 1
5428 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5429 || TARGET_POWERPC64
5430 || ((mode != DImode && mode != DFmode && mode != DDmode)
5431 || (TARGET_E500_DOUBLE && mode != DDmode)))
5432 && (TARGET_POWERPC64 || mode != DImode)
5433 && !avoiding_indexed_address_p (mode)
5434 && mode != TImode
5435 && mode != TFmode
5436 && mode != TDmode)
5438 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
5439 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
5441 else if (SPE_VECTOR_MODE (mode)
5442 || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
5443 || mode == DDmode || mode == TDmode
5444 || mode == DImode)))
5446 if (mode == DImode)
5447 return x;
5448 /* We accept [reg + reg] and [reg + OFFSET]. */
5450 if (GET_CODE (x) == PLUS)
5452 rtx op1 = XEXP (x, 0);
5453 rtx op2 = XEXP (x, 1);
5454 rtx y;
5456 op1 = force_reg (Pmode, op1);
5458 if (GET_CODE (op2) != REG
5459 && (GET_CODE (op2) != CONST_INT
5460 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
5461 || (GET_MODE_SIZE (mode) > 8
5462 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
5463 op2 = force_reg (Pmode, op2);
5465 /* We can't always do [reg + reg] for these, because [reg +
5466 reg + offset] is not a legitimate addressing mode. */
5467 y = gen_rtx_PLUS (Pmode, op1, op2);
5469 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
5470 return force_reg (Pmode, y);
5471 else
5472 return y;
5475 return force_reg (Pmode, x);
5477 else if (TARGET_ELF
5478 && TARGET_32BIT
5479 && TARGET_NO_TOC
5480 && ! flag_pic
5481 && GET_CODE (x) != CONST_INT
5482 && GET_CODE (x) != CONST_DOUBLE
5483 && CONSTANT_P (x)
5484 && GET_MODE_NUNITS (mode) == 1
5485 && (GET_MODE_BITSIZE (mode) <= 32
5486 || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5487 && (mode == DFmode || mode == DDmode))))
5489 rtx reg = gen_reg_rtx (Pmode);
5490 emit_insn (gen_elf_high (reg, x));
5491 return gen_rtx_LO_SUM (Pmode, reg, x);
5493 else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
5494 && ! flag_pic
5495 #if TARGET_MACHO
5496 && ! MACHO_DYNAMIC_NO_PIC_P
5497 #endif
5498 && GET_CODE (x) != CONST_INT
5499 && GET_CODE (x) != CONST_DOUBLE
5500 && CONSTANT_P (x)
5501 && GET_MODE_NUNITS (mode) == 1
5502 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
5503 || (mode != DFmode && mode != DDmode))
5504 && mode != DImode
5505 && mode != TImode)
5507 rtx reg = gen_reg_rtx (Pmode);
5508 emit_insn (gen_macho_high (reg, x));
5509 return gen_rtx_LO_SUM (Pmode, reg, x);
5511 else if (TARGET_TOC
5512 && GET_CODE (x) == SYMBOL_REF
5513 && constant_pool_expr_p (x)
5514 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
5515 return create_TOC_reference (x, NULL_RTX);
5516 else
5517 return x;
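/* Worked example (illustrative) of the high/low split described in the
   function comment: legitimizing (plus (reg) (const_int 0x12345)) for
   SImode computes low_int == 0x2345 and high_int == 0x10000, emitting an
   addis to add 0x1 to the high half in a scratch register and returning
   the in-range address (plus (scratch) (const_int 0x2345)).  */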
5520 /* Debug version of rs6000_legitimize_address. */
5521 static rtx
5522 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
5524 rtx ret;
5525 rtx insns;
5527 start_sequence ();
5528 ret = rs6000_legitimize_address (x, oldx, mode);
5529 insns = get_insns ();
5530 end_sequence ();
5532 if (ret != x)
5534 fprintf (stderr,
5535 "\nrs6000_legitimize_address: mode %s, old code %s, "
5536 "new code %s, modified\n",
5537 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
5538 GET_RTX_NAME (GET_CODE (ret)));
5540 fprintf (stderr, "Original address:\n");
5541 debug_rtx (x);
5543 fprintf (stderr, "oldx:\n");
5544 debug_rtx (oldx);
5546 fprintf (stderr, "New address:\n");
5547 debug_rtx (ret);
5549 if (insns)
5551 fprintf (stderr, "Insns added:\n");
5552 debug_rtx_list (insns, 20);
5555 else
5557 fprintf (stderr,
5558 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
5559 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
5561 debug_rtx (x);
5564 if (insns)
5565 emit_insn (insns);
5567 return ret;
5570 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5571 We need to emit DTP-relative relocations. */
5573 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5574 static void
5575 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
5577 switch (size)
5579 case 4:
5580 fputs ("\t.long\t", file);
5581 break;
5582 case 8:
5583 fputs (DOUBLE_INT_ASM_OP, file);
5584 break;
5585 default:
5586 gcc_unreachable ();
5588 output_addr_const (file, x);
5589 fputs ("@dtprel+0x8000", file);
5592 /* In the name of slightly smaller debug output, and to cater to
5593 general assembler lossage, recognize various UNSPEC sequences
5594 and turn them back into a direct symbol reference. */
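/* Worked example (illustrative): a TOC-relative reference such as

     (mem (plus (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)
		(const_int 8)))

   is rewritten below as (mem (plus (symbol_ref "x") (const_int 8))).  */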
5596 static rtx
5597 rs6000_delegitimize_address (rtx orig_x)
5599 rtx x, y, offset;
5601 orig_x = delegitimize_mem_from_attrs (orig_x);
5602 x = orig_x;
5603 if (MEM_P (x))
5604 x = XEXP (x, 0);
5606 y = x;
5607 if (TARGET_CMODEL != CMODEL_SMALL
5608 && GET_CODE (y) == LO_SUM)
5609 y = XEXP (y, 1);
5611 offset = NULL_RTX;
5612 if (GET_CODE (y) == PLUS
5613 && GET_MODE (y) == Pmode
5614 && CONST_INT_P (XEXP (y, 1)))
5616 offset = XEXP (y, 1);
5617 y = XEXP (y, 0);
5620 if (GET_CODE (y) == UNSPEC
5621 && XINT (y, 1) == UNSPEC_TOCREL)
5623 #ifdef ENABLE_CHECKING
5624 if (REG_P (XVECEXP (y, 0, 1))
5625 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
5627 /* All good. */
5629 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
5631 /* Weirdness alert. df_note_compute can replace r2 with a
5632 debug_expr when this unspec is in a debug_insn.
5633 Seen in gcc.dg/pr51957-1.c */
5635 else
5637 debug_rtx (orig_x);
5638 abort ();
5640 #endif
5641 y = XVECEXP (y, 0, 0);
5642 if (offset != NULL_RTX)
5643 y = gen_rtx_PLUS (Pmode, y, offset);
5644 if (!MEM_P (orig_x))
5645 return y;
5646 else
5647 return replace_equiv_address_nv (orig_x, y);
5650 if (TARGET_MACHO
5651 && GET_CODE (orig_x) == LO_SUM
5652 && GET_CODE (XEXP (orig_x, 1)) == CONST)
5654 y = XEXP (XEXP (orig_x, 1), 0);
5655 if (GET_CODE (y) == UNSPEC
5656 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
5657 return XVECEXP (y, 0, 0);
5660 return orig_x;
5663 /* Return true if X shouldn't be emitted into the debug info.
5664 The linker doesn't like .toc section references from
5665 .debug_* sections, so reject .toc section symbols. */
5667 static bool
5668 rs6000_const_not_ok_for_debug_p (rtx x)
5670 if (GET_CODE (x) == SYMBOL_REF
5671 && CONSTANT_POOL_ADDRESS_P (x))
5673 rtx c = get_pool_constant (x);
5674 enum machine_mode cmode = get_pool_mode (x);
5675 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
5676 return true;
5679 return false;
5682 /* Construct the SYMBOL_REF for the tls_get_addr function. */
5684 static GTY(()) rtx rs6000_tls_symbol;
5685 static rtx
5686 rs6000_tls_get_addr (void)
5688 if (!rs6000_tls_symbol)
5689 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
5691 return rs6000_tls_symbol;
5694 /* Construct the SYMBOL_REF for TLS GOT references. */
5696 static GTY(()) rtx rs6000_got_symbol;
5697 static rtx
5698 rs6000_got_sym (void)
5700 if (!rs6000_got_symbol)
5702 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
5703 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
5704 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
5707 return rs6000_got_symbol;
5710 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
5711 this (thread-local) address. */
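/* Illustrative sketch (assuming standard ELF TLS assembler syntax):
   the 16-bit local-exec case below generates a single

	addi dest, 13, sym@tprel

   on 64-bit, where r13 is the thread pointer (r2 on 32-bit).  */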
5713 static rtx
5714 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
5716 rtx dest, insn;
5718 dest = gen_reg_rtx (Pmode);
5719 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
5721 rtx tlsreg;
5723 if (TARGET_64BIT)
5725 tlsreg = gen_rtx_REG (Pmode, 13);
5726 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
5728 else
5730 tlsreg = gen_rtx_REG (Pmode, 2);
5731 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
5733 emit_insn (insn);
5735 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
5737 rtx tlsreg, tmp;
5739 tmp = gen_reg_rtx (Pmode);
5740 if (TARGET_64BIT)
5742 tlsreg = gen_rtx_REG (Pmode, 13);
5743 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
5745 else
5747 tlsreg = gen_rtx_REG (Pmode, 2);
5748 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
5750 emit_insn (insn);
5751 if (TARGET_64BIT)
5752 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
5753 else
5754 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
5755 emit_insn (insn);
5757 else
5759 rtx r3, got, tga, tmp1, tmp2, call_insn;
5761 /* We currently use relocations like @got@tlsgd for tls, which
5762 means the linker will handle allocation of tls entries, placing
5763 them in the .got section. So use a pointer to the .got section,
5764 not one to secondary TOC sections used by 64-bit -mminimal-toc,
5765 or to secondary GOT sections used by 32-bit -fPIC. */
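	  /* Illustrative 64-bit global-dynamic sequence (an assumption,
	     using standard ELF TLS assembler syntax):

		 addi 3, 2, sym@got@tlsgd
		 bl __tls_get_addr(sym@tlsgd)

	     where r2 is the TOC pointer serving as the GOT pointer.  */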
5766 if (TARGET_64BIT)
5767 got = gen_rtx_REG (Pmode, 2);
5768 else
5770 if (flag_pic == 1)
5771 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
5772 else
5774 rtx gsym = rs6000_got_sym ();
5775 got = gen_reg_rtx (Pmode);
5776 if (flag_pic == 0)
5777 rs6000_emit_move (got, gsym, Pmode);
5778 else
5780 rtx mem, lab, last;
5782 tmp1 = gen_reg_rtx (Pmode);
5783 tmp2 = gen_reg_rtx (Pmode);
5784 mem = gen_const_mem (Pmode, tmp1);
5785 lab = gen_label_rtx ();
5786 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
5787 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
5788 if (TARGET_LINK_STACK)
5789 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
5790 emit_move_insn (tmp2, mem);
5791 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
5792 set_unique_reg_note (last, REG_EQUAL, gsym);
5797 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
5799 tga = rs6000_tls_get_addr ();
5800 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
5801 1, const0_rtx, Pmode);
5803 r3 = gen_rtx_REG (Pmode, 3);
5804 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5805 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
5806 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5807 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
5808 else if (DEFAULT_ABI == ABI_V4)
5809 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
5810 else
5811 gcc_unreachable ();
5812 call_insn = last_call_insn ();
5813 PATTERN (call_insn) = insn;
5814 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5815 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5816 pic_offset_table_rtx);
5818 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
5820 tga = rs6000_tls_get_addr ();
5821 tmp1 = gen_reg_rtx (Pmode);
5822 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
5823 1, const0_rtx, Pmode);
5825 r3 = gen_rtx_REG (Pmode, 3);
5826 if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
5827 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
5828 else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
5829 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
5830 else if (DEFAULT_ABI == ABI_V4)
5831 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
5832 else
5833 gcc_unreachable ();
5834 call_insn = last_call_insn ();
5835 PATTERN (call_insn) = insn;
5836 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
5837 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
5838 pic_offset_table_rtx);
5840 if (rs6000_tls_size == 16)
5842 if (TARGET_64BIT)
5843 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
5844 else
5845 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
5847 else if (rs6000_tls_size == 32)
5849 tmp2 = gen_reg_rtx (Pmode);
5850 if (TARGET_64BIT)
5851 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
5852 else
5853 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
5854 emit_insn (insn);
5855 if (TARGET_64BIT)
5856 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
5857 else
5858 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
5860 else
5862 tmp2 = gen_reg_rtx (Pmode);
5863 if (TARGET_64BIT)
5864 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
5865 else
5866 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
5867 emit_insn (insn);
5868 insn = gen_rtx_SET (Pmode, dest,
5869 gen_rtx_PLUS (Pmode, tmp2, tmp1));
5871 emit_insn (insn);
5873 else
5875 /* IE, or 64-bit offset LE. */
5876 tmp2 = gen_reg_rtx (Pmode);
5877 if (TARGET_64BIT)
5878 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
5879 else
5880 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
5881 emit_insn (insn);
5882 if (TARGET_64BIT)
5883 insn = gen_tls_tls_64 (dest, tmp2, addr);
5884 else
5885 insn = gen_tls_tls_32 (dest, tmp2, addr);
5886 emit_insn (insn);
5890 return dest;
5893 /* Return 1 if X contains a thread-local symbol. */
5895 static bool
5896 rs6000_tls_referenced_p (rtx x)
5898 if (! TARGET_HAVE_TLS)
5899 return false;
5901 return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
5904 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
5906 static bool
5907 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5909 if (GET_CODE (x) == HIGH
5910 && GET_CODE (XEXP (x, 0)) == UNSPEC)
5911 return true;
5913 return rs6000_tls_referenced_p (x);
5916 /* Return 1 if *X is a thread-local symbol. This is the same as
5917 rs6000_tls_symbol_ref except for the type of the unused argument. */
5919 static int
5920 rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
5922 return RS6000_SYMBOL_REF_TLS_P (*x);
5925 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
5926 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
5927 can be addressed relative to the toc pointer. */
5929 static bool
5930 use_toc_relative_ref (rtx sym)
5932 return ((constant_pool_expr_p (sym)
5933 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
5934 get_pool_mode (sym)))
5935 || (TARGET_CMODEL == CMODEL_MEDIUM
5936 && !CONSTANT_POOL_ADDRESS_P (sym)
5937 && SYMBOL_REF_LOCAL_P (sym)));
5940 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
5941 replace the input X, or the original X if no replacement is called for.
5942 The output parameter *WIN is 1 if the calling macro should goto WIN,
5943 0 if it should not.
5945 For RS/6000, we wish to handle large displacements off a base
5946 register by splitting the addend across an addi/addis pair and the mem insn.
5947 This cuts the number of extra insns needed from 3 to 1.
5949 On Darwin, we use this to generate code for floating point constants.
5950 A movsf_low is generated so we wind up with 2 instructions rather than 3.
5951 The Darwin code is inside #if TARGET_MACHO because only then are the
5952 machopic_* functions defined. */
5953 static rtx
5954 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
5955 int opnum, int type,
5956 int ind_levels ATTRIBUTE_UNUSED, int *win)
5958 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
5960 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
5961 DFmode/DImode MEM. */
5962 if (reg_offset_p
5963 && opnum == 1
5964 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
5965 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
5966 reg_offset_p = false;
5968 /* We must recognize output that we have already generated ourselves. */
5969 if (GET_CODE (x) == PLUS
5970 && GET_CODE (XEXP (x, 0)) == PLUS
5971 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5972 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5973 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5975 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5976 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5977 opnum, (enum reload_type) type);
5978 *win = 1;
5979 return x;
5982 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
5983 if (GET_CODE (x) == LO_SUM
5984 && GET_CODE (XEXP (x, 0)) == HIGH)
5986 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5987 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5988 opnum, (enum reload_type) type);
5989 *win = 1;
5990 return x;
5993 #if TARGET_MACHO
5994 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
5995 && GET_CODE (x) == LO_SUM
5996 && GET_CODE (XEXP (x, 0)) == PLUS
5997 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
5998 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
5999 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
6000 && machopic_operand_p (XEXP (x, 1)))
6002 /* Result of a previous invocation of this function on a Darwin
6003 floating-point constant. */
6004 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6005 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6006 opnum, (enum reload_type) type);
6007 *win = 1;
6008 return x;
6010 #endif
6012 if (TARGET_CMODEL != CMODEL_SMALL
6013 && reg_offset_p
6014 && small_toc_ref (x, VOIDmode))
6016 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
6017 x = gen_rtx_LO_SUM (Pmode, hi, x);
6018 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6019 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6020 opnum, (enum reload_type) type);
6021 *win = 1;
6022 return x;
6025 /* Force ld/std non-word aligned offset into base register by wrapping
6026 in offset 0. */
6027 if (GET_CODE (x) == PLUS
6028 && GET_CODE (XEXP (x, 0)) == REG
6029 && REGNO (XEXP (x, 0)) < 32
6030 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6031 && GET_CODE (XEXP (x, 1)) == CONST_INT
6032 && reg_offset_p
6033 && (INTVAL (XEXP (x, 1)) & 3) != 0
6034 && VECTOR_MEM_NONE_P (mode)
6035 && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
6036 && TARGET_POWERPC64)
6038 x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
6039 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6040 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6041 opnum, (enum reload_type) type);
6042 *win = 1;
6043 return x;
6046 if (GET_CODE (x) == PLUS
6047 && GET_CODE (XEXP (x, 0)) == REG
6048 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
6049 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
6050 && GET_CODE (XEXP (x, 1)) == CONST_INT
6051 && reg_offset_p
6052 && !SPE_VECTOR_MODE (mode)
6053 && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
6054 || mode == DDmode || mode == TDmode
6055 || mode == DImode))
6056 && VECTOR_MEM_NONE_P (mode))
6058 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6059 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
6060 HOST_WIDE_INT high
6061 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
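      /* Worked example (illustrative): val = 0x18000 splits into
	 low = -0x8000 and high = 0x20000, so (reg + 0x18000) reloads as
	 an addis of 0x2 into a base register plus a -0x8000 offset kept
	 in the mem.  */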
6063 /* Check for 32-bit overflow. */
6064 if (high + low != val)
6066 *win = 0;
6067 return x;
6070 /* Reload the high part into a base reg; leave the low part
6071 in the mem directly. */
6073 x = gen_rtx_PLUS (GET_MODE (x),
6074 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6075 GEN_INT (high)),
6076 GEN_INT (low));
6078 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6079 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
6080 opnum, (enum reload_type) type);
6081 *win = 1;
6082 return x;
6085 if (GET_CODE (x) == SYMBOL_REF
6086 && reg_offset_p
6087 && VECTOR_MEM_NONE_P (mode)
6088 && !SPE_VECTOR_MODE (mode)
6089 #if TARGET_MACHO
6090 && DEFAULT_ABI == ABI_DARWIN
6091 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
6092 && machopic_symbol_defined_p (x)
6093 #else
6094 && DEFAULT_ABI == ABI_V4
6095 && !flag_pic
6096 #endif
6097 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
6098 The same goes for DImode without 64-bit gprs and DFmode and DDmode
6099 without fprs. */
6100 && mode != TFmode
6101 && mode != TDmode
6102 && (mode != DImode || TARGET_POWERPC64)
6103 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
6104 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
6106 #if TARGET_MACHO
6107 if (flag_pic)
6109 rtx offset = machopic_gen_offset (x);
6110 x = gen_rtx_LO_SUM (GET_MODE (x),
6111 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
6112 gen_rtx_HIGH (Pmode, offset)), offset);
6114 else
6115 #endif
6116 x = gen_rtx_LO_SUM (GET_MODE (x),
6117 gen_rtx_HIGH (Pmode, x), x);
6119 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6120 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6121 opnum, (enum reload_type) type);
6122 *win = 1;
6123 return x;
6126 /* Reload an offset address wrapped by an AND that represents the
6127 masking of the lower bits. Strip the outer AND and let reload
6128 convert the offset address into an indirect address. For VSX,
6129 force reload to create the address with an AND in a separate
6130 register, because we can't guarantee an altivec register will
6131 be used. */
6132 if (VECTOR_MEM_ALTIVEC_P (mode)
6133 && GET_CODE (x) == AND
6134 && GET_CODE (XEXP (x, 0)) == PLUS
6135 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6136 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6137 && GET_CODE (XEXP (x, 1)) == CONST_INT
6138 && INTVAL (XEXP (x, 1)) == -16)
6140 x = XEXP (x, 0);
6141 *win = 1;
6142 return x;
6145 if (TARGET_TOC
6146 && reg_offset_p
6147 && GET_CODE (x) == SYMBOL_REF
6148 && use_toc_relative_ref (x))
6150 x = create_TOC_reference (x, NULL_RTX);
6151 if (TARGET_CMODEL != CMODEL_SMALL)
6152 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
6153 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
6154 opnum, (enum reload_type) type);
6155 *win = 1;
6156 return x;
6158 *win = 0;
6159 return x;
6162 /* Debug version of rs6000_legitimize_reload_address. */
6163 static rtx
6164 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
6165 int opnum, int type,
6166 int ind_levels, int *win)
6168 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
6169 ind_levels, win);
6170 fprintf (stderr,
6171 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
6172 "type = %d, ind_levels = %d, win = %d, original addr:\n",
6173 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
6174 debug_rtx (x);
6176 if (x == ret)
6177 fprintf (stderr, "Same address returned\n");
6178 else if (!ret)
6179 fprintf (stderr, "NULL returned\n");
6180 else
6182 fprintf (stderr, "New address:\n");
6183 debug_rtx (ret);
6186 return ret;
6189 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
6190 that is a valid memory address for an instruction.
6191 The MODE argument is the machine mode for the MEM expression
6192 that wants to use this address.
6194 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
6195 refers to a constant pool entry of an address (or the sum of it
6196 plus a constant), a short (16-bit signed) constant plus a register,
6197 the sum of two registers, or a register indirect, possibly with an
6198 auto-increment. For DFmode, DDmode and DImode with a constant plus
6199 register, we must ensure that both words are addressable, or on
6200 PowerPC64 that the offset is word aligned.
6202 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
6203 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
6204 because adjacent memory cells are accessed by adding word-sized offsets
6205 during assembly output. */
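/* Illustrative RTL for the four forms (examples, not from the original
   source):

     (symbol_ref "sym")			constant pool / TOC reference
     (plus (reg 9) (const_int -16))	register plus 16-bit offset
     (plus (reg 9) (reg 10))		sum of two registers (indexed)
     (reg 9) or (pre_inc (reg 9))	register indirect / auto-increment  */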
6206 static bool
6207 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
6209 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
6211 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
6212 if (VECTOR_MEM_ALTIVEC_P (mode)
6213 && GET_CODE (x) == AND
6214 && GET_CODE (XEXP (x, 1)) == CONST_INT
6215 && INTVAL (XEXP (x, 1)) == -16)
6216 x = XEXP (x, 0);
6218 if (RS6000_SYMBOL_REF_TLS_P (x))
6219 return 0;
6220 if (legitimate_indirect_address_p (x, reg_ok_strict))
6221 return 1;
6222 if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
6223 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6224 && !SPE_VECTOR_MODE (mode)
6225 && mode != TFmode
6226 && mode != TDmode
6227 /* Restrict addressing for DI because of our SUBREG hackery. */
6228 && !(TARGET_E500_DOUBLE
6229 && (mode == DFmode || mode == DDmode || mode == DImode))
6230 && TARGET_UPDATE
6231 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
6232 return 1;
6233 if (virtual_stack_registers_memory_p (x))
6234 return 1;
6235 if (reg_offset_p && legitimate_small_data_p (mode, x))
6236 return 1;
6237 if (reg_offset_p
6238 && legitimate_constant_pool_address_p (x, mode, reg_ok_strict))
6239 return 1;
6240 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
6241 if (! reg_ok_strict
6242 && reg_offset_p
6243 && GET_CODE (x) == PLUS
6244 && GET_CODE (XEXP (x, 0)) == REG
6245 && (XEXP (x, 0) == virtual_stack_vars_rtx
6246 || XEXP (x, 0) == arg_pointer_rtx)
6247 && GET_CODE (XEXP (x, 1)) == CONST_INT)
6248 return 1;
6249 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
6250 return 1;
6251 if (mode != TImode
6252 && mode != TFmode
6253 && mode != TDmode
6254 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6255 || TARGET_POWERPC64
6256 || (mode != DFmode && mode != DDmode)
6257 || (TARGET_E500_DOUBLE && mode != DDmode))
6258 && (TARGET_POWERPC64 || mode != DImode)
6259 && !avoiding_indexed_address_p (mode)
6260 && legitimate_indexed_address_p (x, reg_ok_strict))
6261 return 1;
6262 if (GET_CODE (x) == PRE_MODIFY
6263 && mode != TImode
6264 && mode != TFmode
6265 && mode != TDmode
6266 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6267 || TARGET_POWERPC64
6268 || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
6269 && (TARGET_POWERPC64 || mode != DImode)
6270 && !VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
6271 && !SPE_VECTOR_MODE (mode)
6272 /* Restrict addressing for DI because of our SUBREG hackery. */
6273 && !(TARGET_E500_DOUBLE
6274 && (mode == DFmode || mode == DDmode || mode == DImode))
6275 && TARGET_UPDATE
6276 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
6277 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
6278 || (!avoiding_indexed_address_p (mode)
6279 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
6280 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6281 return 1;
6282 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
6283 return 1;
6284 return 0;
6287 /* Debug version of rs6000_legitimate_address_p. */
6288 static bool
6289 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
6290 bool reg_ok_strict)
6292 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
6293 fprintf (stderr,
6294 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
6295 "strict = %d, code = %s\n",
6296 ret ? "true" : "false",
6297 GET_MODE_NAME (mode),
6298 reg_ok_strict,
6299 GET_RTX_NAME (GET_CODE (x)));
6300 debug_rtx (x);
6302 return ret;
6305 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
6307 static bool
6308 rs6000_mode_dependent_address_p (const_rtx addr)
6310 return rs6000_mode_dependent_address_ptr (addr);
6313 /* Return true if ADDR (a legitimate address expression)
6314 has an effect that depends on the machine mode it is used for.
6316 On the RS/6000 this is true of all integral offsets (since AltiVec
6317 and VSX modes don't allow them) and of any pre-increment or decrement.
6319 ??? Except that due to conceptual problems in offsettable_address_p
6320 we can't really report the problems of integral offsets. So leave
6321 this assuming that the adjustable offset must be valid for the
6322 sub-words of a TFmode operand, which is what we had before. */
6324 static bool
6325 rs6000_mode_dependent_address (const_rtx addr)
6327 switch (GET_CODE (addr))
6329 case PLUS:
6330 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
6331 is considered a legitimate address before reload, so there
6332 are no offset restrictions in that case. Note that this
6333 condition is safe in strict mode because any address involving
6334 virtual_stack_vars_rtx or arg_pointer_rtx would already have
6335 been rejected as illegitimate. */
6336 if (XEXP (addr, 0) != virtual_stack_vars_rtx
6337 && XEXP (addr, 0) != arg_pointer_rtx
6338 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
6340 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
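	  /* The 12 covers the last word of a 16-byte (TFmode) operand:
	     val through val + 12 must each fit in a signed 16-bit
	     displacement. E.g. val = 0x7ff8 is flagged as mode-dependent
	     even though a DImode access would be fine; see
	     rs6000_offsettable_memref_p below.  */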
6341 return val + 12 + 0x8000 >= 0x10000;
6343 break;
6345 case LO_SUM:
6346 /* Anything in the constant pool is sufficiently aligned that
6347 all bytes have the same high part address. */
6348 return !legitimate_constant_pool_address_p (addr, QImode, false);
6350 /* Auto-increment cases are now treated generically in recog.c. */
6351 case PRE_MODIFY:
6352 return TARGET_UPDATE;
6354 /* AND is only allowed in Altivec loads. */
6355 case AND:
6356 return true;
6358 default:
6359 break;
6362 return false;
6365 /* Debug version of rs6000_mode_dependent_address. */
6366 static bool
6367 rs6000_debug_mode_dependent_address (const_rtx addr)
6369 bool ret = rs6000_mode_dependent_address (addr);
6371 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
6372 ret ? "true" : "false");
6373 debug_rtx (addr);
6375 return ret;
6378 /* Implement FIND_BASE_TERM. */
6381 rs6000_find_base_term (rtx op)
6383 rtx base;
6385 base = op;
6386 if (GET_CODE (base) == CONST)
6387 base = XEXP (base, 0);
6388 if (GET_CODE (base) == PLUS)
6389 base = XEXP (base, 0);
6390 if (GET_CODE (base) == UNSPEC)
6391 switch (XINT (base, 1))
6393 case UNSPEC_TOCREL:
6394 case UNSPEC_MACHOPIC_OFFSET:
6395 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
6396 for aliasing purposes. */
6397 return XVECEXP (base, 0, 0);
6400 return op;
6403 /* More elaborate version of recog's offsettable_memref_p predicate
6404 that works around the ??? note of rs6000_mode_dependent_address.
6405 In particular it accepts
6407 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
6409 in 32-bit mode, which the recog predicate rejects. */
6411 bool
6412 rs6000_offsettable_memref_p (rtx op)
6414 if (!MEM_P (op))
6415 return false;
6417 /* First mimic offsettable_memref_p. */
6418 if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
6419 return true;
6421 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
6422 the latter predicate knows nothing about the mode of the memory
6423 reference and, therefore, assumes that it is the largest supported
6424 mode (TFmode). As a consequence, legitimate offsettable memory
6425 references are rejected. rs6000_legitimate_offset_address_p contains
6426 the correct logic for the PLUS case of rs6000_mode_dependent_address. */
6427 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
6430 /* Change register usage conditional on target flags. */
6431 static void
6432 rs6000_conditional_register_usage (void)
6434 int i;
6436 if (TARGET_DEBUG_TARGET)
6437 fprintf (stderr, "rs6000_conditional_register_usage called\n");
6439 /* Set MQ register fixed (already call_used) if not POWER
6440 architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
6441 be allocated. */
6442 if (! TARGET_POWER)
6443 fixed_regs[64] = 1;
6445 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
6446 if (TARGET_64BIT)
6447 fixed_regs[13] = call_used_regs[13]
6448 = call_really_used_regs[13] = 1;
6450 /* Conditionally disable FPRs. */
6451 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
6452 for (i = 32; i < 64; i++)
6453 fixed_regs[i] = call_used_regs[i]
6454 = call_really_used_regs[i] = 1;
6456 /* The TOC register is not killed across calls in a way that is
6457 visible to the compiler. */
6458 if (DEFAULT_ABI == ABI_AIX)
6459 call_really_used_regs[2] = 0;
6461 if (DEFAULT_ABI == ABI_V4
6462 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6463 && flag_pic == 2)
6464 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6466 if (DEFAULT_ABI == ABI_V4
6467 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
6468 && flag_pic == 1)
6469 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6470 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6471 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6473 if (DEFAULT_ABI == ABI_DARWIN
6474 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
6475 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6476 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6477 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6479 if (TARGET_TOC && TARGET_MINIMAL_TOC)
6480 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
6481 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
6483 if (TARGET_SPE)
6485 global_regs[SPEFSCR_REGNO] = 1;
6486 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
6487 registers in prologues and epilogues. We no longer use r14
6488 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
6489 pool for link-compatibility with older versions of GCC. Once
6490 "old" code has died out, we can return r14 to the allocation
6491 pool. */
6492 fixed_regs[14]
6493 = call_used_regs[14]
6494 = call_really_used_regs[14] = 1;
6497 if (!TARGET_ALTIVEC && !TARGET_VSX)
6499 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
6500 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6501 call_really_used_regs[VRSAVE_REGNO] = 1;
6504 if (TARGET_ALTIVEC || TARGET_VSX)
6505 global_regs[VSCR_REGNO] = 1;
6507 if (TARGET_ALTIVEC_ABI)
6509 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
6510 call_used_regs[i] = call_really_used_regs[i] = 1;
6512 /* AIX reserves VR20:31 in non-extended ABI mode. */
6513 if (TARGET_XCOFF)
6514 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
6515 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
6519 /* Try to output insns to set DEST equal to the constant SOURCE if it can
6520 be done in less than N insns. Do all computations in MODE.
6521 Returns the place where the output has been placed if it can be
6522 done and the insns have been emitted. If it would take more than N
6523 insns, zero is returned and no insns are emitted. */
6526 rs6000_emit_set_const (rtx dest, enum machine_mode mode,
6527 rtx source, int n ATTRIBUTE_UNUSED)
6529 rtx result, insn, set;
6530 HOST_WIDE_INT c0, c1;
6532 switch (mode)
6534 case QImode:
6535 case HImode:
6536 if (dest == NULL)
6537 dest = gen_reg_rtx (mode);
6538 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
6539 return dest;
6541 case SImode:
6542 result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
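      /* The two sets below form the usual high/low pair; e.g.
	 source = 0x12345678 typically assembles to lis tmp,0x1234
	 followed by ori dest,tmp,0x5678 (illustrative).  */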
6544 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
6545 GEN_INT (INTVAL (source)
6546 & (~ (HOST_WIDE_INT) 0xffff))));
6547 emit_insn (gen_rtx_SET (VOIDmode, dest,
6548 gen_rtx_IOR (SImode, copy_rtx (result),
6549 GEN_INT (INTVAL (source) & 0xffff))));
6550 result = dest;
6551 break;
6553 case DImode:
6554 switch (GET_CODE (source))
6556 case CONST_INT:
6557 c0 = INTVAL (source);
6558 c1 = -(c0 < 0);
6559 break;
6561 case CONST_DOUBLE:
6562 #if HOST_BITS_PER_WIDE_INT >= 64
6563 c0 = CONST_DOUBLE_LOW (source);
6564 c1 = -(c0 < 0);
6565 #else
6566 c0 = CONST_DOUBLE_LOW (source);
6567 c1 = CONST_DOUBLE_HIGH (source);
6568 #endif
6569 break;
6571 default:
6572 gcc_unreachable ();
6575 result = rs6000_emit_set_long_const (dest, c0, c1);
6576 break;
6578 default:
6579 gcc_unreachable ();
6582 insn = get_last_insn ();
6583 set = single_set (insn);
6584 if (! CONSTANT_P (SET_SRC (set)))
6585 set_unique_reg_note (insn, REG_EQUAL, source);
6587 return result;
6590 /* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
6591 fall back to a straightforward decomposition. We do this to avoid
6592 exponential run times encountered when looking for longer sequences
6593 with rs6000_emit_set_const. */
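/* Worked example (illustrative): on 64-bit, c = 0x123456789abcdef0 splits
   into ud4..ud1 = 0x1234, 0x5678, 0x9abc, 0xdef0 and takes the final
   branch below, typically assembling to the five-insn sequence

	lis r,0x1234
	ori r,r,0x5678
	sldi r,r,32
	oris r,r,0x9abc
	ori r,r,0xdef0  */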
6594 static rtx
6595 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
6597 if (!TARGET_POWERPC64)
6599 rtx operand1, operand2;
6601 operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
6602 DImode);
6603 operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
6604 DImode);
6605 emit_move_insn (operand1, GEN_INT (c1));
6606 emit_move_insn (operand2, GEN_INT (c2));
6608 else
6610 HOST_WIDE_INT ud1, ud2, ud3, ud4;
6612 ud1 = c1 & 0xffff;
6613 ud2 = (c1 & 0xffff0000) >> 16;
6614 #if HOST_BITS_PER_WIDE_INT >= 64
6615 c2 = c1 >> 32;
6616 #endif
6617 ud3 = c2 & 0xffff;
6618 ud4 = (c2 & 0xffff0000) >> 16;
6620 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
6621 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
6623 if (ud1 & 0x8000)
6624 emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
6625 else
6626 emit_move_insn (dest, GEN_INT (ud1));
6629 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
6630 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
6632 if (ud2 & 0x8000)
6633 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6634 - 0x80000000));
6635 else
6636 emit_move_insn (dest, GEN_INT (ud2 << 16));
6637 if (ud1 != 0)
6638 emit_move_insn (copy_rtx (dest),
6639 gen_rtx_IOR (DImode, copy_rtx (dest),
6640 GEN_INT (ud1)));
6642 else if (ud3 == 0 && ud4 == 0)
6644 gcc_assert (ud2 & 0x8000);
6645 emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
6646 - 0x80000000));
6647 if (ud1 != 0)
6648 emit_move_insn (copy_rtx (dest),
6649 gen_rtx_IOR (DImode, copy_rtx (dest),
6650 GEN_INT (ud1)));
6651 emit_move_insn (copy_rtx (dest),
6652 gen_rtx_ZERO_EXTEND (DImode,
6653 gen_lowpart (SImode,
6654 copy_rtx (dest))));
6656 else if ((ud4 == 0xffff && (ud3 & 0x8000))
6657 || (ud4 == 0 && ! (ud3 & 0x8000)))
6659 if (ud3 & 0x8000)
6660 emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
6661 - 0x80000000));
6662 else
6663 emit_move_insn (dest, GEN_INT (ud3 << 16));
6665 if (ud2 != 0)
6666 emit_move_insn (copy_rtx (dest),
6667 gen_rtx_IOR (DImode, copy_rtx (dest),
6668 GEN_INT (ud2)));
6669 emit_move_insn (copy_rtx (dest),
6670 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6671 GEN_INT (16)));
6672 if (ud1 != 0)
6673 emit_move_insn (copy_rtx (dest),
6674 gen_rtx_IOR (DImode, copy_rtx (dest),
6675 GEN_INT (ud1)));
6677 else
6679 if (ud4 & 0x8000)
6680 emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
6681 - 0x80000000));
6682 else
6683 emit_move_insn (dest, GEN_INT (ud4 << 16));
6685 if (ud3 != 0)
6686 emit_move_insn (copy_rtx (dest),
6687 gen_rtx_IOR (DImode, copy_rtx (dest),
6688 GEN_INT (ud3)));
6690 emit_move_insn (copy_rtx (dest),
6691 gen_rtx_ASHIFT (DImode, copy_rtx (dest),
6692 GEN_INT (32)));
6693 if (ud2 != 0)
6694 emit_move_insn (copy_rtx (dest),
6695 gen_rtx_IOR (DImode, copy_rtx (dest),
6696 GEN_INT (ud2 << 16)));
6697 if (ud1 != 0)
6698 emit_move_insn (copy_rtx (dest),
6699 gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
6702 return dest;
6705 /* Helper for the following. Get rid of [r+r] memory refs
6706 in cases where they won't work (TImode, TFmode, TDmode). */
6708 static void
6709 rs6000_eliminate_indexed_memrefs (rtx operands[2])
6711 if (reload_in_progress)
6712 return;
6714 if (GET_CODE (operands[0]) == MEM
6715 && GET_CODE (XEXP (operands[0], 0)) != REG
6716 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
6717 GET_MODE (operands[0]), false))
6718 operands[0]
6719 = replace_equiv_address (operands[0],
6720 copy_addr_to_reg (XEXP (operands[0], 0)));
6722 if (GET_CODE (operands[1]) == MEM
6723 && GET_CODE (XEXP (operands[1], 0)) != REG
6724 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
6725 GET_MODE (operands[1]), false))
6726 operands[1]
6727 = replace_equiv_address (operands[1],
6728 copy_addr_to_reg (XEXP (operands[1], 0)));
6731 /* Emit a move from SOURCE to DEST in mode MODE. */
6732 void
6733 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
6735 rtx operands[2];
6736 operands[0] = dest;
6737 operands[1] = source;
6739 if (TARGET_DEBUG_ADDR)
6741 fprintf (stderr,
6742 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
6743 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
6744 GET_MODE_NAME (mode),
6745 reload_in_progress,
6746 reload_completed,
6747 can_create_pseudo_p ());
6748 debug_rtx (dest);
6749 fprintf (stderr, "source:\n");
6750 debug_rtx (source);
6753 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
6754 if (GET_CODE (operands[1]) == CONST_DOUBLE
6755 && ! FLOAT_MODE_P (mode)
6756 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
6758 /* FIXME. This should never happen. */
6759 /* Since it seems that it does, do the safe thing and convert
6760 to a CONST_INT. */
6761 operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
6763 gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
6764 || FLOAT_MODE_P (mode)
6765 || ((CONST_DOUBLE_HIGH (operands[1]) != 0
6766 || CONST_DOUBLE_LOW (operands[1]) < 0)
6767 && (CONST_DOUBLE_HIGH (operands[1]) != -1
6768 || CONST_DOUBLE_LOW (operands[1]) >= 0)));
6770 /* Check if GCC is setting up a block move that will end up using FP
6771 registers as temporaries. We must make sure this is acceptable. */
6772 if (GET_CODE (operands[0]) == MEM
6773 && GET_CODE (operands[1]) == MEM
6774 && mode == DImode
6775 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
6776 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
6777 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
6778 ? 32 : MEM_ALIGN (operands[0])))
6779 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
6780 ? 32
6781 : MEM_ALIGN (operands[1]))))
6782 && ! MEM_VOLATILE_P (operands [0])
6783 && ! MEM_VOLATILE_P (operands [1]))
6785 emit_move_insn (adjust_address (operands[0], SImode, 0),
6786 adjust_address (operands[1], SImode, 0));
6787 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
6788 adjust_address (copy_rtx (operands[1]), SImode, 4));
6789 return;
6792 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
6793 && !gpc_reg_operand (operands[1], mode))
6794 operands[1] = force_reg (mode, operands[1]);
6796 if (mode == SFmode && ! TARGET_POWERPC
6797 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6798 && GET_CODE (operands[0]) == MEM)
6800 int regnum;
6802 if (reload_in_progress || reload_completed)
6803 regnum = true_regnum (operands[1]);
6804 else if (GET_CODE (operands[1]) == REG)
6805 regnum = REGNO (operands[1]);
6806 else
6807 regnum = -1;
6809 /* If operands[1] is a register, on POWER it may have
6810 double-precision data in it, so truncate it to single
6811 precision. */
6812 if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
6814 rtx newreg;
6815 newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
6816 : gen_reg_rtx (mode));
6817 emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
6818 operands[1] = newreg;
6822 /* Recognize the case where operand[1] is a reference to thread-local
6823 data and load its address to a register. */
6824 if (rs6000_tls_referenced_p (operands[1]))
6826 enum tls_model model;
6827 rtx tmp = operands[1];
6828 rtx addend = NULL;
6830 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
6832 addend = XEXP (XEXP (tmp, 0), 1);
6833 tmp = XEXP (XEXP (tmp, 0), 0);
6836 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
6837 model = SYMBOL_REF_TLS_MODEL (tmp);
6838 gcc_assert (model != 0);
6840 tmp = rs6000_legitimize_tls_address (tmp, model);
6841 if (addend)
6843 tmp = gen_rtx_PLUS (mode, tmp, addend);
6844 tmp = force_operand (tmp, operands[0]);
6846 operands[1] = tmp;
6849 /* Handle the case where reload calls us with an invalid address. */
6850 if (reload_in_progress && mode == Pmode
6851 && (! general_operand (operands[1], mode)
6852 || ! nonimmediate_operand (operands[0], mode)))
6853 goto emit_set;
6855 /* 128-bit constant floating-point values on Darwin should really be
6856 loaded as two parts. */
6857 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
6858 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
6860 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
6861 simplify_gen_subreg (DFmode, operands[1], mode, 0),
6862 DFmode);
6863 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
6864 GET_MODE_SIZE (DFmode)),
6865 simplify_gen_subreg (DFmode, operands[1], mode,
6866 GET_MODE_SIZE (DFmode)),
6867 DFmode);
6868 return;
6871 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
6872 cfun->machine->sdmode_stack_slot =
6873 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
6875 if (reload_in_progress
6876 && mode == SDmode
6877 && MEM_P (operands[0])
6878 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
6879 && REG_P (operands[1]))
6881 if (FP_REGNO_P (REGNO (operands[1])))
6883 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
6884 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6885 emit_insn (gen_movsd_store (mem, operands[1]));
6887 else if (INT_REGNO_P (REGNO (operands[1])))
6889 rtx mem = adjust_address_nv (operands[0], mode, 4);
6890 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6891 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
6893 else
6894 gcc_unreachable();
6895 return;
6897 if (reload_in_progress
6898 && mode == SDmode
6899 && REG_P (operands[0])
6900 && MEM_P (operands[1])
6901 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
6903 if (FP_REGNO_P (REGNO (operands[0])))
6905 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
6906 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6907 emit_insn (gen_movsd_load (operands[0], mem));
6909 else if (INT_REGNO_P (REGNO (operands[0])))
6911 rtx mem = adjust_address_nv (operands[1], mode, 4);
6912 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
6913 emit_insn (gen_movsd_hardfloat (operands[0], mem));
6915 else
6916 gcc_unreachable();
6917 return;
6920 /* FIXME: In the long term, this switch statement should go away
6921 and be replaced by a sequence of tests based on things like
6922 mode == Pmode. */
6923 switch (mode)
6925 case HImode:
6926 case QImode:
6927 if (CONSTANT_P (operands[1])
6928 && GET_CODE (operands[1]) != CONST_INT)
6929 operands[1] = force_const_mem (mode, operands[1]);
6930 break;
6932 case TFmode:
6933 case TDmode:
6934 rs6000_eliminate_indexed_memrefs (operands);
6935 /* fall through */
6937 case DFmode:
6938 case DDmode:
6939 case SFmode:
6940 case SDmode:
6941 if (CONSTANT_P (operands[1])
6942 && ! easy_fp_constant (operands[1], mode))
6943 operands[1] = force_const_mem (mode, operands[1]);
6944 break;
6946 case V16QImode:
6947 case V8HImode:
6948 case V4SFmode:
6949 case V4SImode:
6950 case V4HImode:
6951 case V2SFmode:
6952 case V2SImode:
6953 case V1DImode:
6954 case V2DFmode:
6955 case V2DImode:
6956 if (CONSTANT_P (operands[1])
6957 && !easy_vector_constant (operands[1], mode))
6958 operands[1] = force_const_mem (mode, operands[1]);
6959 break;
6961 case SImode:
6962 case DImode:
6963 /* Use default pattern for address of ELF small data. */
6964 if (TARGET_ELF
6965 && mode == Pmode
6966 && DEFAULT_ABI == ABI_V4
6967 && (GET_CODE (operands[1]) == SYMBOL_REF
6968 || GET_CODE (operands[1]) == CONST)
6969 && small_data_operand (operands[1], mode))
6971 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
6972 return;
6975 if (DEFAULT_ABI == ABI_V4
6976 && mode == Pmode && mode == SImode
6977 && flag_pic == 1 && got_operand (operands[1], mode))
6979 emit_insn (gen_movsi_got (operands[0], operands[1]));
6980 return;
6983 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
6984 && TARGET_NO_TOC
6985 && ! flag_pic
6986 && mode == Pmode
6987 && CONSTANT_P (operands[1])
6988 && GET_CODE (operands[1]) != HIGH
6989 && GET_CODE (operands[1]) != CONST_INT)
6991 rtx target = (!can_create_pseudo_p ()
6992 ? operands[0]
6993 : gen_reg_rtx (mode));
6995 /* If this is a function address on -mcall-aixdesc,
6996 convert it to the address of the descriptor. */
6997 if (DEFAULT_ABI == ABI_AIX
6998 && GET_CODE (operands[1]) == SYMBOL_REF
6999 && XSTR (operands[1], 0)[0] == '.')
7001 const char *name = XSTR (operands[1], 0);
7002 rtx new_ref;
7003 while (*name == '.')
7004 name++;
7005 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
7006 CONSTANT_POOL_ADDRESS_P (new_ref)
7007 = CONSTANT_POOL_ADDRESS_P (operands[1]);
7008 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
7009 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
7010 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
7011 operands[1] = new_ref;
7014 if (DEFAULT_ABI == ABI_DARWIN)
7016 #if TARGET_MACHO
7017 if (MACHO_DYNAMIC_NO_PIC_P)
7019 /* Take care of any required data indirection. */
7020 operands[1] = rs6000_machopic_legitimize_pic_address (
7021 operands[1], mode, operands[0]);
7022 if (operands[0] != operands[1])
7023 emit_insn (gen_rtx_SET (VOIDmode,
7024 operands[0], operands[1]));
7025 return;
7027 #endif
7028 emit_insn (gen_macho_high (target, operands[1]));
7029 emit_insn (gen_macho_low (operands[0], target, operands[1]));
7030 return;
7033 emit_insn (gen_elf_high (target, operands[1]));
7034 emit_insn (gen_elf_low (operands[0], target, operands[1]));
7035 return;
7038 /* If this is a SYMBOL_REF that refers to a constant pool entry,
7039 and we have put it in the TOC, we just need to make a TOC-relative
7040 reference to it. */
7041 if (TARGET_TOC
7042 && GET_CODE (operands[1]) == SYMBOL_REF
7043 && use_toc_relative_ref (operands[1]))
7044 operands[1] = create_TOC_reference (operands[1], operands[0]);
7045 else if (mode == Pmode
7046 && CONSTANT_P (operands[1])
7047 && GET_CODE (operands[1]) != HIGH
7048 && ((GET_CODE (operands[1]) != CONST_INT
7049 && ! easy_fp_constant (operands[1], mode))
7050 || (GET_CODE (operands[1]) == CONST_INT
7051 && (num_insns_constant (operands[1], mode)
7052 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
7053 || (GET_CODE (operands[0]) == REG
7054 && FP_REGNO_P (REGNO (operands[0]))))
7055 && !toc_relative_expr_p (operands[1], false)
7056 && (TARGET_CMODEL == CMODEL_SMALL
7057 || can_create_pseudo_p ()
7058 || (REG_P (operands[0])
7059 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
7062 #if TARGET_MACHO
7063 /* Darwin uses a special PIC legitimizer. */
7064 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
7066 operands[1] =
7067 rs6000_machopic_legitimize_pic_address (operands[1], mode,
7068 operands[0]);
7069 if (operands[0] != operands[1])
7070 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7071 return;
7073 #endif
7075 /* If we are to limit the number of things we put in the TOC and
7076 this is a symbol plus a constant we can add in one insn,
7077 just put the symbol in the TOC and add the constant. Don't do
7078 this if reload is in progress. */
7079 if (GET_CODE (operands[1]) == CONST
7080 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
7081 && GET_CODE (XEXP (operands[1], 0)) == PLUS
7082 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
7083 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
7084 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
7085 && ! side_effects_p (operands[0]))
7087 rtx sym =
7088 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
7089 rtx other = XEXP (XEXP (operands[1], 0), 1);
7091 sym = force_reg (mode, sym);
7092 emit_insn (gen_add3_insn (operands[0], sym, other));
7093 return;
7096 operands[1] = force_const_mem (mode, operands[1]);
7098 if (TARGET_TOC
7099 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
7100 && constant_pool_expr_p (XEXP (operands[1], 0))
7101 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
7102 get_pool_constant (XEXP (operands[1], 0)),
7103 get_pool_mode (XEXP (operands[1], 0))))
7105 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
7106 operands[0]);
7107 operands[1] = gen_const_mem (mode, tocref);
7108 set_mem_alias_set (operands[1], get_TOC_alias_set ());
7111 break;
7113 case TImode:
7114 rs6000_eliminate_indexed_memrefs (operands);
7116 if (TARGET_POWER)
7118 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7119 gen_rtvec (2,
7120 gen_rtx_SET (VOIDmode,
7121 operands[0], operands[1]),
7122 gen_rtx_CLOBBER (VOIDmode,
7123 gen_rtx_SCRATCH (SImode)))));
7124 return;
7126 break;
7128 default:
7129 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
7132 /* Above, we may have called force_const_mem which may have returned
7133 an invalid address. If we can, fix this up; otherwise, reload will
7134 have to deal with it. */
7135 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
7136 operands[1] = validize_mem (operands[1]);
7138 emit_set:
7139 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7142 /* Nonzero if we can use a floating-point register to pass this arg. */
7143 #define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
7144 (SCALAR_FLOAT_MODE_P (MODE) \
7145 && (CUM)->fregno <= FP_ARG_MAX_REG \
7146 && TARGET_HARD_FLOAT && TARGET_FPRS)
7148 /* Nonzero if we can use an AltiVec register to pass this arg. */
7149 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
7150 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
7151 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
7152 && TARGET_ALTIVEC_ABI \
7153 && (NAMED))
7155 /* Return a nonzero value to indicate that the function value should be
7156 returned in memory, just as large structures always are. TYPE will be
7157 the data type of the value, and FNTYPE will be the type of the
7158 function doing the returning, or @code{NULL} for libcalls.
7160 The AIX ABI for the RS/6000 specifies that all structures are
7161 returned in memory. The Darwin ABI does the same.
7163 For the Darwin 64 Bit ABI, a function result can be returned in
7164 registers or in memory, depending on the size of the return data
7165 type. If it is returned in registers, the value occupies the same
7166 registers as it would if it were the first and only function
7167 argument. Otherwise, the function places its result in memory at
7168 the location pointed to by GPR3.
7170 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
7171 but a draft put them in memory, and GCC used to implement the draft
7172 instead of the final standard. Therefore, aix_struct_return
7173 controls this instead of DEFAULT_ABI; V.4 targets needing backward
7174 compatibility can change DRAFT_V4_STRUCT_RET to override the
7175 default, and -m switches get the final word. See
7176 rs6000_option_override_internal for more details.
7178 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
7179 long double support is enabled. These values are returned in memory.
7181 int_size_in_bytes returns -1 for variable size objects, which go in
7182 memory always. The cast to unsigned makes -1 > 8. */
7184 static bool
7185 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7187 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
7188 if (TARGET_MACHO
7189 && rs6000_darwin64_abi
7190 && TREE_CODE (type) == RECORD_TYPE
7191 && int_size_in_bytes (type) > 0)
7193 CUMULATIVE_ARGS valcum;
7194 rtx valret;
7196 valcum.words = 0;
7197 valcum.fregno = FP_ARG_MIN_REG;
7198 valcum.vregno = ALTIVEC_ARG_MIN_REG;
7199 /* Do a trial code generation as if this were going to be passed
7200 as an argument; if any part goes in memory, we return NULL. */
7201 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
7202 if (valret)
7203 return false;
7204 /* Otherwise fall through to more conventional ABI rules. */
7207 if (AGGREGATE_TYPE_P (type)
7208 && (aix_struct_return
7209 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
7210 return true;
7212 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
7213 modes only exist for GCC vector types if -maltivec. */
7214 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
7215 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
7216 return false;
7218 /* Return synthetic vectors in memory. */
7219 if (TREE_CODE (type) == VECTOR_TYPE
7220 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
7222 static bool warned_for_return_big_vectors = false;
7223 if (!warned_for_return_big_vectors)
7225 warning (0, "GCC vector returned by reference: "
7226 "non-standard ABI extension with no compatibility guarantee");
7227 warned_for_return_big_vectors = true;
7229 return true;
7232 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
7233 return true;
7235 return false;
7238 #ifdef HAVE_AS_GNU_ATTRIBUTE
7239 /* Return TRUE if a call to function FNDECL may be one that
7240 potentially affects the function calling ABI of the object file. */
7242 static bool
7243 call_ABI_of_interest (tree fndecl)
7245 if (cgraph_state == CGRAPH_STATE_EXPANSION)
7247 struct cgraph_node *c_node;
7249 /* Libcalls are always interesting. */
7250 if (fndecl == NULL_TREE)
7251 return true;
7253 /* Any call to an external function is interesting. */
7254 if (DECL_EXTERNAL (fndecl))
7255 return true;
7257 /* Interesting functions that we are emitting in this object file. */
7258 c_node = cgraph_get_node (fndecl);
7259 c_node = cgraph_function_or_thunk_node (c_node, NULL);
7260 return !cgraph_only_called_directly_p (c_node);
7262 return false;
7264 #endif
7266 /* Initialize a variable CUM of type CUMULATIVE_ARGS
7267 for a call to a function whose data type is FNTYPE.
7268 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
7270 For incoming args we set the number of arguments in the prototype large
7271 so that we never return a PARALLEL. */
7273 void
7274 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
7275 rtx libname ATTRIBUTE_UNUSED, int incoming,
7276 int libcall, int n_named_args,
7277 tree fndecl ATTRIBUTE_UNUSED,
7278 enum machine_mode return_mode ATTRIBUTE_UNUSED)
7280 static CUMULATIVE_ARGS zero_cumulative;
7282 *cum = zero_cumulative;
7283 cum->words = 0;
7284 cum->fregno = FP_ARG_MIN_REG;
7285 cum->vregno = ALTIVEC_ARG_MIN_REG;
7286 cum->prototype = (fntype && prototype_p (fntype));
7287 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
7288 ? CALL_LIBCALL : CALL_NORMAL);
7289 cum->sysv_gregno = GP_ARG_MIN_REG;
7290 cum->stdarg = stdarg_p (fntype);
7292 cum->nargs_prototype = 0;
7293 if (incoming || cum->prototype)
7294 cum->nargs_prototype = n_named_args;
7296 /* Check for a longcall attribute. */
7297 if ((!fntype && rs6000_default_long_calls)
7298 || (fntype
7299 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
7300 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
7301 cum->call_cookie |= CALL_LONG;
7303 if (TARGET_DEBUG_ARG)
7305 fprintf (stderr, "\ninit_cumulative_args:");
7306 if (fntype)
7308 tree ret_type = TREE_TYPE (fntype);
7309 fprintf (stderr, " ret code = %s,",
7310 tree_code_name[ (int)TREE_CODE (ret_type) ]);
7313 if (cum->call_cookie & CALL_LONG)
7314 fprintf (stderr, " longcall,");
7316 fprintf (stderr, " proto = %d, nargs = %d\n",
7317 cum->prototype, cum->nargs_prototype);
7320 #ifdef HAVE_AS_GNU_ATTRIBUTE
7321 if (DEFAULT_ABI == ABI_V4)
7323 cum->escapes = call_ABI_of_interest (fndecl);
7324 if (cum->escapes)
7326 tree return_type;
7328 if (fntype)
7330 return_type = TREE_TYPE (fntype);
7331 return_mode = TYPE_MODE (return_type);
7333 else
7334 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
7336 if (return_type != NULL)
7338 if (TREE_CODE (return_type) == RECORD_TYPE
7339 && TYPE_TRANSPARENT_AGGR (return_type))
7341 return_type = TREE_TYPE (first_field (return_type));
7342 return_mode = TYPE_MODE (return_type);
7344 if (AGGREGATE_TYPE_P (return_type)
7345 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
7346 <= 8))
7347 rs6000_returns_struct = true;
7349 if (SCALAR_FLOAT_MODE_P (return_mode))
7350 rs6000_passes_float = true;
7351 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
7352 || SPE_VECTOR_MODE (return_mode))
7353 rs6000_passes_vector = true;
7356 #endif
7358 if (fntype
7359 && !TARGET_ALTIVEC
7360 && TARGET_ALTIVEC_ABI
7361 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
7363 error ("cannot return value in vector register because"
7364 " altivec instructions are disabled, use -maltivec"
7365 " to enable them");
7369 /* Return true if TYPE must be passed on the stack and not in registers. */
7371 static bool
7372 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
7374 if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
7375 return must_pass_in_stack_var_size (mode, type);
7376 else
7377 return must_pass_in_stack_var_size_or_pad (mode, type);
7380 /* If defined, a C expression which determines whether, and in which
7381 direction, to pad out an argument with extra space. The value
7382 should be of type `enum direction': either `upward' to pad above
7383 the argument, `downward' to pad below, or `none' to inhibit
7384 padding.
7386 For the AIX ABI structs are always stored left shifted in their
7387 argument slot. */
7389 enum direction
7390 function_arg_padding (enum machine_mode mode, const_tree type)
7392 #ifndef AGGREGATE_PADDING_FIXED
7393 #define AGGREGATE_PADDING_FIXED 0
7394 #endif
7395 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
7396 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
7397 #endif
7399 if (!AGGREGATE_PADDING_FIXED)
7401 /* GCC used to pass structures of the same size as integer types as
7402 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
7403 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
7404 passed padded downward, except that -mstrict-align further
7405 muddied the water in that multi-component structures of 2 and 4
7406 bytes in size were passed padded upward.
7408 The following arranges for best compatibility with previous
7409 versions of gcc, but removes the -mstrict-align dependency. */
7410 if (BYTES_BIG_ENDIAN)
7412 HOST_WIDE_INT size = 0;
7414 if (mode == BLKmode)
7416 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
7417 size = int_size_in_bytes (type);
7419 else
7420 size = GET_MODE_SIZE (mode);
7422 if (size == 1 || size == 2 || size == 4)
7423 return downward;
7425 return upward;
7428 if (AGGREGATES_PAD_UPWARD_ALWAYS)
7430 if (type != 0 && AGGREGATE_TYPE_P (type))
7431 return upward;
7434 /* Fall back to the default. */
7435 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
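/* Illustrative examples of the size test above (not from the original
   source): on a big-endian target a 2-byte HImode arg pads downward
   (right-justified in its word), while a 3-byte BLKmode struct pads
   upward (left-justified).  */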
7438 /* If defined, a C expression that gives the alignment boundary, in bits,
7439 of an argument with the specified mode and type. If it is not defined,
7440 PARM_BOUNDARY is used for all arguments.
7442 V.4 wants long longs and doubles to be double word aligned. Just
7443 testing the mode size is a boneheaded way to do this as it means
7444 that other types such as complex int are also double word aligned.
7445 However, we're stuck with this because changing the ABI might break
7446 existing library interfaces.
7448 Doubleword align SPE vectors.
7449 Quadword align Altivec/VSX vectors.
7450 Quadword align large synthetic vector types. */
7452 static unsigned int
7453 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
7455 if (DEFAULT_ABI == ABI_V4
7456 && (GET_MODE_SIZE (mode) == 8
7457 || (TARGET_HARD_FLOAT
7458 && TARGET_FPRS
7459 && (mode == TFmode || mode == TDmode))))
7460 return 64;
7461 else if (SPE_VECTOR_MODE (mode)
7462 || (type && TREE_CODE (type) == VECTOR_TYPE
7463 && int_size_in_bytes (type) >= 8
7464 && int_size_in_bytes (type) < 16))
7465 return 64;
7466 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7467 || (type && TREE_CODE (type) == VECTOR_TYPE
7468 && int_size_in_bytes (type) >= 16))
7469 return 128;
7470 else if (TARGET_MACHO
7471 && rs6000_darwin64_abi
7472 && mode == BLKmode
7473 && type && TYPE_ALIGN (type) > 64)
7474 return 128;
7475 else
7476 return PARM_BOUNDARY;
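/* Examples of the rules above: under the V.4 ABI a double (mode size 8)
   is doubleword aligned; an 8- to 15-byte synthetic vector is also
   doubleword aligned; AltiVec/VSX vectors and any vector type of 16
   bytes or more are quadword aligned; everything else falls back to
   PARM_BOUNDARY. */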
7479 /* For a function parm of MODE and TYPE, return the starting word in
7480 the parameter area. NWORDS of the parameter area are already used. */
7482 static unsigned int
7483 rs6000_parm_start (enum machine_mode mode, const_tree type,
7484 unsigned int nwords)
7486 unsigned int align;
7487 unsigned int parm_offset;
7489 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
7490 parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
7491 return nwords + (-(parm_offset + nwords) & align);
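/* A sketch of the arithmetic, assuming 64-bit AIX: PARM_BOUNDARY is 64,
   so a 16-byte-aligned vector gives align == 128/64 - 1 == 1, and
   parm_offset is 6. With nwords == 3 already used, the result is
   3 + (-(6 + 3) & 1) == 4; one pad word is skipped so that the argument
   starts on an even doubleword of the parameter area. */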
7494 /* Compute the size (in words) of a function argument. */
7496 static unsigned long
7497 rs6000_arg_size (enum machine_mode mode, const_tree type)
7499 unsigned long size;
7501 if (mode != BLKmode)
7502 size = GET_MODE_SIZE (mode);
7503 else
7504 size = int_size_in_bytes (type);
7506 if (TARGET_32BIT)
7507 return (size + 3) >> 2;
7508 else
7509 return (size + 7) >> 3;
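/* E.g. a 10-byte BLKmode struct occupies (10 + 3) >> 2 == 3 words under
   TARGET_32BIT and (10 + 7) >> 3 == 2 words under 64-bit; sizes are
   always rounded up to whole words. */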
7512 /* Use this to flush pending int fields. */
7514 static void
7515 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
7516 HOST_WIDE_INT bitpos, int final)
7518 unsigned int startbit, endbit;
7519 int intregs, intoffset;
7520 enum machine_mode mode;
7522 /* Handle the situations where a float is taking up the first half
7523 of the GPR, and the other half is empty (typically due to
7524 alignment restrictions). We can detect this by an 8-byte-aligned
7525 int field, or by seeing that this is the final flush for this
7526 argument. Count the word and continue on. */
7527 if (cum->floats_in_gpr == 1
7528 && (cum->intoffset % 64 == 0
7529 || (cum->intoffset == -1 && final)))
7531 cum->words++;
7532 cum->floats_in_gpr = 0;
7535 if (cum->intoffset == -1)
7536 return;
7538 intoffset = cum->intoffset;
7539 cum->intoffset = -1;
7540 cum->floats_in_gpr = 0;
7542 if (intoffset % BITS_PER_WORD != 0)
7544 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
7545 MODE_INT, 0);
7546 if (mode == BLKmode)
7548 /* We couldn't find an appropriate mode, which happens,
7549 e.g., in packed structs when there are 3 bytes to load.
7550 Move intoffset back to the beginning of the word in this
7551 case. */
7552 intoffset = intoffset & -BITS_PER_WORD;
7556 startbit = intoffset & -BITS_PER_WORD;
7557 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
7558 intregs = (endbit - startbit) / BITS_PER_WORD;
7559 cum->words += intregs;
7560 /* words should be unsigned. */
7561 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
7563 int pad = (endbit/BITS_PER_WORD) - cum->words;
7564 cum->words += pad;
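/* Worked case, assuming BITS_PER_WORD == 64: integer fields occupying
   bits 8 through 99 of the record give startbit == 0 and
   endbit == (100 + 63) & -64 == 128, so intregs == 2 and two words are
   charged to cum->words. */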
7568 /* The darwin64 ABI calls for us to recurse down through structs,
7569 looking for elements passed in registers. Unfortunately, we have
7570 to track the int register count here as well, because of misalignments
7571 in powerpc alignment mode. */
7573 static void
7574 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
7575 const_tree type,
7576 HOST_WIDE_INT startbitpos)
7578 tree f;
7580 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
7581 if (TREE_CODE (f) == FIELD_DECL)
7583 HOST_WIDE_INT bitpos = startbitpos;
7584 tree ftype = TREE_TYPE (f);
7585 enum machine_mode mode;
7586 if (ftype == error_mark_node)
7587 continue;
7588 mode = TYPE_MODE (ftype);
7590 if (DECL_SIZE (f) != 0
7591 && host_integerp (bit_position (f), 1))
7592 bitpos += int_bit_position (f);
7594 /* ??? FIXME: else assume zero offset. */
7596 if (TREE_CODE (ftype) == RECORD_TYPE)
7597 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
7598 else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
7600 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
7601 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7602 cum->fregno += n_fpregs;
7603 /* Single-precision floats present a special problem for
7604 us, because they are smaller than an 8-byte GPR, and so
7605 the structure-packing rules combined with the standard
7606 varargs behavior mean that we want to pack float/float
7607 and float/int combinations into a single register's
7608 space. This is complicated by the arg advance flushing,
7609 which works on arbitrarily large groups of int-type
7610 fields. */
7611 if (mode == SFmode)
7613 if (cum->floats_in_gpr == 1)
7615 /* Two floats in a word; count the word and reset
7616 the float count. */
7617 cum->words++;
7618 cum->floats_in_gpr = 0;
7620 else if (bitpos % 64 == 0)
7622 /* A float at the beginning of an 8-byte word;
7623 count it and put off adjusting cum->words until
7624 we see if an arg advance flush is going to do it
7625 for us. */
7626 cum->floats_in_gpr++;
7628 else
7630 /* The float is at the end of a word, preceded
7631 by integer fields, so the arg advance flush
7632 just above has already set cum->words and
7633 everything is taken care of. */
7636 else
7637 cum->words += n_fpregs;
7639 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
7641 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
7642 cum->vregno++;
7643 cum->words += 2;
7645 else if (cum->intoffset == -1)
7646 cum->intoffset = bitpos;
7650 /* Check for an item that needs to be considered specially under the Darwin
7651 64-bit ABI. These are record types where the mode is BLKmode or the
7652 structure is 8 bytes in size. */
7653 static int
7654 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
7656 return rs6000_darwin64_abi
7657 && ((mode == BLKmode
7658 && TREE_CODE (type) == RECORD_TYPE
7659 && int_size_in_bytes (type) > 0)
7660 || (type && TREE_CODE (type) == RECORD_TYPE
7661 && int_size_in_bytes (type) == 8)) ? 1 : 0;
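/* E.g. "struct { int a; int b; }" (8 bytes) qualifies, as does any
   BLKmode record with a computable positive size, while a 4-byte
   struct with SImode does not. */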
7664 /* Update the data in CUM to advance over an argument
7665 of mode MODE and data type TYPE.
7666 (TYPE is null for libcalls where that information may not be available.)
7668 Note that for args passed by reference, function_arg will be called
7669 with MODE and TYPE set to that of the pointer to the arg, not the arg
7670 itself. */
7672 static void
7673 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7674 const_tree type, bool named, int depth)
7676 /* Only tick off an argument if we're not recursing. */
7677 if (depth == 0)
7678 cum->nargs_prototype--;
7680 #ifdef HAVE_AS_GNU_ATTRIBUTE
7681 if (DEFAULT_ABI == ABI_V4
7682 && cum->escapes)
7684 if (SCALAR_FLOAT_MODE_P (mode))
7685 rs6000_passes_float = true;
7686 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
7687 rs6000_passes_vector = true;
7688 else if (SPE_VECTOR_MODE (mode)
7689 && !cum->stdarg
7690 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7691 rs6000_passes_vector = true;
7693 #endif
7695 if (TARGET_ALTIVEC_ABI
7696 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
7697 || (type && TREE_CODE (type) == VECTOR_TYPE
7698 && int_size_in_bytes (type) == 16)))
7700 bool stack = false;
7702 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
7704 cum->vregno++;
7705 if (!TARGET_ALTIVEC)
7706 error ("cannot pass argument in vector register because"
7707 " altivec instructions are disabled, use -maltivec"
7708 " to enable them");
7710 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
7711 even if it is going to be passed in a vector register.
7712 Darwin does the same for variable-argument functions. */
7713 if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
7714 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
7715 stack = true;
7717 else
7718 stack = true;
7720 if (stack)
7722 int align;
7724 /* Vector parameters must be 16-byte aligned. This places
7725 them at 2 mod 4 in terms of words in 32-bit mode, since
7726 the parameter save area starts at offset 24 from the
7727 stack. In 64-bit mode, they just have to start on an
7728 even word, since the parameter save area is 16-byte
7729 aligned. Space for GPRs is reserved even if the argument
7730 will be passed in memory. */
7731 if (TARGET_32BIT)
7732 align = (2 - cum->words) & 3;
7733 else
7734 align = cum->words & 1;
7735 cum->words += align + rs6000_arg_size (mode, type);
7737 if (TARGET_DEBUG_ARG)
7739 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
7740 cum->words, align);
7741 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
7742 cum->nargs_prototype, cum->prototype,
7743 GET_MODE_NAME (mode));
7747 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
7748 && !cum->stdarg
7749 && cum->sysv_gregno <= GP_ARG_MAX_REG)
7750 cum->sysv_gregno++;
7752 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
7754 int size = int_size_in_bytes (type);
7755 /* Variable sized types have size == -1 and are
7756 treated as if consisting entirely of ints.
7757 Pad to a 16-byte boundary if needed. */
7758 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
7759 && (cum->words % 2) != 0)
7760 cum->words++;
7761 /* For varargs, we can just go up by the size of the struct. */
7762 if (!named)
7763 cum->words += (size + 7) / 8;
7764 else
7766 /* It is tempting to say int register count just goes up by
7767 sizeof(type)/8, but this is wrong in a case such as
7768 { int; double; int; } [powerpc alignment]. We have to
7769 grovel through the fields for these too. */
7770 cum->intoffset = 0;
7771 cum->floats_in_gpr = 0;
7772 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
7773 rs6000_darwin64_record_arg_advance_flush (cum,
7774 size * BITS_PER_UNIT, 1);
7776 if (TARGET_DEBUG_ARG)
7778 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
7779 cum->words, TYPE_ALIGN (type), size);
7780 fprintf (stderr,
7781 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
7782 cum->nargs_prototype, cum->prototype,
7783 GET_MODE_NAME (mode));
7786 else if (DEFAULT_ABI == ABI_V4)
7788 if (TARGET_HARD_FLOAT && TARGET_FPRS
7789 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
7790 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
7791 || (mode == TFmode && !TARGET_IEEEQUAD)
7792 || mode == SDmode || mode == DDmode || mode == TDmode))
7794 /* _Decimal128 must use an even/odd register pair. This assumes
7795 that the register number is odd when fregno is odd. */
7796 if (mode == TDmode && (cum->fregno % 2) == 1)
7797 cum->fregno++;
7799 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
7800 <= FP_ARG_V4_MAX_REG)
7801 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7802 else
7804 cum->fregno = FP_ARG_V4_MAX_REG + 1;
7805 if (mode == DFmode || mode == TFmode
7806 || mode == DDmode || mode == TDmode)
7807 cum->words += cum->words & 1;
7808 cum->words += rs6000_arg_size (mode, type);
7811 else
7813 int n_words = rs6000_arg_size (mode, type);
7814 int gregno = cum->sysv_gregno;
7816 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
7817 (r7,r8) or (r9,r10). As is any other 2-word item such
7818 as complex int due to a historical mistake. */
7819 if (n_words == 2)
7820 gregno += (1 - gregno) & 1;
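/* The expression above rounds an even register number up to the next
   odd one: r4 becomes r5 while r3 stays put, so a 2-word item always
   lands in one of the pairs listed in the comment. */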
7822 /* Multi-reg args are not split between registers and stack. */
7823 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7825 /* Long long and SPE vectors are aligned on the stack.
7826 So are other 2 word items such as complex int due to
7827 a historical mistake. */
7828 if (n_words == 2)
7829 cum->words += cum->words & 1;
7830 cum->words += n_words;
7833 /* Note: we continue to accumulate gregno even after we have started
7834 spilling to the stack; a value past GP_ARG_MAX_REG tells
7835 expand_builtin_saveregs that spilling has begun. */
7836 cum->sysv_gregno = gregno + n_words;
7839 if (TARGET_DEBUG_ARG)
7841 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7842 cum->words, cum->fregno);
7843 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
7844 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
7845 fprintf (stderr, "mode = %4s, named = %d\n",
7846 GET_MODE_NAME (mode), named);
7849 else
7851 int n_words = rs6000_arg_size (mode, type);
7852 int start_words = cum->words;
7853 int align_words = rs6000_parm_start (mode, type, start_words);
7855 cum->words = align_words + n_words;
7857 if (SCALAR_FLOAT_MODE_P (mode)
7858 && TARGET_HARD_FLOAT && TARGET_FPRS)
7860 /* _Decimal128 must be passed in an even/odd float register pair.
7861 This assumes that the register number is odd when fregno is
7862 odd. */
7863 if (mode == TDmode && (cum->fregno % 2) == 1)
7864 cum->fregno++;
7865 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
7868 if (TARGET_DEBUG_ARG)
7870 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
7871 cum->words, cum->fregno);
7872 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
7873 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
7874 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
7875 named, align_words - start_words, depth);
7880 static void
7881 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
7882 const_tree type, bool named)
7884 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
7888 static rtx
7889 spe_build_register_parallel (enum machine_mode mode, int gregno)
7891 rtx r1, r3, r5, r7;
7893 switch (mode)
7895 case DFmode:
7896 r1 = gen_rtx_REG (DImode, gregno);
7897 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7898 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
7900 case DCmode:
7901 case TFmode:
7902 r1 = gen_rtx_REG (DImode, gregno);
7903 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7904 r3 = gen_rtx_REG (DImode, gregno + 2);
7905 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
7906 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
7908 case TCmode:
7909 r1 = gen_rtx_REG (DImode, gregno);
7910 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
7911 r3 = gen_rtx_REG (DImode, gregno + 2);
7912 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
7913 r5 = gen_rtx_REG (DImode, gregno + 4);
7914 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
7915 r7 = gen_rtx_REG (DImode, gregno + 6);
7916 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
7917 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
7919 default:
7920 gcc_unreachable ();
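/* For illustration, TFmode starting at r5 yields
   (parallel [(expr_list (reg:DI 5) (const_int 0))
   (expr_list (reg:DI 7) (const_int 8))]),
   i.e. two DImode halves at byte offsets 0 and 8. */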
7924 /* Determine where to put a SIMD argument on the SPE. */
7925 static rtx
7926 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7927 const_tree type)
7929 int gregno = cum->sysv_gregno;
7931 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
7932 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
7933 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
7934 || mode == DCmode || mode == TCmode))
7936 int n_words = rs6000_arg_size (mode, type);
7938 /* Doubles go in an odd/even register pair (r5/r6, etc). */
7939 if (mode == DFmode)
7940 gregno += (1 - gregno) & 1;
7942 /* Multi-reg args are not split between registers and stack. */
7943 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
7944 return NULL_RTX;
7946 return spe_build_register_parallel (mode, gregno);
7948 if (cum->stdarg)
7950 int n_words = rs6000_arg_size (mode, type);
7952 /* SPE vectors are put in odd registers. */
7953 if (n_words == 2 && (gregno & 1) == 0)
7954 gregno += 1;
7956 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
7958 rtx r1, r2;
7959 enum machine_mode m = SImode;
7961 r1 = gen_rtx_REG (m, gregno);
7962 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
7963 r2 = gen_rtx_REG (m, gregno + 1);
7964 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
7965 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
7967 else
7968 return NULL_RTX;
7970 else
7972 if (gregno <= GP_ARG_MAX_REG)
7973 return gen_rtx_REG (mode, gregno);
7974 else
7975 return NULL_RTX;
7979 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
7980 structure between cum->intoffset and bitpos to integer registers. */
7982 static void
7983 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
7984 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
7986 enum machine_mode mode;
7987 unsigned int regno;
7988 unsigned int startbit, endbit;
7989 int this_regno, intregs, intoffset;
7990 rtx reg;
7992 if (cum->intoffset == -1)
7993 return;
7995 intoffset = cum->intoffset;
7996 cum->intoffset = -1;
7998 /* If this is the trailing part of a word, try to load only that
7999 much into the register. Otherwise load the whole register. Note
8000 that in the latter case we may pick up unwanted bits. That is not a
8001 problem at the moment, but we may wish to revisit it. */
8003 if (intoffset % BITS_PER_WORD != 0)
8005 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
8006 MODE_INT, 0);
8007 if (mode == BLKmode)
8009 /* We couldn't find an appropriate mode, which happens,
8010 e.g., in packed structs when there are 3 bytes to load.
8011 Move intoffset back to the beginning of the word in this
8012 case. */
8013 intoffset = intoffset & -BITS_PER_WORD;
8014 mode = word_mode;
8017 else
8018 mode = word_mode;
8020 startbit = intoffset & -BITS_PER_WORD;
8021 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
8022 intregs = (endbit - startbit) / BITS_PER_WORD;
8023 this_regno = cum->words + intoffset / BITS_PER_WORD;
8025 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
8026 cum->use_stack = 1;
8028 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
8029 if (intregs <= 0)
8030 return;
8032 intoffset /= BITS_PER_UNIT;
8033 do
8035 regno = GP_ARG_MIN_REG + this_regno;
8036 reg = gen_rtx_REG (mode, regno);
8037 rvec[(*k)++] =
8038 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
8040 this_regno += 1;
8041 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
8042 mode = word_mode;
8043 intregs -= 1;
8045 while (intregs > 0);
8048 /* Recursive workhorse for rs6000_darwin64_record_arg, below. */
8050 static void
8051 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
8052 HOST_WIDE_INT startbitpos, rtx rvec[],
8053 int *k)
8055 tree f;
8057 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
8058 if (TREE_CODE (f) == FIELD_DECL)
8060 HOST_WIDE_INT bitpos = startbitpos;
8061 tree ftype = TREE_TYPE (f);
8062 enum machine_mode mode;
8063 if (ftype == error_mark_node)
8064 continue;
8065 mode = TYPE_MODE (ftype);
8067 if (DECL_SIZE (f) != 0
8068 && host_integerp (bit_position (f), 1))
8069 bitpos += int_bit_position (f);
8071 /* ??? FIXME: else assume zero offset. */
8073 if (TREE_CODE (ftype) == RECORD_TYPE)
8074 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
8075 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
8077 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8078 #if 0
8079 switch (mode)
8081 case SCmode: mode = SFmode; break;
8082 case DCmode: mode = DFmode; break;
8083 case TCmode: mode = TFmode; break;
8084 default: break;
8086 #endif
8087 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8088 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8090 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8091 && (mode == TFmode || mode == TDmode));
8092 /* Long double or _Decimal128 split over regs and memory. */
8093 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
8094 cum->use_stack = 1;
8096 rvec[(*k)++]
8097 = gen_rtx_EXPR_LIST (VOIDmode,
8098 gen_rtx_REG (mode, cum->fregno++),
8099 GEN_INT (bitpos / BITS_PER_UNIT));
8100 if (mode == TFmode || mode == TDmode)
8101 cum->fregno++;
8103 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
8105 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
8106 rvec[(*k)++]
8107 = gen_rtx_EXPR_LIST (VOIDmode,
8108 gen_rtx_REG (mode, cum->vregno++),
8109 GEN_INT (bitpos / BITS_PER_UNIT));
8111 else if (cum->intoffset == -1)
8112 cum->intoffset = bitpos;
8116 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
8117 the register(s) to be used for each field and subfield of a struct
8118 being passed by value, along with the offset of where the
8119 register's value may be found in the block. FP fields go in FP
8120 registers, vector fields go in vector registers, and everything
8121 else goes in int registers, packed as in memory.
8123 This code is also used for function return values. RETVAL indicates
8124 whether this is the case.
8126 Much of this is taken from the SPARC V9 port, which has a similar
8127 calling convention. */
8129 static rtx
8130 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
8131 bool named, bool retval)
8133 rtx rvec[FIRST_PSEUDO_REGISTER];
8134 int k = 1, kbase = 1;
8135 HOST_WIDE_INT typesize = int_size_in_bytes (type);
8136 /* This is a copy; modifications are not visible to our caller. */
8137 CUMULATIVE_ARGS copy_cum = *orig_cum;
8138 CUMULATIVE_ARGS *cum = &copy_cum;
8140 /* Pad to a 16-byte boundary if needed. */
8141 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
8142 && (cum->words % 2) != 0)
8143 cum->words++;
8145 cum->intoffset = 0;
8146 cum->use_stack = 0;
8147 cum->named = named;
8149 /* Put entries into rvec[] for individual FP and vector fields, and
8150 for the chunks of memory that go in int regs. Note we start at
8151 element 1; 0 is reserved for an indication of using memory, and
8152 may or may not be filled in below. */
8153 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
8154 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
8156 /* If any part of the struct went on the stack put all of it there.
8157 This hack is because the generic code for
8158 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
8159 parts of the struct are not at the beginning. */
8160 if (cum->use_stack)
8162 if (retval)
8163 return NULL_RTX; /* doesn't go in registers at all */
8164 kbase = 0;
8165 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8167 if (k > 1 || cum->use_stack)
8168 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
8169 else
8170 return NULL_RTX;
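/* As an example, "struct { double d; int i; }" passed by value yields a
   PARALLEL whose first element puts the double in an FPR at offset 0
   and whose second covers the int with a word-mode GPR at offset 8; had
   any piece spilled, element 0 would instead be the NULL_RTX memory
   marker. */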
8173 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
8175 static rtx
8176 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
8177 int align_words)
8179 int n_units;
8180 int i, k;
8181 rtx rvec[GP_ARG_NUM_REG + 1];
8183 if (align_words >= GP_ARG_NUM_REG)
8184 return NULL_RTX;
8186 n_units = rs6000_arg_size (mode, type);
8188 /* Optimize the simple case where the arg fits in one gpr, except in
8189 the case of BLKmode due to assign_parms assuming that registers are
8190 BITS_PER_WORD wide. */
8191 if (n_units == 0
8192 || (n_units == 1 && mode != BLKmode))
8193 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8195 k = 0;
8196 if (align_words + n_units > GP_ARG_NUM_REG)
8197 /* Not all of the arg fits in gprs. Say that it goes in memory too,
8198 using a magic NULL_RTX component.
8199 This is not strictly correct. Only some of the arg belongs in
8200 memory, not all of it. However, the normal scheme using
8201 function_arg_partial_nregs can result in unusual subregs, eg.
8202 (subreg:SI (reg:DF) 4), which are not handled well. The code to
8203 store the whole arg to memory is often more efficient than code
8204 to store pieces, and we know that space is available in the right
8205 place for the whole arg. */
8206 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8208 i = 0;
8210 do
8211 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
8212 rtx off = GEN_INT (i++ * 4);
8213 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8215 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
8217 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
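/* Example: a 12-byte BLKmode struct with align_words == 6 starts at r9
   but does not fit (n_units == 3 with only r9 and r10 remaining), so
   the PARALLEL holds the NULL_RTX memory marker plus (reg:SI 9) at
   offset 0 and (reg:SI 10) at offset 4; the final word comes from
   memory. */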
8220 /* Determine where to put an argument to a function.
8221 Value is zero to push the argument on the stack,
8222 or a hard register in which to store the argument.
8224 MODE is the argument's machine mode.
8225 TYPE is the data type of the argument (as a tree).
8226 This is null for libcalls where that information may
8227 not be available.
8228 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8229 the preceding args and about the function being called. It is
8230 not modified in this routine.
8231 NAMED is nonzero if this argument is a named parameter
8232 (otherwise it is an extra parameter matching an ellipsis).
8234 On RS/6000 the first eight words of non-FP are normally in registers
8235 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
8236 Under V.4, the first 8 FP args are in registers.
8238 If this is floating-point and no prototype is specified, we use
8239 both an FP and integer register (or possibly FP reg and stack). Library
8240 functions (when CALL_LIBCALL is set) always have the proper types for args,
8241 so we can pass the FP value just in one register. emit_library_function
8242 doesn't support PARALLEL anyway.
8244 Note that for args passed by reference, function_arg will be called
8245 with MODE and TYPE set to that of the pointer to the arg, not the arg
8246 itself. */
8248 static rtx
8249 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8250 const_tree type, bool named)
8252 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8253 enum rs6000_abi abi = DEFAULT_ABI;
8255 /* Return a marker to indicate whether CR1 needs to set or clear the
8256 bit that V.4 uses to say fp args were passed in registers.
8257 Assume that we don't need the marker for software floating point,
8258 or compiler generated library calls. */
8259 if (mode == VOIDmode)
8261 if (abi == ABI_V4
8262 && (cum->call_cookie & CALL_LIBCALL) == 0
8263 && (cum->stdarg
8264 || (cum->nargs_prototype < 0
8265 && (cum->prototype || TARGET_NO_PROTOTYPE))))
8267 /* For the SPE, we need to crxor CR6 always. */
8268 if (TARGET_SPE_ABI)
8269 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
8270 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
8271 return GEN_INT (cum->call_cookie
8272 | ((cum->fregno == FP_ARG_MIN_REG)
8273 ? CALL_V4_SET_FP_ARGS
8274 : CALL_V4_CLEAR_FP_ARGS));
8277 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
8280 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8282 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
8283 if (rslt != NULL_RTX)
8284 return rslt;
8285 /* Else fall through to usual handling. */
8288 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
8289 if (TARGET_64BIT && ! cum->prototype)
8291 /* Vector parameters get passed in a vector register
8292 and also in GPRs or memory, in the absence of a prototype. */
8293 int align_words;
8294 rtx slot;
8295 align_words = (cum->words + 1) & ~1;
8297 if (align_words >= GP_ARG_NUM_REG)
8299 slot = NULL_RTX;
8301 else
8303 slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8305 return gen_rtx_PARALLEL (mode,
8306 gen_rtvec (2,
8307 gen_rtx_EXPR_LIST (VOIDmode,
8308 slot, const0_rtx),
8309 gen_rtx_EXPR_LIST (VOIDmode,
8310 gen_rtx_REG (mode, cum->vregno),
8311 const0_rtx)));
8313 else
8314 return gen_rtx_REG (mode, cum->vregno);
8315 else if (TARGET_ALTIVEC_ABI
8316 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
8317 || (type && TREE_CODE (type) == VECTOR_TYPE
8318 && int_size_in_bytes (type) == 16)))
8320 if (named || abi == ABI_V4)
8321 return NULL_RTX;
8322 else
8324 /* Vector parameters to varargs functions under AIX or Darwin
8325 get passed in memory and possibly also in GPRs. */
8326 int align, align_words, n_words;
8327 enum machine_mode part_mode;
8329 /* Vector parameters must be 16-byte aligned. This places them at
8330 2 mod 4 in terms of words in 32-bit mode, since the parameter
8331 save area starts at offset 24 from the stack. In 64-bit mode,
8332 they just have to start on an even word, since the parameter
8333 save area is 16-byte aligned. */
8334 if (TARGET_32BIT)
8335 align = (2 - cum->words) & 3;
8336 else
8337 align = cum->words & 1;
8338 align_words = cum->words + align;
8340 /* Out of registers? Memory, then. */
8341 if (align_words >= GP_ARG_NUM_REG)
8342 return NULL_RTX;
8344 if (TARGET_32BIT && TARGET_POWERPC64)
8345 return rs6000_mixed_function_arg (mode, type, align_words);
8347 /* The vector value goes in GPRs. Only the part of the
8348 value in GPRs is reported here. */
8349 part_mode = mode;
8350 n_words = rs6000_arg_size (mode, type);
8351 if (align_words + n_words > GP_ARG_NUM_REG)
8352 /* Fortunately, there are only two possibilities: the value
8353 is either wholly in GPRs or half in GPRs and half not. */
8354 part_mode = DImode;
8356 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
8359 else if (TARGET_SPE_ABI && TARGET_SPE
8360 && (SPE_VECTOR_MODE (mode)
8361 || (TARGET_E500_DOUBLE && (mode == DFmode
8362 || mode == DCmode
8363 || mode == TFmode
8364 || mode == TCmode))))
8365 return rs6000_spe_function_arg (cum, mode, type);
8367 else if (abi == ABI_V4)
8369 if (TARGET_HARD_FLOAT && TARGET_FPRS
8370 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
8371 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
8372 || (mode == TFmode && !TARGET_IEEEQUAD)
8373 || mode == SDmode || mode == DDmode || mode == TDmode))
8375 /* _Decimal128 must use an even/odd register pair. This assumes
8376 that the register number is odd when fregno is odd. */
8377 if (mode == TDmode && (cum->fregno % 2) == 1)
8378 cum->fregno++;
8380 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
8381 <= FP_ARG_V4_MAX_REG)
8382 return gen_rtx_REG (mode, cum->fregno);
8383 else
8384 return NULL_RTX;
8386 else
8388 int n_words = rs6000_arg_size (mode, type);
8389 int gregno = cum->sysv_gregno;
8391 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
8392 (r7,r8) or (r9,r10). As is any other 2-word item such
8393 as complex int due to a historical mistake. */
8394 if (n_words == 2)
8395 gregno += (1 - gregno) & 1;
8397 /* Multi-reg args are not split between registers and stack. */
8398 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
8399 return NULL_RTX;
8401 if (TARGET_32BIT && TARGET_POWERPC64)
8402 return rs6000_mixed_function_arg (mode, type,
8403 gregno - GP_ARG_MIN_REG);
8404 return gen_rtx_REG (mode, gregno);
8407 else
8409 int align_words = rs6000_parm_start (mode, type, cum->words);
8411 /* _Decimal128 must be passed in an even/odd float register pair.
8412 This assumes that the register number is odd when fregno is odd. */
8413 if (mode == TDmode && (cum->fregno % 2) == 1)
8414 cum->fregno++;
8416 if (USE_FP_FOR_ARG_P (cum, mode, type))
8418 rtx rvec[GP_ARG_NUM_REG + 1];
8419 rtx r;
8420 int k;
8421 bool needs_psave;
8422 enum machine_mode fmode = mode;
8423 unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
8425 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
8427 /* Currently, we only ever need one reg here because complex
8428 doubles are split. */
8429 gcc_assert (cum->fregno == FP_ARG_MAX_REG
8430 && (fmode == TFmode || fmode == TDmode));
8432 /* Long double or _Decimal128 split over regs and memory. */
8433 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
8436 /* Do we also need to pass this arg in the parameter save
8437 area? */
8438 needs_psave = (type
8439 && (cum->nargs_prototype <= 0
8440 || (DEFAULT_ABI == ABI_AIX
8441 && TARGET_XL_COMPAT
8442 && align_words >= GP_ARG_NUM_REG)));
8444 if (!needs_psave && mode == fmode)
8445 return gen_rtx_REG (fmode, cum->fregno);
8447 k = 0;
8448 if (needs_psave)
8450 /* Describe the part that goes in gprs or the stack.
8451 This piece must come first, before the fprs. */
8452 if (align_words < GP_ARG_NUM_REG)
8454 unsigned long n_words = rs6000_arg_size (mode, type);
8456 if (align_words + n_words > GP_ARG_NUM_REG
8457 || (TARGET_32BIT && TARGET_POWERPC64))
8459 /* If this is partially on the stack, then we only
8460 include the portion actually in registers here. */
8461 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
8462 rtx off;
8463 int i = 0;
8464 if (align_words + n_words > GP_ARG_NUM_REG)
8465 /* Not all of the arg fits in gprs. Say that it
8466 goes in memory too, using a magic NULL_RTX
8467 component. Also see comment in
8468 rs6000_mixed_function_arg for why the normal
8469 function_arg_partial_nregs scheme doesn't work
8470 in this case. */
8471 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
8472 const0_rtx);
8474 do
8475 r = gen_rtx_REG (rmode,
8476 GP_ARG_MIN_REG + align_words);
8477 off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
8478 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
8480 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
8482 else
8484 /* The whole arg fits in gprs. */
8485 r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8486 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8489 else
8490 /* It's entirely in memory. */
8491 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
8494 /* Describe where this piece goes in the fprs. */
8495 r = gen_rtx_REG (fmode, cum->fregno);
8496 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
8498 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
8500 else if (align_words < GP_ARG_NUM_REG)
8502 if (TARGET_32BIT && TARGET_POWERPC64)
8503 return rs6000_mixed_function_arg (mode, type, align_words);
8505 if (mode == BLKmode)
8506 mode = Pmode;
8508 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
8510 else
8511 return NULL_RTX;
8515 /* For an arg passed partly in registers and partly in memory, this is
8516 the number of bytes passed in registers. For args passed entirely in
8517 registers or entirely in memory, zero. When an arg is described by a
8518 PARALLEL, perhaps using more than one register type, this function
8519 returns the number of bytes used by the first element of the PARALLEL. */
8521 static int
8522 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
8523 tree type, bool named)
8525 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8526 int ret = 0;
8527 int align_words;
8529 if (DEFAULT_ABI == ABI_V4)
8530 return 0;
8532 if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
8533 && cum->nargs_prototype >= 0)
8534 return 0;
8536 /* In this complicated case we just disable the partial_nregs code. */
8537 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
8538 return 0;
8540 align_words = rs6000_parm_start (mode, type, cum->words);
8542 if (USE_FP_FOR_ARG_P (cum, mode, type))
8544 /* If we are passing this arg in the fixed parameter save area
8545 (gprs or memory) as well as fprs, then this function should
8546 return the number of partial bytes passed in the parameter
8547 save area rather than partial bytes passed in fprs. */
8548 if (type
8549 && (cum->nargs_prototype <= 0
8550 || (DEFAULT_ABI == ABI_AIX
8551 && TARGET_XL_COMPAT
8552 && align_words >= GP_ARG_NUM_REG)))
8553 return 0;
8554 else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
8555 > FP_ARG_MAX_REG + 1)
8556 ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
8557 else if (cum->nargs_prototype >= 0)
8558 return 0;
8561 if (align_words < GP_ARG_NUM_REG
8562 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
8563 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
8565 if (ret != 0 && TARGET_DEBUG_ARG)
8566 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
8568 return ret;
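/* Example: on a 64-bit target an aggregate of 8 words starting at
   align_words == 6 has only r9 and r10 left for it, so this returns
   (8 - 6) * 8 == 16 bytes passed in registers. */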
8571 /* A C expression that indicates when an argument must be passed by
8572 reference. If nonzero for an argument, a copy of that argument is
8573 made in memory and a pointer to the argument is passed instead of
8574 the argument itself. The pointer is passed in whatever way is
8575 appropriate for passing a pointer to that type.
8577 Under V.4, aggregates and long double are passed by reference.
8579 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
8580 reference unless the AltiVec vector extension ABI is in force.
8582 As an extension to all ABIs, variable sized types are passed by
8583 reference. */
8585 static bool
8586 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
8587 enum machine_mode mode, const_tree type,
8588 bool named ATTRIBUTE_UNUSED)
8590 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
8592 if (TARGET_DEBUG_ARG)
8593 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
8594 return 1;
8597 if (!type)
8598 return 0;
8600 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
8602 if (TARGET_DEBUG_ARG)
8603 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
8604 return 1;
8607 if (int_size_in_bytes (type) < 0)
8609 if (TARGET_DEBUG_ARG)
8610 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
8611 return 1;
8614 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8615 modes only exist for GCC vector types if -maltivec. */
8616 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
8618 if (TARGET_DEBUG_ARG)
8619 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
8620 return 1;
8623 /* Pass synthetic vectors in memory. */
8624 if (TREE_CODE (type) == VECTOR_TYPE
8625 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
8627 static bool warned_for_pass_big_vectors = false;
8628 if (TARGET_DEBUG_ARG)
8629 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
8630 if (!warned_for_pass_big_vectors)
8632 warning (0, "GCC vector passed by reference: "
8633 "non-standard ABI extension with no compatibility guarantee");
8634 warned_for_pass_big_vectors = true;
8636 return 1;
8639 return 0;
8642 static void
8643 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
8645 int i;
8646 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
8648 if (nregs == 0)
8649 return;
8651 for (i = 0; i < nregs; i++)
8653 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
8654 if (reload_completed)
8656 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
8657 tem = NULL_RTX;
8658 else
8659 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
8660 i * GET_MODE_SIZE (reg_mode));
8662 else
8663 tem = replace_equiv_address (tem, XEXP (tem, 0));
8665 gcc_assert (tem);
8667 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
8671 /* Perform any actions needed for a function that is receiving a
8672 variable number of arguments.
8674 CUM is as above.
8676 MODE and TYPE are the mode and type of the current parameter.
8678 PRETEND_SIZE is a variable that should be set to the amount of stack
8679 that must be pushed by the prolog to pretend that our caller pushed it.
8682 Normally, this macro will push all remaining incoming registers on the
8683 stack and set PRETEND_SIZE to the length of the registers pushed. */
8685 static void
8686 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
8687 tree type, int *pretend_size ATTRIBUTE_UNUSED,
8688 int no_rtl)
8690 CUMULATIVE_ARGS next_cum;
8691 int reg_size = TARGET_32BIT ? 4 : 8;
8692 rtx save_area = NULL_RTX, mem;
8693 int first_reg_offset;
8694 alias_set_type set;
8696 /* Skip the last named argument. */
8697 next_cum = *get_cumulative_args (cum);
8698 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
8700 if (DEFAULT_ABI == ABI_V4)
8702 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
8704 if (! no_rtl)
8706 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
8707 HOST_WIDE_INT offset = 0;
8709 /* Try to optimize the size of the varargs save area.
8710 The ABI requires that ap.reg_save_area is doubleword
8711 aligned, but we don't need to allocate space for all
8712 the bytes, only those to which we actually will save
8713 anything. */
8714 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
8715 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
8716 if (TARGET_HARD_FLOAT && TARGET_FPRS
8717 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8718 && cfun->va_list_fpr_size)
8720 if (gpr_reg_num)
8721 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
8722 * UNITS_PER_FP_WORD;
8723 if (cfun->va_list_fpr_size
8724 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8725 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
8726 else
8727 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
8728 * UNITS_PER_FP_WORD;
8730 if (gpr_reg_num)
8732 offset = -((first_reg_offset * reg_size) & ~7);
8733 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
8735 gpr_reg_num = cfun->va_list_gpr_size;
8736 if (reg_size == 4 && (first_reg_offset & 1))
8737 gpr_reg_num++;
8739 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
8741 else if (fpr_size)
8742 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
8743 * UNITS_PER_FP_WORD
8744 - (int) (GP_ARG_NUM_REG * reg_size);
8746 if (gpr_size + fpr_size)
8748 rtx reg_save_area
8749 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
8750 gcc_assert (GET_CODE (reg_save_area) == MEM);
8751 reg_save_area = XEXP (reg_save_area, 0);
8752 if (GET_CODE (reg_save_area) == PLUS)
8754 gcc_assert (XEXP (reg_save_area, 0)
8755 == virtual_stack_vars_rtx);
8756 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
8757 offset += INTVAL (XEXP (reg_save_area, 1));
8759 else
8760 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
8763 cfun->machine->varargs_save_offset = offset;
8764 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
8767 else
8769 first_reg_offset = next_cum.words;
8770 save_area = virtual_incoming_args_rtx;
8772 if (targetm.calls.must_pass_in_stack (mode, type))
8773 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
8776 set = get_varargs_alias_set ();
8777 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
8778 && cfun->va_list_gpr_size)
8780 int nregs = GP_ARG_NUM_REG - first_reg_offset;
8782 if (va_list_gpr_counter_field)
8784 /* V4 va_list_gpr_size counts the number of registers needed. */
8785 if (nregs > cfun->va_list_gpr_size)
8786 nregs = cfun->va_list_gpr_size;
8788 else
8790 /* A char * va_list instead counts the number of bytes needed. */
8791 if (nregs > cfun->va_list_gpr_size / reg_size)
8792 nregs = cfun->va_list_gpr_size / reg_size;
8795 mem = gen_rtx_MEM (BLKmode,
8796 plus_constant (Pmode, save_area,
8797 first_reg_offset * reg_size));
8798 MEM_NOTRAP_P (mem) = 1;
8799 set_mem_alias_set (mem, set);
8800 set_mem_align (mem, BITS_PER_WORD);
8802 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
8803 nregs);
8806 /* Save FP registers if needed. */
8807 if (DEFAULT_ABI == ABI_V4
8808 && TARGET_HARD_FLOAT && TARGET_FPRS
8809 && ! no_rtl
8810 && next_cum.fregno <= FP_ARG_V4_MAX_REG
8811 && cfun->va_list_fpr_size)
8813 int fregno = next_cum.fregno, nregs;
8814 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
8815 rtx lab = gen_label_rtx ();
8816 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
8817 * UNITS_PER_FP_WORD);
8819 emit_jump_insn
8820 (gen_rtx_SET (VOIDmode,
8821 pc_rtx,
8822 gen_rtx_IF_THEN_ELSE (VOIDmode,
8823 gen_rtx_NE (VOIDmode, cr1,
8824 const0_rtx),
8825 gen_rtx_LABEL_REF (VOIDmode, lab),
8826 pc_rtx)));
8828 for (nregs = 0;
8829 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
8830 fregno++, off += UNITS_PER_FP_WORD, nregs++)
8832 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8833 ? DFmode : SFmode,
8834 plus_constant (Pmode, save_area, off));
8835 MEM_NOTRAP_P (mem) = 1;
8836 set_mem_alias_set (mem, set);
8837 set_mem_align (mem, GET_MODE_ALIGNMENT (
8838 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8839 ? DFmode : SFmode));
8840 emit_move_insn (mem, gen_rtx_REG (
8841 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8842 ? DFmode : SFmode, fregno));
8845 emit_label (lab);
8849 /* Create the va_list data type. */
8851 static tree
8852 rs6000_build_builtin_va_list (void)
8854 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
8856 /* For AIX, prefer 'char *' because that's what the system
8857 header files like. */
8858 if (DEFAULT_ABI != ABI_V4)
8859 return build_pointer_type (char_type_node);
8861 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
8862 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
8863 get_identifier ("__va_list_tag"), record);
8865 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
8866 unsigned_char_type_node);
8867 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
8868 unsigned_char_type_node);
8869 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
8870 every user file. */
8871 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8872 get_identifier ("reserved"), short_unsigned_type_node);
8873 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8874 get_identifier ("overflow_arg_area"),
8875 ptr_type_node);
8876 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
8877 get_identifier ("reg_save_area"),
8878 ptr_type_node);
8880 va_list_gpr_counter_field = f_gpr;
8881 va_list_fpr_counter_field = f_fpr;
8883 DECL_FIELD_CONTEXT (f_gpr) = record;
8884 DECL_FIELD_CONTEXT (f_fpr) = record;
8885 DECL_FIELD_CONTEXT (f_res) = record;
8886 DECL_FIELD_CONTEXT (f_ovf) = record;
8887 DECL_FIELD_CONTEXT (f_sav) = record;
8889 TYPE_STUB_DECL (record) = type_decl;
8890 TYPE_NAME (record) = type_decl;
8891 TYPE_FIELDS (record) = f_gpr;
8892 DECL_CHAIN (f_gpr) = f_fpr;
8893 DECL_CHAIN (f_fpr) = f_res;
8894 DECL_CHAIN (f_res) = f_ovf;
8895 DECL_CHAIN (f_ovf) = f_sav;
8897 layout_type (record);
8899 /* The correct type is an array type of one element. */
8900 return build_array_type (record, build_index_type (size_zero_node));
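/* The record built above is equivalent to this C declaration, where gpr
   and fpr count the argument registers already used (0..8), reserved is
   the named padding, overflow_arg_area points at stack-passed arguments
   and reg_save_area at the prologue register dump (a sketch; the
   one-element array type matches the return value above):

     typedef struct __va_list_tag
     {
       unsigned char gpr;
       unsigned char fpr;
       unsigned short reserved;
       void *overflow_arg_area;
       void *reg_save_area;
     } __va_list_tag;
     typedef __va_list_tag __builtin_va_list[1];
*/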
8903 /* Implement va_start. */
8905 static void
8906 rs6000_va_start (tree valist, rtx nextarg)
8908 HOST_WIDE_INT words, n_gpr, n_fpr;
8909 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
8910 tree gpr, fpr, ovf, sav, t;
8912 /* Only SVR4 needs something special. */
8913 if (DEFAULT_ABI != ABI_V4)
8915 std_expand_builtin_va_start (valist, nextarg);
8916 return;
8919 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8920 f_fpr = DECL_CHAIN (f_gpr);
8921 f_res = DECL_CHAIN (f_fpr);
8922 f_ovf = DECL_CHAIN (f_res);
8923 f_sav = DECL_CHAIN (f_ovf);
8925 valist = build_simple_mem_ref (valist);
8926 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8927 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
8928 f_fpr, NULL_TREE);
8929 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
8930 f_ovf, NULL_TREE);
8931 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
8932 f_sav, NULL_TREE);
8934 /* Count the number of GP and FP argument registers used. */
8935 words = crtl->args.info.words;
8936 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
8937 GP_ARG_NUM_REG);
8938 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
8939 FP_ARG_NUM_REG);
8941 if (TARGET_DEBUG_ARG)
8942 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
8943 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
8944 words, n_gpr, n_fpr);
8946 if (cfun->va_list_gpr_size)
8948 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8949 build_int_cst (NULL_TREE, n_gpr));
8950 TREE_SIDE_EFFECTS (t) = 1;
8951 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8954 if (cfun->va_list_fpr_size)
8956 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8957 build_int_cst (NULL_TREE, n_fpr));
8958 TREE_SIDE_EFFECTS (t) = 1;
8959 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8961 #ifdef HAVE_AS_GNU_ATTRIBUTE
8962 if (call_ABI_of_interest (cfun->decl))
8963 rs6000_passes_float = true;
8964 #endif
8967 /* Find the overflow area. */
8968 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8969 if (words != 0)
8970 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
8971 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8972 TREE_SIDE_EFFECTS (t) = 1;
8973 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8975 /* If there were no va_arg invocations, don't set up the register
8976 save area. */
8977 if (!cfun->va_list_gpr_size
8978 && !cfun->va_list_fpr_size
8979 && n_gpr < GP_ARG_NUM_REG
8980 && n_fpr < FP_ARG_V4_MAX_REG)
8981 return;
8983 /* Find the register save area. */
8984 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
8985 if (cfun->machine->varargs_save_offset)
8986 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
8987 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8988 TREE_SIDE_EFFECTS (t) = 1;
8989 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8992 /* Implement va_arg. */
8994 static tree
8995 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8996 gimple_seq *post_p)
8998 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
8999 tree gpr, fpr, ovf, sav, reg, t, u;
9000 int size, rsize, n_reg, sav_ofs, sav_scale;
9001 tree lab_false, lab_over, addr;
9002 int align;
9003 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
9004 int regalign = 0;
9005 gimple stmt;
9007 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9009 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
9010 return build_va_arg_indirect_ref (t);
9013 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
9014 earlier version of gcc, with the property that it always applied alignment
9015 adjustments to the va-args (even for zero-sized types). The cheapest way
9016 to deal with this is to replicate the effect of the part of
9017 std_gimplify_va_arg_expr that carries out the align adjust, for the case
9018 of relevance.
9019 We don't need to check for pass-by-reference because of the test above.
9020 We can return a simplified answer, since we know there's no offset to add. */
9022 if (TARGET_MACHO
9023 && rs6000_darwin64_abi
9024 && integer_zerop (TYPE_SIZE (type)))
9026 unsigned HOST_WIDE_INT align, boundary;
9027 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
9028 align = PARM_BOUNDARY / BITS_PER_UNIT;
9029 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
9030 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
9031 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
9032 boundary /= BITS_PER_UNIT;
9033 if (boundary > align)
9035 tree t;
9036 /* This updates the arg pointer by the amount that would be necessary
9037 to align the zero-sized (but not zero-alignment) item. */
9038 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9039 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
9040 gimplify_and_add (t, pre_p);
9042 t = fold_convert (sizetype, valist_tmp);
9043 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
9044 fold_convert (TREE_TYPE (valist),
9045 fold_build2 (BIT_AND_EXPR, sizetype, t,
9046 size_int (-boundary))));
9047 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
9048 gimplify_and_add (t, pre_p);
9050 /* Since it is zero-sized there's no increment for the item itself. */
9051 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
9052 return build_va_arg_indirect_ref (valist_tmp);
9055 if (DEFAULT_ABI != ABI_V4)
9057 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
9059 tree elem_type = TREE_TYPE (type);
9060 enum machine_mode elem_mode = TYPE_MODE (elem_type);
9061 int elem_size = GET_MODE_SIZE (elem_mode);
9063 if (elem_size < UNITS_PER_WORD)
9065 tree real_part, imag_part;
9066 gimple_seq post = NULL;
9068 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9069 &post);
9070 /* Copy the value into a temporary, lest the formal temporary
9071 be reused out from under us. */
9072 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
9073 gimple_seq_add_seq (pre_p, post);
9075 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
9076 post_p);
9078 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
9082 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
9085 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9086 f_fpr = DECL_CHAIN (f_gpr);
9087 f_res = DECL_CHAIN (f_fpr);
9088 f_ovf = DECL_CHAIN (f_res);
9089 f_sav = DECL_CHAIN (f_ovf);
9091 valist = build_va_arg_indirect_ref (valist);
9092 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9093 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
9094 f_fpr, NULL_TREE);
9095 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
9096 f_ovf, NULL_TREE);
9097 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
9098 f_sav, NULL_TREE);
9100 size = int_size_in_bytes (type);
9101 rsize = (size + 3) / 4;
9102 align = 1;
9104 if (TARGET_HARD_FLOAT && TARGET_FPRS
9105 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
9106 || (TARGET_DOUBLE_FLOAT
9107 && (TYPE_MODE (type) == DFmode
9108 || TYPE_MODE (type) == TFmode
9109 || TYPE_MODE (type) == SDmode
9110 || TYPE_MODE (type) == DDmode
9111 || TYPE_MODE (type) == TDmode))))
9113 /* FP args go in FP registers, if present. */
9114 reg = fpr;
9115 n_reg = (size + 7) / 8;
9116 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
9117 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
9118 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
9119 align = 8;
9121 else
9123 /* Otherwise into GP registers. */
9124 reg = gpr;
9125 n_reg = rsize;
9126 sav_ofs = 0;
9127 sav_scale = 4;
9128 if (n_reg == 2)
9129 align = 8;
9132 /* Pull the value out of the saved registers.... */
9134 lab_over = NULL;
9135 addr = create_tmp_var (ptr_type_node, "addr");
9137 /* AltiVec vectors never go in registers when -mabi=altivec. */
9138 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9139 align = 16;
9140 else
9142 lab_false = create_artificial_label (input_location);
9143 lab_over = create_artificial_label (input_location);
9145 /* Long long and SPE vectors are aligned in the registers.
9146 As is any other 2-gpr item such as complex int, due to a
9147 historical mistake. */
9148 u = reg;
9149 if (n_reg == 2 && reg == gpr)
9151 regalign = 1;
9152 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9153 build_int_cst (TREE_TYPE (reg), n_reg - 1));
9154 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
9155 unshare_expr (reg), u);
9157 /* _Decimal128 is passed in even/odd fpr pairs; the stored
9158 reg number is 0 for f1, so we want to make it odd. */
9159 else if (reg == fpr && TYPE_MODE (type) == TDmode)
9161 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9162 build_int_cst (TREE_TYPE (reg), 1));
9163 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
9166 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
9167 t = build2 (GE_EXPR, boolean_type_node, u, t);
9168 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9169 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9170 gimplify_and_add (t, pre_p);
9172 t = sav;
9173 if (sav_ofs)
9174 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9176 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
9177 build_int_cst (TREE_TYPE (reg), n_reg));
9178 u = fold_convert (sizetype, u);
9179 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
9180 t = fold_build_pointer_plus (t, u);
9182 /* _Decimal32 varargs are located in the second word of the 64-bit
9183 FP register for 32-bit binaries. */
9184 if (!TARGET_POWERPC64
9185 && TARGET_HARD_FLOAT && TARGET_FPRS
9186 && TYPE_MODE (type) == SDmode)
9187 t = fold_build_pointer_plus_hwi (t, size);
9189 gimplify_assign (addr, t, pre_p);
9191 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9193 stmt = gimple_build_label (lab_false);
9194 gimple_seq_add_stmt (pre_p, stmt);
9196 if ((n_reg == 2 && !regalign) || n_reg > 2)
9198 /* Ensure that we don't find any more args in regs.
9199 Alignment has already been taken care of in the special cases. */
9200 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
9204 /* ... otherwise out of the overflow area. */
9206 /* Care for on-stack alignment if needed. */
9207 t = ovf;
9208 if (align != 1)
9210 t = fold_build_pointer_plus_hwi (t, align - 1);
9211 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
9212 build_int_cst (TREE_TYPE (t), -align));
9214 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9216 gimplify_assign (unshare_expr (addr), t, pre_p);
9218 t = fold_build_pointer_plus_hwi (t, size);
9219 gimplify_assign (unshare_expr (ovf), t, pre_p);
9221 if (lab_over)
9223 stmt = gimple_build_label (lab_over);
9224 gimple_seq_add_stmt (pre_p, stmt);
9227 if (STRICT_ALIGNMENT
9228 && (TYPE_ALIGN (type)
9229 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
9231 /* The value (of type complex double, for example) may not be
9232 aligned in memory in the saved registers, so copy via a
9233 temporary. (This is the same code as used for SPARC.) */
9234 tree tmp = create_tmp_var (type, "va_arg_tmp");
9235 tree dest_addr = build_fold_addr_expr (tmp);
9237 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
9238 3, dest_addr, addr, size_int (rsize * 4));
9240 gimplify_and_add (copy, pre_p);
9241 addr = dest_addr;
9244 addr = fold_convert (ptrtype, addr);
9245 return build_va_arg_indirect_ref (addr);
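/* Illustrative sketch (not part of the original source): for a plain
   integer argument needing n_reg GPRs on a 32-bit ABI, the gimple
   built above behaves roughly like:

       if (gpr > 8 - n_reg)            /* not enough save slots left */
         goto lab_false;
       addr = sav + sav_ofs + gpr * sav_scale;
       gpr += n_reg;
       goto lab_over;
     lab_false:
       gpr = 8;                        /* stop looking in registers */
       addr = (ovf + align - 1) & -align;
       ovf = addr + size;
     lab_over:
       ... dereference addr ...

   This is schematic; the real trees also handle register-pair
   alignment, FPR-based passing and the decimal-float oddities above.  */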
9248 /* Builtins. */
9250 static void
9251 def_builtin (const char *name, tree type, enum rs6000_builtins code)
9253 tree t;
9254 unsigned classify = rs6000_builtin_info[(int)code].attr;
9255 const char *attr_string = "";
9257 gcc_assert (name != NULL);
9258 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
9260 if (rs6000_builtin_decls[(int)code])
9261 fatal_error ("internal error: builtin function %s already processed", name);
9263 rs6000_builtin_decls[(int)code] = t =
9264 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
9266 /* Set any special attributes. */
9267 if ((classify & RS6000_BTC_CONST) != 0)
9269 /* const function, function only depends on the inputs. */
9270 TREE_READONLY (t) = 1;
9271 TREE_NOTHROW (t) = 1;
9272 attr_string = ", pure";
9274 else if ((classify & RS6000_BTC_PURE) != 0)
9276 /* pure function, function can read global memory, but does not set any
9277 external state. */
9278 DECL_PURE_P (t) = 1;
9279 TREE_NOTHROW (t) = 1;
9280 attr_string = ", const";
9282 else if ((classify & RS6000_BTC_FP) != 0)
9284 /* Function is a math function. If rounding mode is on, then treat the
9285 function as not reading global memory, but it can have arbitrary side
9286 effects. If it is off, then assume the function is a const function.
9287 This mimics the ATTR_MATHFN_FPROUNDING attribute in
9288 builtin-attribute.def that is used for the math functions. */
9289 TREE_NOTHROW (t) = 1;
9290 if (flag_rounding_math)
9292 DECL_PURE_P (t) = 1;
9293 DECL_IS_NOVOPS (t) = 1;
9294 attr_string = ", fp, pure";
9296 else
9298 TREE_READONLY (t) = 1;
9299 attr_string = ", fp, const";
9302 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
9303 gcc_unreachable ();
9305 if (TARGET_DEBUG_BUILTIN)
9306 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
9307 (int)code, name, attr_string);
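/* Example (illustrative, not part of the original source): callers in
   the builtin-initialization code register one function per entry,
   along the lines of:

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
                  ALTIVEC_BUILTIN_VADDUBM);

   The type node shown is representative; the actual registrations are
   driven from rs6000-builtin.def.  */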
9310 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
9312 #undef RS6000_BUILTIN_1
9313 #undef RS6000_BUILTIN_2
9314 #undef RS6000_BUILTIN_3
9315 #undef RS6000_BUILTIN_A
9316 #undef RS6000_BUILTIN_D
9317 #undef RS6000_BUILTIN_E
9318 #undef RS6000_BUILTIN_P
9319 #undef RS6000_BUILTIN_Q
9320 #undef RS6000_BUILTIN_S
9321 #undef RS6000_BUILTIN_X
9323 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9324 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9325 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
9326 { MASK, ICODE, NAME, ENUM },
9328 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9329 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9330 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9331 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9332 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9333 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9334 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9336 static const struct builtin_description bdesc_3arg[] =
9338 #include "rs6000-builtin.def"
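/* Schematic expansion (illustrative): with the #defines above, a .def
   entry of the ternary kind, say

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
                       CODE_FOR_altivec_vmaddfp)

   contributes the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_altivec_vmaddfp,
       "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   to bdesc_3arg, while entries of every other kind expand to nothing.
   The same #undef/#define dance below slices the single .def file into
   each of the remaining tables.  The exact argument spelling in
   rs6000-builtin.def is schematic here.  */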
9341 /* DST operations: void foo (void *, const int, const char). */
9343 #undef RS6000_BUILTIN_1
9344 #undef RS6000_BUILTIN_2
9345 #undef RS6000_BUILTIN_3
9346 #undef RS6000_BUILTIN_A
9347 #undef RS6000_BUILTIN_D
9348 #undef RS6000_BUILTIN_E
9349 #undef RS6000_BUILTIN_P
9350 #undef RS6000_BUILTIN_Q
9351 #undef RS6000_BUILTIN_S
9352 #undef RS6000_BUILTIN_X
9354 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9355 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9356 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9357 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9358 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
9359 { MASK, ICODE, NAME, ENUM },
9361 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9362 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9363 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9364 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9365 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9367 static const struct builtin_description bdesc_dst[] =
9369 #include "rs6000-builtin.def"
9372 /* Simple binary operations: VECc = foo (VECa, VECb). */
9374 #undef RS6000_BUILTIN_1
9375 #undef RS6000_BUILTIN_2
9376 #undef RS6000_BUILTIN_3
9377 #undef RS6000_BUILTIN_A
9378 #undef RS6000_BUILTIN_D
9379 #undef RS6000_BUILTIN_E
9380 #undef RS6000_BUILTIN_P
9381 #undef RS6000_BUILTIN_Q
9382 #undef RS6000_BUILTIN_S
9383 #undef RS6000_BUILTIN_X
9385 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9386 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
9387 { MASK, ICODE, NAME, ENUM },
9389 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9390 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9391 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9392 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9393 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9394 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9395 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9396 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9398 static const struct builtin_description bdesc_2arg[] =
9400 #include "rs6000-builtin.def"
9403 #undef RS6000_BUILTIN_1
9404 #undef RS6000_BUILTIN_2
9405 #undef RS6000_BUILTIN_3
9406 #undef RS6000_BUILTIN_A
9407 #undef RS6000_BUILTIN_D
9408 #undef RS6000_BUILTIN_E
9409 #undef RS6000_BUILTIN_P
9410 #undef RS6000_BUILTIN_Q
9411 #undef RS6000_BUILTIN_S
9412 #undef RS6000_BUILTIN_X
9414 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9415 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9416 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9417 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9418 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9419 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9420 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
9421 { MASK, ICODE, NAME, ENUM },
9423 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9424 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9425 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9427 /* AltiVec predicates. */
9429 static const struct builtin_description bdesc_altivec_preds[] =
9431 #include "rs6000-builtin.def"
9434 /* SPE predicates. */
9435 #undef RS6000_BUILTIN_1
9436 #undef RS6000_BUILTIN_2
9437 #undef RS6000_BUILTIN_3
9438 #undef RS6000_BUILTIN_A
9439 #undef RS6000_BUILTIN_D
9440 #undef RS6000_BUILTIN_E
9441 #undef RS6000_BUILTIN_P
9442 #undef RS6000_BUILTIN_Q
9443 #undef RS6000_BUILTIN_S
9444 #undef RS6000_BUILTIN_X
9446 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9447 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9448 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9449 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9450 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9451 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9452 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9453 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9454 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
9455 { MASK, ICODE, NAME, ENUM },
9457 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9459 static const struct builtin_description bdesc_spe_predicates[] =
9461 #include "rs6000-builtin.def"
9464 /* SPE evsel predicates. */
9465 #undef RS6000_BUILTIN_1
9466 #undef RS6000_BUILTIN_2
9467 #undef RS6000_BUILTIN_3
9468 #undef RS6000_BUILTIN_A
9469 #undef RS6000_BUILTIN_D
9470 #undef RS6000_BUILTIN_E
9471 #undef RS6000_BUILTIN_P
9472 #undef RS6000_BUILTIN_Q
9473 #undef RS6000_BUILTIN_S
9474 #undef RS6000_BUILTIN_X
9476 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9477 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9478 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9479 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9480 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9481 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
9482 { MASK, ICODE, NAME, ENUM },
9484 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9485 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9486 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9487 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9489 static const struct builtin_description bdesc_spe_evsel[] =
9491 #include "rs6000-builtin.def"
9494 /* PAIRED predicates. */
9495 #undef RS6000_BUILTIN_1
9496 #undef RS6000_BUILTIN_2
9497 #undef RS6000_BUILTIN_3
9498 #undef RS6000_BUILTIN_A
9499 #undef RS6000_BUILTIN_D
9500 #undef RS6000_BUILTIN_E
9501 #undef RS6000_BUILTIN_P
9502 #undef RS6000_BUILTIN_Q
9503 #undef RS6000_BUILTIN_S
9504 #undef RS6000_BUILTIN_X
9506 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9507 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9508 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9509 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9510 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9511 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9512 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9513 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
9514 { MASK, ICODE, NAME, ENUM },
9516 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9517 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9519 static const struct builtin_description bdesc_paired_preds[] =
9521 #include "rs6000-builtin.def"
9524 /* ABS* operations. */
9526 #undef RS6000_BUILTIN_1
9527 #undef RS6000_BUILTIN_2
9528 #undef RS6000_BUILTIN_3
9529 #undef RS6000_BUILTIN_A
9530 #undef RS6000_BUILTIN_D
9531 #undef RS6000_BUILTIN_E
9532 #undef RS6000_BUILTIN_P
9533 #undef RS6000_BUILTIN_Q
9534 #undef RS6000_BUILTIN_S
9535 #undef RS6000_BUILTIN_X
9537 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
9538 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9539 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9540 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
9541 { MASK, ICODE, NAME, ENUM },
9543 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9544 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9545 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9546 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9547 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9548 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9550 static const struct builtin_description bdesc_abs[] =
9552 #include "rs6000-builtin.def"
9555 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
9556 foo (VECa). */
9558 #undef RS6000_BUILTIN_1
9559 #undef RS6000_BUILTIN_2
9560 #undef RS6000_BUILTIN_3
9561 #undef RS6000_BUILTIN_A
9562 #undef RS6000_BUILTIN_E
9563 #undef RS6000_BUILTIN_D
9564 #undef RS6000_BUILTIN_P
9565 #undef RS6000_BUILTIN_Q
9566 #undef RS6000_BUILTIN_S
9567 #undef RS6000_BUILTIN_X
9569 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
9570 { MASK, ICODE, NAME, ENUM },
9572 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
9573 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
9574 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
9575 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
9576 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
9577 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
9578 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
9579 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
9580 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
9582 static const struct builtin_description bdesc_1arg[] =
9584 #include "rs6000-builtin.def"
9587 #undef RS6000_BUILTIN_1
9588 #undef RS6000_BUILTIN_2
9589 #undef RS6000_BUILTIN_3
9590 #undef RS6000_BUILTIN_A
9591 #undef RS6000_BUILTIN_D
9592 #undef RS6000_BUILTIN_E
9593 #undef RS6000_BUILTIN_P
9594 #undef RS6000_BUILTIN_Q
9595 #undef RS6000_BUILTIN_S
9596 #undef RS6000_BUILTIN_X
9598 /* Return true if a builtin function is overloaded. */
9599 bool
9600 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
9602 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
9606 static rtx
9607 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
9609 rtx pat;
9610 tree arg0 = CALL_EXPR_ARG (exp, 0);
9611 rtx op0 = expand_normal (arg0);
9612 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9613 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9615 if (icode == CODE_FOR_nothing)
9616 /* Builtin not supported on this processor. */
9617 return 0;
9619 /* If we got invalid arguments, bail out before generating bad rtl. */
9620 if (arg0 == error_mark_node)
9621 return const0_rtx;
9623 if (icode == CODE_FOR_altivec_vspltisb
9624 || icode == CODE_FOR_altivec_vspltish
9625 || icode == CODE_FOR_altivec_vspltisw
9626 || icode == CODE_FOR_spe_evsplatfi
9627 || icode == CODE_FOR_spe_evsplati)
9629 /* Only allow 5-bit *signed* literals. */
9630 if (GET_CODE (op0) != CONST_INT
9631 || INTVAL (op0) > 15
9632 || INTVAL (op0) < -16)
9634 error ("argument 1 must be a 5-bit signed literal");
9635 return const0_rtx;
9639 if (target == 0
9640 || GET_MODE (target) != tmode
9641 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9642 target = gen_reg_rtx (tmode);
9644 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9645 op0 = copy_to_mode_reg (mode0, op0);
9647 pat = GEN_FCN (icode) (target, op0);
9648 if (! pat)
9649 return 0;
9650 emit_insn (pat);
9652 return target;
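/* Illustrative note: the 5-bit check above is what rejects, e.g.,
   __builtin_altivec_vspltisw (16) at compile time -- the vspltis*
   immediate field holds only -16..15 -- while
   __builtin_altivec_vspltisw (-16) expands to a single instruction.  */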
9655 static rtx
9656 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
9658 rtx pat, scratch1, scratch2;
9659 tree arg0 = CALL_EXPR_ARG (exp, 0);
9660 rtx op0 = expand_normal (arg0);
9661 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9662 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9664 /* If we have invalid arguments, bail out before generating bad rtl. */
9665 if (arg0 == error_mark_node)
9666 return const0_rtx;
9668 if (target == 0
9669 || GET_MODE (target) != tmode
9670 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9671 target = gen_reg_rtx (tmode);
9673 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9674 op0 = copy_to_mode_reg (mode0, op0);
9676 scratch1 = gen_reg_rtx (mode0);
9677 scratch2 = gen_reg_rtx (mode0);
9679 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
9680 if (! pat)
9681 return 0;
9682 emit_insn (pat);
9684 return target;
9687 static rtx
9688 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
9690 rtx pat;
9691 tree arg0 = CALL_EXPR_ARG (exp, 0);
9692 tree arg1 = CALL_EXPR_ARG (exp, 1);
9693 rtx op0 = expand_normal (arg0);
9694 rtx op1 = expand_normal (arg1);
9695 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9696 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9697 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9699 if (icode == CODE_FOR_nothing)
9700 /* Builtin not supported on this processor. */
9701 return 0;
9703 /* If we got invalid arguments, bail out before generating bad rtl. */
9704 if (arg0 == error_mark_node || arg1 == error_mark_node)
9705 return const0_rtx;
9707 if (icode == CODE_FOR_altivec_vcfux
9708 || icode == CODE_FOR_altivec_vcfsx
9709 || icode == CODE_FOR_altivec_vctsxs
9710 || icode == CODE_FOR_altivec_vctuxs
9711 || icode == CODE_FOR_altivec_vspltb
9712 || icode == CODE_FOR_altivec_vsplth
9713 || icode == CODE_FOR_altivec_vspltw
9714 || icode == CODE_FOR_spe_evaddiw
9715 || icode == CODE_FOR_spe_evldd
9716 || icode == CODE_FOR_spe_evldh
9717 || icode == CODE_FOR_spe_evldw
9718 || icode == CODE_FOR_spe_evlhhesplat
9719 || icode == CODE_FOR_spe_evlhhossplat
9720 || icode == CODE_FOR_spe_evlhhousplat
9721 || icode == CODE_FOR_spe_evlwhe
9722 || icode == CODE_FOR_spe_evlwhos
9723 || icode == CODE_FOR_spe_evlwhou
9724 || icode == CODE_FOR_spe_evlwhsplat
9725 || icode == CODE_FOR_spe_evlwwsplat
9726 || icode == CODE_FOR_spe_evrlwi
9727 || icode == CODE_FOR_spe_evslwi
9728 || icode == CODE_FOR_spe_evsrwis
9729 || icode == CODE_FOR_spe_evsubifw
9730 || icode == CODE_FOR_spe_evsrwiu)
9732 /* Only allow 5-bit unsigned literals. */
9733 STRIP_NOPS (arg1);
9734 if (TREE_CODE (arg1) != INTEGER_CST
9735 || TREE_INT_CST_LOW (arg1) & ~0x1f)
9737 error ("argument 2 must be a 5-bit unsigned literal");
9738 return const0_rtx;
9742 if (target == 0
9743 || GET_MODE (target) != tmode
9744 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9745 target = gen_reg_rtx (tmode);
9747 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9748 op0 = copy_to_mode_reg (mode0, op0);
9749 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9750 op1 = copy_to_mode_reg (mode1, op1);
9752 pat = GEN_FCN (icode) (target, op0, op1);
9753 if (! pat)
9754 return 0;
9755 emit_insn (pat);
9757 return target;
9760 static rtx
9761 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
9763 rtx pat, scratch;
9764 tree cr6_form = CALL_EXPR_ARG (exp, 0);
9765 tree arg0 = CALL_EXPR_ARG (exp, 1);
9766 tree arg1 = CALL_EXPR_ARG (exp, 2);
9767 rtx op0 = expand_normal (arg0);
9768 rtx op1 = expand_normal (arg1);
9769 enum machine_mode tmode = SImode;
9770 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
9771 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
9772 int cr6_form_int;
9774 if (TREE_CODE (cr6_form) != INTEGER_CST)
9776 error ("argument 1 of __builtin_altivec_predicate must be a constant");
9777 return const0_rtx;
9779 else
9780 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
9782 gcc_assert (mode0 == mode1);
9784 /* If we have invalid arguments, bail out before generating bad rtl. */
9785 if (arg0 == error_mark_node || arg1 == error_mark_node)
9786 return const0_rtx;
9788 if (target == 0
9789 || GET_MODE (target) != tmode
9790 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9791 target = gen_reg_rtx (tmode);
9793 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
9794 op0 = copy_to_mode_reg (mode0, op0);
9795 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
9796 op1 = copy_to_mode_reg (mode1, op1);
9798 scratch = gen_reg_rtx (mode0);
9800 pat = GEN_FCN (icode) (scratch, op0, op1);
9801 if (! pat)
9802 return 0;
9803 emit_insn (pat);
9805 /* The vec_any* and vec_all* predicates use the same opcodes for two
9806 different operations, but the bits in CR6 will be different
9807 depending on what information we want. So we have to play tricks
9808 with CR6 to get the right bits out.
9810 If you think this is disgusting, look at the specs for the
9811 AltiVec predicates. */
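/* Illustrative mapping (assuming the __CR6_* encodings in altivec.h,
   where __CR6_EQ is 0, __CR6_EQ_REV is 1, __CR6_LT is 2 and
   __CR6_LT_REV is 3): vec_all_eq (a, b) arrives here as a vcmpequ*
   pattern with cr6_form 2 (test the "all true" LT bit of CR6), while
   vec_any_eq (a, b) uses cr6_form 1 (test the "all false" EQ bit,
   reversed).  */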
9813 switch (cr6_form_int)
9815 case 0:
9816 emit_insn (gen_cr6_test_for_zero (target));
9817 break;
9818 case 1:
9819 emit_insn (gen_cr6_test_for_zero_reverse (target));
9820 break;
9821 case 2:
9822 emit_insn (gen_cr6_test_for_lt (target));
9823 break;
9824 case 3:
9825 emit_insn (gen_cr6_test_for_lt_reverse (target));
9826 break;
9827 default:
9828 error ("argument 1 of __builtin_altivec_predicate is out of range");
9829 break;
9832 return target;
9835 static rtx
9836 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
9838 rtx pat, addr;
9839 tree arg0 = CALL_EXPR_ARG (exp, 0);
9840 tree arg1 = CALL_EXPR_ARG (exp, 1);
9841 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9842 enum machine_mode mode0 = Pmode;
9843 enum machine_mode mode1 = Pmode;
9844 rtx op0 = expand_normal (arg0);
9845 rtx op1 = expand_normal (arg1);
9847 if (icode == CODE_FOR_nothing)
9848 /* Builtin not supported on this processor. */
9849 return 0;
9851 /* If we got invalid arguments, bail out before generating bad rtl. */
9852 if (arg0 == error_mark_node || arg1 == error_mark_node)
9853 return const0_rtx;
9855 if (target == 0
9856 || GET_MODE (target) != tmode
9857 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9858 target = gen_reg_rtx (tmode);
9860 op1 = copy_to_mode_reg (mode1, op1);
9862 if (op0 == const0_rtx)
9864 addr = gen_rtx_MEM (tmode, op1);
9866 else
9868 op0 = copy_to_mode_reg (mode0, op0);
9869 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
9872 pat = GEN_FCN (icode) (target, addr);
9874 if (! pat)
9875 return 0;
9876 emit_insn (pat);
9878 return target;
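/* Illustrative note on the BLK parameter of the next function: the
   lvlx/lvrx style loads read a partial, possibly unaligned chunk, so
   their memory operand is described with BLKmode rather than a full
   vector mode, which would overstate what is actually accessed.  */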
9881 static rtx
9882 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
9884 rtx pat, addr;
9885 tree arg0 = CALL_EXPR_ARG (exp, 0);
9886 tree arg1 = CALL_EXPR_ARG (exp, 1);
9887 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9888 enum machine_mode mode0 = Pmode;
9889 enum machine_mode mode1 = Pmode;
9890 rtx op0 = expand_normal (arg0);
9891 rtx op1 = expand_normal (arg1);
9893 if (icode == CODE_FOR_nothing)
9894 /* Builtin not supported on this processor. */
9895 return 0;
9897 /* If we got invalid arguments, bail out before generating bad rtl. */
9898 if (arg0 == error_mark_node || arg1 == error_mark_node)
9899 return const0_rtx;
9901 if (target == 0
9902 || GET_MODE (target) != tmode
9903 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9904 target = gen_reg_rtx (tmode);
9906 op1 = copy_to_mode_reg (mode1, op1);
9908 if (op0 == const0_rtx)
9910 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
9912 else
9914 op0 = copy_to_mode_reg (mode0, op0);
9915 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
9918 pat = GEN_FCN (icode) (target, addr);
9920 if (! pat)
9921 return 0;
9922 emit_insn (pat);
9924 return target;
9927 static rtx
9928 spe_expand_stv_builtin (enum insn_code icode, tree exp)
9930 tree arg0 = CALL_EXPR_ARG (exp, 0);
9931 tree arg1 = CALL_EXPR_ARG (exp, 1);
9932 tree arg2 = CALL_EXPR_ARG (exp, 2);
9933 rtx op0 = expand_normal (arg0);
9934 rtx op1 = expand_normal (arg1);
9935 rtx op2 = expand_normal (arg2);
9936 rtx pat;
9937 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
9938 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
9939 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
9941 /* Invalid arguments. Bail before doing anything stoopid! */
9942 if (arg0 == error_mark_node
9943 || arg1 == error_mark_node
9944 || arg2 == error_mark_node)
9945 return const0_rtx;
9947 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
9948 op0 = copy_to_mode_reg (mode2, op0);
9949 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
9950 op1 = copy_to_mode_reg (mode0, op1);
9951 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
9952 op2 = copy_to_mode_reg (mode1, op2);
9954 pat = GEN_FCN (icode) (op1, op2, op0);
9955 if (pat)
9956 emit_insn (pat);
9957 return NULL_RTX;
9960 static rtx
9961 paired_expand_stv_builtin (enum insn_code icode, tree exp)
9963 tree arg0 = CALL_EXPR_ARG (exp, 0);
9964 tree arg1 = CALL_EXPR_ARG (exp, 1);
9965 tree arg2 = CALL_EXPR_ARG (exp, 2);
9966 rtx op0 = expand_normal (arg0);
9967 rtx op1 = expand_normal (arg1);
9968 rtx op2 = expand_normal (arg2);
9969 rtx pat, addr;
9970 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9971 enum machine_mode mode1 = Pmode;
9972 enum machine_mode mode2 = Pmode;
9974 /* Invalid arguments. Bail before doing anything stoopid! */
9975 if (arg0 == error_mark_node
9976 || arg1 == error_mark_node
9977 || arg2 == error_mark_node)
9978 return const0_rtx;
9980 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
9981 op0 = copy_to_mode_reg (tmode, op0);
9983 op2 = copy_to_mode_reg (mode2, op2);
9985 if (op1 == const0_rtx)
9987 addr = gen_rtx_MEM (tmode, op2);
9989 else
9991 op1 = copy_to_mode_reg (mode1, op1);
9992 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
9995 pat = GEN_FCN (icode) (addr, op0);
9996 if (pat)
9997 emit_insn (pat);
9998 return NULL_RTX;
10001 static rtx
10002 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
10004 tree arg0 = CALL_EXPR_ARG (exp, 0);
10005 tree arg1 = CALL_EXPR_ARG (exp, 1);
10006 tree arg2 = CALL_EXPR_ARG (exp, 2);
10007 rtx op0 = expand_normal (arg0);
10008 rtx op1 = expand_normal (arg1);
10009 rtx op2 = expand_normal (arg2);
10010 rtx pat, addr;
10011 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10012 enum machine_mode smode = insn_data[icode].operand[1].mode;
10013 enum machine_mode mode1 = Pmode;
10014 enum machine_mode mode2 = Pmode;
10016 /* Invalid arguments. Bail before doing anything stoopid! */
10017 if (arg0 == error_mark_node
10018 || arg1 == error_mark_node
10019 || arg2 == error_mark_node)
10020 return const0_rtx;
10022 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
10023 op0 = copy_to_mode_reg (smode, op0);
10025 op2 = copy_to_mode_reg (mode2, op2);
10027 if (op1 == const0_rtx)
10029 addr = gen_rtx_MEM (tmode, op2);
10031 else
10033 op1 = copy_to_mode_reg (mode1, op1);
10034 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
10037 pat = GEN_FCN (icode) (addr, op0);
10038 if (pat)
10039 emit_insn (pat);
10040 return NULL_RTX;
10043 static rtx
10044 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
10046 rtx pat;
10047 tree arg0 = CALL_EXPR_ARG (exp, 0);
10048 tree arg1 = CALL_EXPR_ARG (exp, 1);
10049 tree arg2 = CALL_EXPR_ARG (exp, 2);
10050 rtx op0 = expand_normal (arg0);
10051 rtx op1 = expand_normal (arg1);
10052 rtx op2 = expand_normal (arg2);
10053 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10054 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10055 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10056 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
10058 if (icode == CODE_FOR_nothing)
10059 /* Builtin not supported on this processor. */
10060 return 0;
10062 /* If we got invalid arguments, bail out before generating bad rtl. */
10063 if (arg0 == error_mark_node
10064 || arg1 == error_mark_node
10065 || arg2 == error_mark_node)
10066 return const0_rtx;
10068 /* Check and prepare the argument depending on the instruction code.
10070 Note that a switch statement instead of this sequence of tests
10071 would be invalid: many of the CODE_FOR_* values could be
10072 CODE_FOR_nothing, which would yield multiple case labels with
10073 identical values. (If icode were CODE_FOR_nothing we would never
10074 reach this point at runtime anyway.) */
10075 if (icode == CODE_FOR_altivec_vsldoi_v4sf
10076 || icode == CODE_FOR_altivec_vsldoi_v4si
10077 || icode == CODE_FOR_altivec_vsldoi_v8hi
10078 || icode == CODE_FOR_altivec_vsldoi_v16qi)
10080 /* Only allow 4-bit unsigned literals. */
10081 STRIP_NOPS (arg2);
10082 if (TREE_CODE (arg2) != INTEGER_CST
10083 || TREE_INT_CST_LOW (arg2) & ~0xf)
10085 error ("argument 3 must be a 4-bit unsigned literal");
10086 return const0_rtx;
10089 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
10090 || icode == CODE_FOR_vsx_xxpermdi_v2di
10091 || icode == CODE_FOR_vsx_xxsldwi_v16qi
10092 || icode == CODE_FOR_vsx_xxsldwi_v8hi
10093 || icode == CODE_FOR_vsx_xxsldwi_v4si
10094 || icode == CODE_FOR_vsx_xxsldwi_v4sf
10095 || icode == CODE_FOR_vsx_xxsldwi_v2di
10096 || icode == CODE_FOR_vsx_xxsldwi_v2df)
10098 /* Only allow 2-bit unsigned literals. */
10099 STRIP_NOPS (arg2);
10100 if (TREE_CODE (arg2) != INTEGER_CST
10101 || TREE_INT_CST_LOW (arg2) & ~0x3)
10103 error ("argument 3 must be a 2-bit unsigned literal");
10104 return const0_rtx;
10107 else if (icode == CODE_FOR_vsx_set_v2df
10108 || icode == CODE_FOR_vsx_set_v2di)
10110 /* Only allow 1-bit unsigned literals. */
10111 STRIP_NOPS (arg2);
10112 if (TREE_CODE (arg2) != INTEGER_CST
10113 || TREE_INT_CST_LOW (arg2) & ~0x1)
10115 error ("argument 3 must be a 1-bit unsigned literal");
10116 return const0_rtx;
10120 if (target == 0
10121 || GET_MODE (target) != tmode
10122 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10123 target = gen_reg_rtx (tmode);
10125 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10126 op0 = copy_to_mode_reg (mode0, op0);
10127 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10128 op1 = copy_to_mode_reg (mode1, op1);
10129 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
10130 op2 = copy_to_mode_reg (mode2, op2);
10132 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
10133 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
10134 else
10135 pat = GEN_FCN (icode) (target, op0, op1, op2);
10136 if (! pat)
10137 return 0;
10138 emit_insn (pat);
10140 return target;
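/* Illustrative note: the literal checks above are what diagnose, e.g.,
   vec_sld (a, b, 16) -- vsldoi has a 4-bit shift field, so only 0..15
   are representable -- and likewise the 2-bit xxpermdi/xxsldwi and
   1-bit vsx_set immediates.  */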
10143 /* Expand the lvx builtins. */
10144 static rtx
10145 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
10147 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10148 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10149 tree arg0;
10150 enum machine_mode tmode, mode0;
10151 rtx pat, op0;
10152 enum insn_code icode;
10154 switch (fcode)
10156 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
10157 icode = CODE_FOR_vector_altivec_load_v16qi;
10158 break;
10159 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
10160 icode = CODE_FOR_vector_altivec_load_v8hi;
10161 break;
10162 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
10163 icode = CODE_FOR_vector_altivec_load_v4si;
10164 break;
10165 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
10166 icode = CODE_FOR_vector_altivec_load_v4sf;
10167 break;
10168 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
10169 icode = CODE_FOR_vector_altivec_load_v2df;
10170 break;
10171 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
10172 icode = CODE_FOR_vector_altivec_load_v2di;
10173 break;
10174 default:
10175 *expandedp = false;
10176 return NULL_RTX;
10179 *expandedp = true;
10181 arg0 = CALL_EXPR_ARG (exp, 0);
10182 op0 = expand_normal (arg0);
10183 tmode = insn_data[icode].operand[0].mode;
10184 mode0 = insn_data[icode].operand[1].mode;
10186 if (target == 0
10187 || GET_MODE (target) != tmode
10188 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10189 target = gen_reg_rtx (tmode);
10191 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10192 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10194 pat = GEN_FCN (icode) (target, op0);
10195 if (! pat)
10196 return 0;
10197 emit_insn (pat);
10198 return target;
10201 /* Expand the stvx builtins. */
10202 static rtx
10203 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10204 bool *expandedp)
10206 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10207 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10208 tree arg0, arg1;
10209 enum machine_mode mode0, mode1;
10210 rtx pat, op0, op1;
10211 enum insn_code icode;
10213 switch (fcode)
10215 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
10216 icode = CODE_FOR_vector_altivec_store_v16qi;
10217 break;
10218 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
10219 icode = CODE_FOR_vector_altivec_store_v8hi;
10220 break;
10221 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
10222 icode = CODE_FOR_vector_altivec_store_v4si;
10223 break;
10224 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
10225 icode = CODE_FOR_vector_altivec_store_v4sf;
10226 break;
10227 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
10228 icode = CODE_FOR_vector_altivec_store_v2df;
10229 break;
10230 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
10231 icode = CODE_FOR_vector_altivec_store_v2di;
10232 break;
10233 default:
10234 *expandedp = false;
10235 return NULL_RTX;
10238 arg0 = CALL_EXPR_ARG (exp, 0);
10239 arg1 = CALL_EXPR_ARG (exp, 1);
10240 op0 = expand_normal (arg0);
10241 op1 = expand_normal (arg1);
10242 mode0 = insn_data[icode].operand[0].mode;
10243 mode1 = insn_data[icode].operand[1].mode;
10245 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10246 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
10247 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
10248 op1 = copy_to_mode_reg (mode1, op1);
10250 pat = GEN_FCN (icode) (op0, op1);
10251 if (pat)
10252 emit_insn (pat);
10254 *expandedp = true;
10255 return NULL_RTX;
10258 /* Expand the dst builtins. */
10259 static rtx
10260 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
10261 bool *expandedp)
10263 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10264 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10265 tree arg0, arg1, arg2;
10266 enum machine_mode mode0, mode1;
10267 rtx pat, op0, op1, op2;
10268 const struct builtin_description *d;
10269 size_t i;
10271 *expandedp = false;
10273 /* Handle DST variants. */
10274 d = bdesc_dst;
10275 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
10276 if (d->code == fcode)
10278 arg0 = CALL_EXPR_ARG (exp, 0);
10279 arg1 = CALL_EXPR_ARG (exp, 1);
10280 arg2 = CALL_EXPR_ARG (exp, 2);
10281 op0 = expand_normal (arg0);
10282 op1 = expand_normal (arg1);
10283 op2 = expand_normal (arg2);
10284 mode0 = insn_data[d->icode].operand[0].mode;
10285 mode1 = insn_data[d->icode].operand[1].mode;
10287 /* Invalid arguments, bail out before generating bad rtl. */
10288 if (arg0 == error_mark_node
10289 || arg1 == error_mark_node
10290 || arg2 == error_mark_node)
10291 return const0_rtx;
10293 *expandedp = true;
10294 STRIP_NOPS (arg2);
10295 if (TREE_CODE (arg2) != INTEGER_CST
10296 || TREE_INT_CST_LOW (arg2) & ~0x3)
10298 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
10299 return const0_rtx;
10302 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
10303 op0 = copy_to_mode_reg (Pmode, op0);
10304 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
10305 op1 = copy_to_mode_reg (mode1, op1);
10307 pat = GEN_FCN (d->icode) (op0, op1, op2);
10308 if (pat != 0)
10309 emit_insn (pat);
10311 return NULL_RTX;
10314 return NULL_RTX;
10317 /* Expand vec_init builtin. */
10318 static rtx
10319 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
10321 enum machine_mode tmode = TYPE_MODE (type);
10322 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
10323 int i, n_elt = GET_MODE_NUNITS (tmode);
10324 rtvec v = rtvec_alloc (n_elt);
10326 gcc_assert (VECTOR_MODE_P (tmode));
10327 gcc_assert (n_elt == call_expr_nargs (exp));
10329 for (i = 0; i < n_elt; ++i)
10331 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
10332 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
10335 if (!target || !register_operand (target, tmode))
10336 target = gen_reg_rtx (tmode);
10338 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
10339 return target;
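/* Example (illustrative): a call such as
     __builtin_vec_init_v4si (a, b, c, d)
   reaches here with n_elt == 4; each scalar argument is expanded, and
   the resulting PARALLEL is handed to rs6000_expand_vector_init to
   pick an initialization sequence.  */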
10342 /* Return the integer constant in ARG. Constrain it to be in the range
10343 of the subparts of VEC_TYPE; issue an error if not. */
10345 static int
10346 get_element_number (tree vec_type, tree arg)
10348 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
10350 if (!host_integerp (arg, 1)
10351 || (elt = tree_low_cst (arg, 1), elt > max))
10353 error ("selector must be an integer constant in the range 0..%wi", max);
10354 return 0;
10357 return elt;
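/* For example (illustrative), with a V4SI argument TYPE_VECTOR_SUBPARTS
   is 4, so max is 3 and a selector of 4 is diagnosed by the error above
   rather than silently wrapping.  */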
10360 /* Expand vec_set builtin. */
10361 static rtx
10362 altivec_expand_vec_set_builtin (tree exp)
10364 enum machine_mode tmode, mode1;
10365 tree arg0, arg1, arg2;
10366 int elt;
10367 rtx op0, op1;
10369 arg0 = CALL_EXPR_ARG (exp, 0);
10370 arg1 = CALL_EXPR_ARG (exp, 1);
10371 arg2 = CALL_EXPR_ARG (exp, 2);
10373 tmode = TYPE_MODE (TREE_TYPE (arg0));
10374 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10375 gcc_assert (VECTOR_MODE_P (tmode));
10377 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
10378 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
10379 elt = get_element_number (TREE_TYPE (arg0), arg2);
10381 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
10382 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
10384 op0 = force_reg (tmode, op0);
10385 op1 = force_reg (mode1, op1);
10387 rs6000_expand_vector_set (op0, op1, elt);
10389 return op0;
10392 /* Expand vec_ext builtin. */
10393 static rtx
10394 altivec_expand_vec_ext_builtin (tree exp, rtx target)
10396 enum machine_mode tmode, mode0;
10397 tree arg0, arg1;
10398 int elt;
10399 rtx op0;
10401 arg0 = CALL_EXPR_ARG (exp, 0);
10402 arg1 = CALL_EXPR_ARG (exp, 1);
10404 op0 = expand_normal (arg0);
10405 elt = get_element_number (TREE_TYPE (arg0), arg1);
10407 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
10408 mode0 = TYPE_MODE (TREE_TYPE (arg0));
10409 gcc_assert (VECTOR_MODE_P (mode0));
10411 op0 = force_reg (mode0, op0);
10413 if (optimize || !target || !register_operand (target, tmode))
10414 target = gen_reg_rtx (tmode);
10416 rs6000_expand_vector_extract (target, op0, elt);
10418 return target;
10421 /* Expand the builtin in EXP and store the result in TARGET. Store
10422 true in *EXPANDEDP if we found a builtin to expand. */
10423 static rtx
10424 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
10426 const struct builtin_description *d;
10427 size_t i;
10428 enum insn_code icode;
10429 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10430 tree arg0;
10431 rtx op0, pat;
10432 enum machine_mode tmode, mode0;
10433 enum rs6000_builtins fcode
10434 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
10436 if (rs6000_overloaded_builtin_p (fcode))
10438 *expandedp = true;
10439 error ("unresolved overload for Altivec builtin %qF", fndecl);
10441 /* Given it is invalid, just generate a normal call. */
10442 return expand_call (exp, target, false);
10445 target = altivec_expand_ld_builtin (exp, target, expandedp);
10446 if (*expandedp)
10447 return target;
10449 target = altivec_expand_st_builtin (exp, target, expandedp);
10450 if (*expandedp)
10451 return target;
10453 target = altivec_expand_dst_builtin (exp, target, expandedp);
10454 if (*expandedp)
10455 return target;
10457 *expandedp = true;
10459 switch (fcode)
10461 case ALTIVEC_BUILTIN_STVX:
10462 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
10463 case ALTIVEC_BUILTIN_STVEBX:
10464 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
10465 case ALTIVEC_BUILTIN_STVEHX:
10466 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
10467 case ALTIVEC_BUILTIN_STVEWX:
10468 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
10469 case ALTIVEC_BUILTIN_STVXL:
10470 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
10472 case ALTIVEC_BUILTIN_STVLX:
10473 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
10474 case ALTIVEC_BUILTIN_STVLXL:
10475 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
10476 case ALTIVEC_BUILTIN_STVRX:
10477 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
10478 case ALTIVEC_BUILTIN_STVRXL:
10479 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
10481 case VSX_BUILTIN_STXVD2X_V2DF:
10482 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
10483 case VSX_BUILTIN_STXVD2X_V2DI:
10484 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
10485 case VSX_BUILTIN_STXVW4X_V4SF:
10486 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
10487 case VSX_BUILTIN_STXVW4X_V4SI:
10488 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
10489 case VSX_BUILTIN_STXVW4X_V8HI:
10490 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
10491 case VSX_BUILTIN_STXVW4X_V16QI:
10492 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
10494 case ALTIVEC_BUILTIN_MFVSCR:
10495 icode = CODE_FOR_altivec_mfvscr;
10496 tmode = insn_data[icode].operand[0].mode;
10498 if (target == 0
10499 || GET_MODE (target) != tmode
10500 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10501 target = gen_reg_rtx (tmode);
10503 pat = GEN_FCN (icode) (target);
10504 if (! pat)
10505 return 0;
10506 emit_insn (pat);
10507 return target;
10509 case ALTIVEC_BUILTIN_MTVSCR:
10510 icode = CODE_FOR_altivec_mtvscr;
10511 arg0 = CALL_EXPR_ARG (exp, 0);
10512 op0 = expand_normal (arg0);
10513 mode0 = insn_data[icode].operand[0].mode;
10515 /* If we got invalid arguments, bail out before generating bad rtl. */
10516 if (arg0 == error_mark_node)
10517 return const0_rtx;
10519 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10520 op0 = copy_to_mode_reg (mode0, op0);
10522 pat = GEN_FCN (icode) (op0);
10523 if (pat)
10524 emit_insn (pat);
10525 return NULL_RTX;
10527 case ALTIVEC_BUILTIN_DSSALL:
10528 emit_insn (gen_altivec_dssall ());
10529 return NULL_RTX;
10531 case ALTIVEC_BUILTIN_DSS:
10532 icode = CODE_FOR_altivec_dss;
10533 arg0 = CALL_EXPR_ARG (exp, 0);
10534 STRIP_NOPS (arg0);
10535 op0 = expand_normal (arg0);
10536 mode0 = insn_data[icode].operand[0].mode;
10538 /* If we got invalid arguments, bail out before generating bad rtl. */
10539 if (arg0 == error_mark_node)
10540 return const0_rtx;
10542 if (TREE_CODE (arg0) != INTEGER_CST
10543 || TREE_INT_CST_LOW (arg0) & ~0x3)
10545 error ("argument to dss must be a 2-bit unsigned literal");
10546 return const0_rtx;
10549 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10550 op0 = copy_to_mode_reg (mode0, op0);
10552 emit_insn (gen_altivec_dss (op0));
10553 return NULL_RTX;
10555 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
10556 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
10557 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
10558 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
10559 case VSX_BUILTIN_VEC_INIT_V2DF:
10560 case VSX_BUILTIN_VEC_INIT_V2DI:
10561 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
10563 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
10564 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
10565 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
10566 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
10567 case VSX_BUILTIN_VEC_SET_V2DF:
10568 case VSX_BUILTIN_VEC_SET_V2DI:
10569 return altivec_expand_vec_set_builtin (exp);
10571 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
10572 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
10573 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
10574 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
10575 case VSX_BUILTIN_VEC_EXT_V2DF:
10576 case VSX_BUILTIN_VEC_EXT_V2DI:
10577 return altivec_expand_vec_ext_builtin (exp, target);
10579 default:
10580 break;
10581 /* Proceed to the table-driven expanders below. */
10584 /* Expand abs* operations. */
10585 d = bdesc_abs;
10586 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
10587 if (d->code == fcode)
10588 return altivec_expand_abs_builtin (d->icode, exp, target);
10590 /* Expand the AltiVec predicates. */
10591 d = bdesc_altivec_preds;
10592 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
10593 if (d->code == fcode)
10594 return altivec_expand_predicate_builtin (d->icode, exp, target);
10596 /* LV* are funky. We initialized them differently. */
10597 switch (fcode)
10599 case ALTIVEC_BUILTIN_LVSL:
10600 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
10601 exp, target, false);
10602 case ALTIVEC_BUILTIN_LVSR:
10603 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
10604 exp, target, false);
10605 case ALTIVEC_BUILTIN_LVEBX:
10606 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
10607 exp, target, false);
10608 case ALTIVEC_BUILTIN_LVEHX:
10609 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
10610 exp, target, false);
10611 case ALTIVEC_BUILTIN_LVEWX:
10612 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
10613 exp, target, false);
10614 case ALTIVEC_BUILTIN_LVXL:
10615 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
10616 exp, target, false);
10617 case ALTIVEC_BUILTIN_LVX:
10618 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
10619 exp, target, false);
10620 case ALTIVEC_BUILTIN_LVLX:
10621 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
10622 exp, target, true);
10623 case ALTIVEC_BUILTIN_LVLXL:
10624 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
10625 exp, target, true);
10626 case ALTIVEC_BUILTIN_LVRX:
10627 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
10628 exp, target, true);
10629 case ALTIVEC_BUILTIN_LVRXL:
10630 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
10631 exp, target, true);
10632 case VSX_BUILTIN_LXVD2X_V2DF:
10633 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
10634 exp, target, false);
10635 case VSX_BUILTIN_LXVD2X_V2DI:
10636 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
10637 exp, target, false);
10638 case VSX_BUILTIN_LXVW4X_V4SF:
10639 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
10640 exp, target, false);
10641 case VSX_BUILTIN_LXVW4X_V4SI:
10642 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
10643 exp, target, false);
10644 case VSX_BUILTIN_LXVW4X_V8HI:
10645 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
10646 exp, target, false);
10647 case VSX_BUILTIN_LXVW4X_V16QI:
10648 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
10649 exp, target, false);
10651 default:
10652 break;
10653 /* Not handled here; continue below the switch. */
10656 *expandedp = false;
10657 return NULL_RTX;
10660 /* Expand the builtin in EXP and store the result in TARGET. Store
10661 true in *EXPANDEDP if we found a builtin to expand. */
10662 static rtx
10663 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
10665 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10666 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10667 const struct builtin_description *d;
10668 size_t i;
10670 *expandedp = true;
10672 switch (fcode)
10674 case PAIRED_BUILTIN_STX:
10675 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
10676 case PAIRED_BUILTIN_LX:
10677 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
10678 default:
10679 break;
10680 /* Proceed to the paired predicate table below. */
10683 /* Expand the paired predicates. */
10684 d = bdesc_paired_preds;
10685 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
10686 if (d->code == fcode)
10687 return paired_expand_predicate_builtin (d->icode, exp, target);
10689 *expandedp = false;
10690 return NULL_RTX;
10693 /* Binops that need to be initialized manually, but can be expanded
10694 automagically by rs6000_expand_binop_builtin. */
10695 static const struct builtin_description bdesc_2arg_spe[] =
10697 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
10698 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
10699 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
10700 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
10701 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
10702 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
10703 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
10704 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
10705 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
10706 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
10707 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
10708 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
10709 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
10710 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
10711 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
10712 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
10713 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
10714 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
10715 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
10716 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
10717 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
10718 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
10721 /* Expand the builtin in EXP and store the result in TARGET. Store
10722 true in *EXPANDEDP if we found a builtin to expand.
10724 This expands the SPE builtins that are not simple unary and binary
10725 operations. */
10726 static rtx
10727 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
10729 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10730 tree arg1, arg0;
10731 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10732 enum insn_code icode;
10733 enum machine_mode tmode, mode0;
10734 rtx pat, op0;
10735 const struct builtin_description *d;
10736 size_t i;
10738 *expandedp = true;
10740 /* Syntax check for a 5-bit unsigned immediate. */
10741 switch (fcode)
10743 case SPE_BUILTIN_EVSTDD:
10744 case SPE_BUILTIN_EVSTDH:
10745 case SPE_BUILTIN_EVSTDW:
10746 case SPE_BUILTIN_EVSTWHE:
10747 case SPE_BUILTIN_EVSTWHO:
10748 case SPE_BUILTIN_EVSTWWE:
10749 case SPE_BUILTIN_EVSTWWO:
10750 arg1 = CALL_EXPR_ARG (exp, 2);
10751 if (TREE_CODE (arg1) != INTEGER_CST
10752 || TREE_INT_CST_LOW (arg1) & ~0x1f)
10754 error ("argument 2 must be a 5-bit unsigned literal");
10755 return const0_rtx;
10757 break;
10758 default:
10759 break;
10762 /* The evsplat*i instructions are not quite generic. */
10763 switch (fcode)
10765 case SPE_BUILTIN_EVSPLATFI:
10766 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
10767 exp, target);
10768 case SPE_BUILTIN_EVSPLATI:
10769 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
10770 exp, target);
10771 default:
10772 break;
10775 d = bdesc_2arg_spe;
10776 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
10777 if (d->code == fcode)
10778 return rs6000_expand_binop_builtin (d->icode, exp, target);
10780 d = bdesc_spe_predicates;
10781 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
10782 if (d->code == fcode)
10783 return spe_expand_predicate_builtin (d->icode, exp, target);
10785 d = bdesc_spe_evsel;
10786 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
10787 if (d->code == fcode)
10788 return spe_expand_evsel_builtin (d->icode, exp, target);
10790 switch (fcode)
10792 case SPE_BUILTIN_EVSTDDX:
10793 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
10794 case SPE_BUILTIN_EVSTDHX:
10795 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
10796 case SPE_BUILTIN_EVSTDWX:
10797 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
10798 case SPE_BUILTIN_EVSTWHEX:
10799 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
10800 case SPE_BUILTIN_EVSTWHOX:
10801 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
10802 case SPE_BUILTIN_EVSTWWEX:
10803 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
10804 case SPE_BUILTIN_EVSTWWOX:
10805 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
10806 case SPE_BUILTIN_EVSTDD:
10807 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
10808 case SPE_BUILTIN_EVSTDH:
10809 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
10810 case SPE_BUILTIN_EVSTDW:
10811 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
10812 case SPE_BUILTIN_EVSTWHE:
10813 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
10814 case SPE_BUILTIN_EVSTWHO:
10815 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
10816 case SPE_BUILTIN_EVSTWWE:
10817 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
10818 case SPE_BUILTIN_EVSTWWO:
10819 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
10820 case SPE_BUILTIN_MFSPEFSCR:
10821 icode = CODE_FOR_spe_mfspefscr;
10822 tmode = insn_data[icode].operand[0].mode;
10824 if (target == 0
10825 || GET_MODE (target) != tmode
10826 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10827 target = gen_reg_rtx (tmode);
10829 pat = GEN_FCN (icode) (target);
10830 if (! pat)
10831 return 0;
10832 emit_insn (pat);
10833 return target;
10834 case SPE_BUILTIN_MTSPEFSCR:
10835 icode = CODE_FOR_spe_mtspefscr;
10836 arg0 = CALL_EXPR_ARG (exp, 0);
10837 op0 = expand_normal (arg0);
10838 mode0 = insn_data[icode].operand[0].mode;
10840 if (arg0 == error_mark_node)
10841 return const0_rtx;
10843 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
10844 op0 = copy_to_mode_reg (mode0, op0);
10846 pat = GEN_FCN (icode) (op0);
10847 if (pat)
10848 emit_insn (pat);
10849 return NULL_RTX;
10850 default:
10851 break;
10854 *expandedp = false;
10855 return NULL_RTX;
10858 static rtx
10859 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10861 rtx pat, scratch, tmp;
10862 tree form = CALL_EXPR_ARG (exp, 0);
10863 tree arg0 = CALL_EXPR_ARG (exp, 1);
10864 tree arg1 = CALL_EXPR_ARG (exp, 2);
10865 rtx op0 = expand_normal (arg0);
10866 rtx op1 = expand_normal (arg1);
10867 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10868 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10869 int form_int;
10870 enum rtx_code code;
10872 if (TREE_CODE (form) != INTEGER_CST)
10874 error ("argument 1 of __builtin_paired_predicate must be a constant");
10875 return const0_rtx;
10877 else
10878 form_int = TREE_INT_CST_LOW (form);
10880 gcc_assert (mode0 == mode1);
10882 if (arg0 == error_mark_node || arg1 == error_mark_node)
10883 return const0_rtx;
10885 if (target == 0
10886 || GET_MODE (target) != SImode
10887 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
10888 target = gen_reg_rtx (SImode);
10889 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
10890 op0 = copy_to_mode_reg (mode0, op0);
10891 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
10892 op1 = copy_to_mode_reg (mode1, op1);
10894 scratch = gen_reg_rtx (CCFPmode);
10896 pat = GEN_FCN (icode) (scratch, op0, op1);
10897 if (!pat)
10898 return const0_rtx;
10900 emit_insn (pat);
10902 switch (form_int)
10904 /* LT bit. */
10905 case 0:
10906 code = LT;
10907 break;
10908 /* GT bit. */
10909 case 1:
10910 code = GT;
10911 break;
10912 /* EQ bit. */
10913 case 2:
10914 code = EQ;
10915 break;
10916 /* UN bit. */
10917 case 3:
10918 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
10919 return target;
10920 default:
10921 error ("argument 1 of __builtin_paired_predicate is out of range");
10922 return const0_rtx;
10925 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
10926 emit_move_insn (target, tmp);
10927 return target;
10930 static rtx
10931 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
10933 rtx pat, scratch, tmp;
10934 tree form = CALL_EXPR_ARG (exp, 0);
10935 tree arg0 = CALL_EXPR_ARG (exp, 1);
10936 tree arg1 = CALL_EXPR_ARG (exp, 2);
10937 rtx op0 = expand_normal (arg0);
10938 rtx op1 = expand_normal (arg1);
10939 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
10940 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
10941 int form_int;
10942 enum rtx_code code;
10944 if (TREE_CODE (form) != INTEGER_CST)
10946 error ("argument 1 of __builtin_spe_predicate must be a constant");
10947 return const0_rtx;
10949 else
10950 form_int = TREE_INT_CST_LOW (form);
10952 gcc_assert (mode0 == mode1);
10954 if (arg0 == error_mark_node || arg1 == error_mark_node)
10955 return const0_rtx;
10957 if (target == 0
10958 || GET_MODE (target) != SImode
10959 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
10960 target = gen_reg_rtx (SImode);
10962 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
10963 op0 = copy_to_mode_reg (mode0, op0);
10964 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
10965 op1 = copy_to_mode_reg (mode1, op1);
10967 scratch = gen_reg_rtx (CCmode);
10969 pat = GEN_FCN (icode) (scratch, op0, op1);
10970 if (! pat)
10971 return const0_rtx;
10972 emit_insn (pat);
10974 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
10975 _lower_. We use one compare, but look in different bits of the
10976 CR for each variant.
10978 There are 2 elements in each SPE simd type (upper/lower). The CR
10979 bits are set as follows:
10981 BIT0 | BIT 1 | BIT 2 | BIT 3
10982 U | L | (U | L) | (U & L)
10984 So, for an "all" relationship, BIT 3 would be set.
10985 For an "any" relationship, BIT 2 would be set. Etc.
10987 Following traditional nomenclature, these bits map to:
10989 BIT0 | BIT 1 | BIT 2 | BIT 3
10990 LT | GT | EQ | OV
10992 Later, we will generate rtl to look in the OV, EQ, LT and GT bits
(for the "all", "any", "upper" and "lower" forms, respectively).
10995 switch (form_int)
10997 /* All variant. OV bit. */
10998 case 0:
10999 /* We need to get to the OV bit, which is the ORDERED bit. We
11000 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
11001 that's ugly and will make validate_condition_mode die.
11002 So let's just use another pattern. */
11003 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
11004 return target;
11005 /* Any variant. EQ bit. */
11006 case 1:
11007 code = EQ;
11008 break;
11009 /* Upper variant. LT bit. */
11010 case 2:
11011 code = LT;
11012 break;
11013 /* Lower variant. GT bit. */
11014 case 3:
11015 code = GT;
11016 break;
11017 default:
11018 error ("argument 1 of __builtin_spe_predicate is out of range");
11019 return const0_rtx;
11022 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
11023 emit_move_insn (target, tmp);
11025 return target;
11028 /* The evsel builtins look like this:
11030 e = __builtin_spe_evsel_OP (a, b, c, d);
11032 and work like this:
11034 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
11035 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];  */
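/* For example (an illustrative sketch -- the _gts variant name here is
   hypothetical; see rs6000-builtin.def for the exact spellings):

     __ev64_opaque__ e = __builtin_spe_evsel_gts (a, b, c, d);

   would pick c or d element-wise according to whether a > b.  */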
11038 static rtx
11039 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
11041 rtx pat, scratch;
11042 tree arg0 = CALL_EXPR_ARG (exp, 0);
11043 tree arg1 = CALL_EXPR_ARG (exp, 1);
11044 tree arg2 = CALL_EXPR_ARG (exp, 2);
11045 tree arg3 = CALL_EXPR_ARG (exp, 3);
11046 rtx op0 = expand_normal (arg0);
11047 rtx op1 = expand_normal (arg1);
11048 rtx op2 = expand_normal (arg2);
11049 rtx op3 = expand_normal (arg3);
11050 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11051 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11053 gcc_assert (mode0 == mode1);
11055 if (arg0 == error_mark_node || arg1 == error_mark_node
11056 || arg2 == error_mark_node || arg3 == error_mark_node)
11057 return const0_rtx;
11059 if (target == 0
11060 || GET_MODE (target) != mode0
11061 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
11062 target = gen_reg_rtx (mode0);
11064 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11065 op0 = copy_to_mode_reg (mode0, op0);
11066 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11067 op1 = copy_to_mode_reg (mode0, op1);
11068 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
11069 op2 = copy_to_mode_reg (mode0, op2);
11070 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
11071 op3 = copy_to_mode_reg (mode0, op3);
11073 /* Generate the compare. */
11074 scratch = gen_reg_rtx (CCmode);
11075 pat = GEN_FCN (icode) (scratch, op0, op1);
11076 if (! pat)
11077 return const0_rtx;
11078 emit_insn (pat);
11080 if (mode0 == V2SImode)
11081 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
11082 else
11083 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
11085 return target;
11088 /* Issue an error message for a builtin function that is called without the
11089 appropriate target options being set. */
11091 static void
11092 rs6000_invalid_builtin (enum rs6000_builtins fncode)
11094 size_t uns_fncode = (size_t)fncode;
11095 const char *name = rs6000_builtin_info[uns_fncode].name;
11096 unsigned fnmask = rs6000_builtin_info[uns_fncode].mask;
11098 gcc_assert (name != NULL);
11099 if ((fnmask & RS6000_BTM_CELL) != 0)
11100 error ("Builtin function %s is only valid for the cell processor", name);
11101 else if ((fnmask & RS6000_BTM_VSX) != 0)
11102 error ("Builtin function %s requires the -mvsx option", name);
11103 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
11104 error ("Builtin function %s requires the -maltivec option", name);
11105 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
11106 error ("Builtin function %s requires the -mpaired option", name);
11107 else if ((fnmask & RS6000_BTM_SPE) != 0)
11108 error ("Builtin function %s requires the -mspe option", name);
11109 else
11110 error ("Builtin function %s is not supported with the current options",
11111 name);
11114 /* Expand an expression EXP that calls a built-in function,
11115 with result going to TARGET if that's convenient
11116 (and in mode MODE if that's convenient).
11117 SUBTARGET may be used as the target for computing one of EXP's operands.
11118 IGNORE is nonzero if the value is to be ignored. */
11120 static rtx
11121 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11122 enum machine_mode mode ATTRIBUTE_UNUSED,
11123 int ignore ATTRIBUTE_UNUSED)
11125 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11126 enum rs6000_builtins fcode
11127 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
11128 size_t uns_fcode = (size_t)fcode;
11129 const struct builtin_description *d;
11130 size_t i;
11131 rtx ret;
11132 bool success;
11133 unsigned mask = rs6000_builtin_info[uns_fcode].mask;
11134 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
11136 if (TARGET_DEBUG_BUILTIN)
11138 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
11139 const char *name1 = rs6000_builtin_info[uns_fcode].name;
11140 const char *name2 = ((icode != CODE_FOR_nothing)
11141 ? get_insn_name ((int)icode)
11142 : "nothing");
11143 const char *name3;
11145 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
11147 default: name3 = "unknown"; break;
11148 case RS6000_BTC_SPECIAL: name3 = "special"; break;
11149 case RS6000_BTC_UNARY: name3 = "unary"; break;
11150 case RS6000_BTC_BINARY: name3 = "binary"; break;
11151 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
11152 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
11153 case RS6000_BTC_ABS: name3 = "abs"; break;
11154 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
11155 case RS6000_BTC_DST: name3 = "dst"; break;
11159 fprintf (stderr,
11160 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
11161 (name1) ? name1 : "---", fcode,
11162 (name2) ? name2 : "---", (int)icode,
11163 name3,
11164 func_valid_p ? "" : ", not valid");
11167 if (!func_valid_p)
11169 rs6000_invalid_builtin (fcode);
11171 /* Given it is invalid, just generate a normal call. */
11172 return expand_call (exp, target, ignore);
11175 switch (fcode)
11177 case RS6000_BUILTIN_RECIP:
11178 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
11180 case RS6000_BUILTIN_RECIPF:
11181 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
11183 case RS6000_BUILTIN_RSQRTF:
11184 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
11186 case RS6000_BUILTIN_RSQRT:
11187 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
11189 case POWER7_BUILTIN_BPERMD:
11190 return rs6000_expand_binop_builtin (((TARGET_64BIT)
11191 ? CODE_FOR_bpermd_di
11192 : CODE_FOR_bpermd_si), exp, target);
11194 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
11195 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
11197 int icode = (int) CODE_FOR_altivec_lvsr;
11198 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11199 enum machine_mode mode = insn_data[icode].operand[1].mode;
11200 tree arg;
11201 rtx op, addr, pat;
11203 gcc_assert (TARGET_ALTIVEC);
11205 arg = CALL_EXPR_ARG (exp, 0);
11206 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
11207 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
11208 addr = memory_address (mode, op);
11209 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
11210 op = addr;
11211 else
11213 /* For the load case we need to negate the address.  */
11214 op = gen_reg_rtx (GET_MODE (addr));
11215 emit_insn (gen_rtx_SET (VOIDmode, op,
11216 gen_rtx_NEG (GET_MODE (addr), addr)));
11218 op = gen_rtx_MEM (mode, op);
11220 if (target == 0
11221 || GET_MODE (target) != tmode
11222 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11223 target = gen_reg_rtx (tmode);
11225 /*pat = gen_altivec_lvsr (target, op);*/
11226 pat = GEN_FCN (icode) (target, op);
11227 if (!pat)
11228 return 0;
11229 emit_insn (pat);
11231 return target;
11234 case ALTIVEC_BUILTIN_VCFUX:
11235 case ALTIVEC_BUILTIN_VCFSX:
11236 case ALTIVEC_BUILTIN_VCTUXS:
11237 case ALTIVEC_BUILTIN_VCTSXS:
11238 /* FIXME: There's got to be a nicer way to handle this case than
11239 constructing a new CALL_EXPR. */
11240 if (call_expr_nargs (exp) == 1)
11242 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
11243 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
11245 break;
11247 default:
11248 break;
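/* Nothing matched the special cases above; hand the builtin to each
   enabled family expander in turn.  Each one sets SUCCESS if it
   recognized and expanded the call.  */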
11251 if (TARGET_ALTIVEC)
11253 ret = altivec_expand_builtin (exp, target, &success);
11255 if (success)
11256 return ret;
11258 if (TARGET_SPE)
11260 ret = spe_expand_builtin (exp, target, &success);
11262 if (success)
11263 return ret;
11265 if (TARGET_PAIRED_FLOAT)
11267 ret = paired_expand_builtin (exp, target, &success);
11269 if (success)
11270 return ret;
11273 gcc_assert (TARGET_ALTIVEC || TARGET_VSX || TARGET_SPE || TARGET_PAIRED_FLOAT);
11275 /* Handle simple unary operations. */
11276 d = bdesc_1arg;
11277 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11278 if (d->code == fcode)
11279 return rs6000_expand_unop_builtin (d->icode, exp, target);
11281 /* Handle simple binary operations. */
11282 d = bdesc_2arg;
11283 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11284 if (d->code == fcode)
11285 return rs6000_expand_binop_builtin (d->icode, exp, target);
11287 /* Handle simple ternary operations. */
11288 d = bdesc_3arg;
11289 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
11290 if (d->code == fcode)
11291 return rs6000_expand_ternop_builtin (d->icode, exp, target);
11293 gcc_unreachable ();
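/* Create the builtin vector types and declare every rs6000 builtin that
   the current target options permit.  */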
11296 static void
11297 rs6000_init_builtins (void)
11299 tree tdecl;
11300 tree ftype;
11301 enum machine_mode mode;
11303 if (TARGET_DEBUG_BUILTIN)
11304 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
11305 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
11306 (TARGET_SPE) ? ", spe" : "",
11307 (TARGET_ALTIVEC) ? ", altivec" : "",
11308 (TARGET_VSX) ? ", vsx" : "");
11310 V2SI_type_node = build_vector_type (intSI_type_node, 2);
11311 V2SF_type_node = build_vector_type (float_type_node, 2);
11312 V2DI_type_node = build_vector_type (intDI_type_node, 2);
11313 V2DF_type_node = build_vector_type (double_type_node, 2);
11314 V4HI_type_node = build_vector_type (intHI_type_node, 4);
11315 V4SI_type_node = build_vector_type (intSI_type_node, 4);
11316 V4SF_type_node = build_vector_type (float_type_node, 4);
11317 V8HI_type_node = build_vector_type (intHI_type_node, 8);
11318 V16QI_type_node = build_vector_type (intQI_type_node, 16);
11320 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
11321 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
11322 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
11323 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
11325 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
11326 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
11327 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
11328 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
11330 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
11331 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
11332 'vector unsigned short'. */
11334 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
11335 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11336 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
11337 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
11338 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
11340 long_integer_type_internal_node = long_integer_type_node;
11341 long_unsigned_type_internal_node = long_unsigned_type_node;
11342 long_long_integer_type_internal_node = long_long_integer_type_node;
11343 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
11344 intQI_type_internal_node = intQI_type_node;
11345 uintQI_type_internal_node = unsigned_intQI_type_node;
11346 intHI_type_internal_node = intHI_type_node;
11347 uintHI_type_internal_node = unsigned_intHI_type_node;
11348 intSI_type_internal_node = intSI_type_node;
11349 uintSI_type_internal_node = unsigned_intSI_type_node;
11350 intDI_type_internal_node = intDI_type_node;
11351 uintDI_type_internal_node = unsigned_intDI_type_node;
11352 float_type_internal_node = float_type_node;
11353 double_type_internal_node = double_type_node;
11354 void_type_internal_node = void_type_node;
11356 /* Initialize the modes for builtin_function_type, mapping a machine mode to
11357 its tree type node.  */
11358 builtin_mode_to_type[QImode][0] = integer_type_node;
11359 builtin_mode_to_type[HImode][0] = integer_type_node;
11360 builtin_mode_to_type[SImode][0] = intSI_type_node;
11361 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
11362 builtin_mode_to_type[DImode][0] = intDI_type_node;
11363 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
11364 builtin_mode_to_type[SFmode][0] = float_type_node;
11365 builtin_mode_to_type[DFmode][0] = double_type_node;
11366 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
11367 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
11368 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
11369 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
11370 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
11371 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
11372 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
11373 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
11374 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
11375 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
11376 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
11377 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
11378 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
11380 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
11381 TYPE_NAME (bool_char_type_node) = tdecl;
11383 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
11384 TYPE_NAME (bool_short_type_node) = tdecl;
11386 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
11387 TYPE_NAME (bool_int_type_node) = tdecl;
11389 tdecl = add_builtin_type ("__pixel", pixel_type_node);
11390 TYPE_NAME (pixel_type_node) = tdecl;
11392 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
11393 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
11394 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
11395 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
11396 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
11398 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
11399 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
11401 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
11402 TYPE_NAME (V16QI_type_node) = tdecl;
11404 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
11405 TYPE_NAME (bool_V16QI_type_node) = tdecl;
11407 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
11408 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
11410 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
11411 TYPE_NAME (V8HI_type_node) = tdecl;
11413 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
11414 TYPE_NAME (bool_V8HI_type_node) = tdecl;
11416 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
11417 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
11419 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
11420 TYPE_NAME (V4SI_type_node) = tdecl;
11422 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
11423 TYPE_NAME (bool_V4SI_type_node) = tdecl;
11425 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
11426 TYPE_NAME (V4SF_type_node) = tdecl;
11428 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
11429 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
11431 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
11432 TYPE_NAME (V2DF_type_node) = tdecl;
11434 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
11435 TYPE_NAME (V2DI_type_node) = tdecl;
11437 tdecl = add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node);
11438 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
11440 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
11441 TYPE_NAME (bool_V2DI_type_node) = tdecl;
11443 /* Paired and SPE builtins are only available if the compiler was built
11444 with the appropriate options, so only create them when the corresponding
11445 compiler option is enabled.  Create Altivec and VSX builtins on machines
11446 with at least the general purpose extensions (970 and newer) to allow the
11447 use of the target attribute.  */
11448 if (TARGET_PAIRED_FLOAT)
11449 paired_init_builtins ();
11450 if (TARGET_SPE)
11451 spe_init_builtins ();
11452 if (TARGET_EXTRA_BUILTINS)
11453 altivec_init_builtins ();
11454 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
11455 rs6000_common_init_builtins ();
11457 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
11458 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
11459 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
11461 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
11462 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
11463 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
11465 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
11466 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
11467 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
11469 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
11470 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
11471 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
11473 mode = (TARGET_64BIT) ? DImode : SImode;
11474 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
11475 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
11476 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
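/* For example (an illustrative sketch, not from the sources):

     double q = __builtin_recipdiv (x, y);

   divides x by y via the reciprocal-estimate expansion declared above.  */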
11478 #if TARGET_XCOFF
11479 /* AIX libm provides clog as __clog. */
11480 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
11481 set_user_assembler_name (tdecl, "__clog");
11482 #endif
11484 #ifdef SUBTARGET_INIT_BUILTINS
11485 SUBTARGET_INIT_BUILTINS;
11486 #endif
11489 /* Returns the rs6000 builtin decl for CODE. */
11491 static tree
11492 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11494 unsigned fnmask;
11496 if (code >= RS6000_BUILTIN_COUNT)
11497 return error_mark_node;
11499 fnmask = rs6000_builtin_info[code].mask;
11500 if ((fnmask & rs6000_builtin_mask) != fnmask)
11502 rs6000_invalid_builtin ((enum rs6000_builtins)code);
11503 return error_mark_node;
11506 return rs6000_builtin_decls[code];
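/* Declare the SPE builtins that do not fit the regular unary/binary/ternary
   tables, plus the SPE load/store, predicate and evsel builtins.  */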
11509 static void
11510 spe_init_builtins (void)
11512 tree puint_type_node = build_pointer_type (unsigned_type_node);
11513 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
11514 const struct builtin_description *d;
11515 size_t i;
11517 tree v2si_ftype_4_v2si
11518 = build_function_type_list (opaque_V2SI_type_node,
11519 opaque_V2SI_type_node,
11520 opaque_V2SI_type_node,
11521 opaque_V2SI_type_node,
11522 opaque_V2SI_type_node,
11523 NULL_TREE);
11525 tree v2sf_ftype_4_v2sf
11526 = build_function_type_list (opaque_V2SF_type_node,
11527 opaque_V2SF_type_node,
11528 opaque_V2SF_type_node,
11529 opaque_V2SF_type_node,
11530 opaque_V2SF_type_node,
11531 NULL_TREE);
11533 tree int_ftype_int_v2si_v2si
11534 = build_function_type_list (integer_type_node,
11535 integer_type_node,
11536 opaque_V2SI_type_node,
11537 opaque_V2SI_type_node,
11538 NULL_TREE);
11540 tree int_ftype_int_v2sf_v2sf
11541 = build_function_type_list (integer_type_node,
11542 integer_type_node,
11543 opaque_V2SF_type_node,
11544 opaque_V2SF_type_node,
11545 NULL_TREE);
11547 tree void_ftype_v2si_puint_int
11548 = build_function_type_list (void_type_node,
11549 opaque_V2SI_type_node,
11550 puint_type_node,
11551 integer_type_node,
11552 NULL_TREE);
11554 tree void_ftype_v2si_puint_char
11555 = build_function_type_list (void_type_node,
11556 opaque_V2SI_type_node,
11557 puint_type_node,
11558 char_type_node,
11559 NULL_TREE);
11561 tree void_ftype_v2si_pv2si_int
11562 = build_function_type_list (void_type_node,
11563 opaque_V2SI_type_node,
11564 opaque_p_V2SI_type_node,
11565 integer_type_node,
11566 NULL_TREE);
11568 tree void_ftype_v2si_pv2si_char
11569 = build_function_type_list (void_type_node,
11570 opaque_V2SI_type_node,
11571 opaque_p_V2SI_type_node,
11572 char_type_node,
11573 NULL_TREE);
11575 tree void_ftype_int
11576 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11578 tree int_ftype_void
11579 = build_function_type_list (integer_type_node, NULL_TREE);
11581 tree v2si_ftype_pv2si_int
11582 = build_function_type_list (opaque_V2SI_type_node,
11583 opaque_p_V2SI_type_node,
11584 integer_type_node,
11585 NULL_TREE);
11587 tree v2si_ftype_puint_int
11588 = build_function_type_list (opaque_V2SI_type_node,
11589 puint_type_node,
11590 integer_type_node,
11591 NULL_TREE);
11593 tree v2si_ftype_pushort_int
11594 = build_function_type_list (opaque_V2SI_type_node,
11595 pushort_type_node,
11596 integer_type_node,
11597 NULL_TREE);
11599 tree v2si_ftype_signed_char
11600 = build_function_type_list (opaque_V2SI_type_node,
11601 signed_char_type_node,
11602 NULL_TREE);
11604 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
11606 /* Initialize irregular SPE builtins. */
11608 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
11609 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
11610 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
11611 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
11612 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
11613 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
11614 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
11615 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
11616 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
11617 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
11618 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
11619 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
11620 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
11621 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
11622 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
11623 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
11624 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
11625 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
11627 /* Loads. */
11628 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
11629 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
11630 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
11631 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
11632 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
11633 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
11634 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
11635 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
11636 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
11637 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
11638 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
11639 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
11640 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
11641 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
11642 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
11643 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
11644 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
11645 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
11646 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
11647 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
11648 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
11649 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
11651 /* Predicates. */
11652 d = bdesc_spe_predicates;
11653 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
11655 tree type;
11657 switch (insn_data[d->icode].operand[1].mode)
11659 case V2SImode:
11660 type = int_ftype_int_v2si_v2si;
11661 break;
11662 case V2SFmode:
11663 type = int_ftype_int_v2sf_v2sf;
11664 break;
11665 default:
11666 gcc_unreachable ();
11669 def_builtin (d->name, type, d->code);
11672 /* Evsel predicates. */
11673 d = bdesc_spe_evsel;
11674 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
11676 tree type;
11678 switch (insn_data[d->icode].operand[1].mode)
11680 case V2SImode:
11681 type = v2si_ftype_4_v2si;
11682 break;
11683 case V2SFmode:
11684 type = v2sf_ftype_4_v2sf;
11685 break;
11686 default:
11687 gcc_unreachable ();
11690 def_builtin (d->name, type, d->code);
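/* Declare the paired single-precision load/store builtins and the paired
   predicate builtins.  */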
11694 static void
11695 paired_init_builtins (void)
11697 const struct builtin_description *d;
11698 size_t i;
11700 tree int_ftype_int_v2sf_v2sf
11701 = build_function_type_list (integer_type_node,
11702 integer_type_node,
11703 V2SF_type_node,
11704 V2SF_type_node,
11705 NULL_TREE);
11706 tree pcfloat_type_node =
11707 build_pointer_type (build_qualified_type
11708 (float_type_node, TYPE_QUAL_CONST));
11710 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
11711 long_integer_type_node,
11712 pcfloat_type_node,
11713 NULL_TREE);
11714 tree void_ftype_v2sf_long_pcfloat =
11715 build_function_type_list (void_type_node,
11716 V2SF_type_node,
11717 long_integer_type_node,
11718 pcfloat_type_node,
11719 NULL_TREE);
11722 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
11723 PAIRED_BUILTIN_LX);
11726 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
11727 PAIRED_BUILTIN_STX);
11729 /* Predicates. */
11730 d = bdesc_paired_preds;
11731 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
11733 tree type;
11735 if (TARGET_DEBUG_BUILTIN)
11736 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
11737 (int)i, get_insn_name (d->icode), (int)d->icode,
11738 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
11740 switch (insn_data[d->icode].operand[1].mode)
11742 case V2SFmode:
11743 type = int_ftype_int_v2sf_v2sf;
11744 break;
11745 default:
11746 gcc_unreachable ();
11749 def_builtin (d->name, type, d->code);
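/* Declare the AltiVec and VSX builtins, including the overloaded vec_*
   forms, the Cell variants, and the vec_init/vec_set/vec_ext patterns.  */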
11753 static void
11754 altivec_init_builtins (void)
11756 const struct builtin_description *d;
11757 size_t i;
11758 tree ftype;
11759 tree decl;
11761 tree pvoid_type_node = build_pointer_type (void_type_node);
11763 tree pcvoid_type_node
11764 = build_pointer_type (build_qualified_type (void_type_node,
11765 TYPE_QUAL_CONST));
11767 tree int_ftype_opaque
11768 = build_function_type_list (integer_type_node,
11769 opaque_V4SI_type_node, NULL_TREE);
11770 tree opaque_ftype_opaque
11771 = build_function_type_list (integer_type_node, NULL_TREE);
11772 tree opaque_ftype_opaque_int
11773 = build_function_type_list (opaque_V4SI_type_node,
11774 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
11775 tree opaque_ftype_opaque_opaque_int
11776 = build_function_type_list (opaque_V4SI_type_node,
11777 opaque_V4SI_type_node, opaque_V4SI_type_node,
11778 integer_type_node, NULL_TREE);
11779 tree int_ftype_int_opaque_opaque
11780 = build_function_type_list (integer_type_node,
11781 integer_type_node, opaque_V4SI_type_node,
11782 opaque_V4SI_type_node, NULL_TREE);
11783 tree int_ftype_int_v4si_v4si
11784 = build_function_type_list (integer_type_node,
11785 integer_type_node, V4SI_type_node,
11786 V4SI_type_node, NULL_TREE);
11787 tree void_ftype_v4si
11788 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
11789 tree v8hi_ftype_void
11790 = build_function_type_list (V8HI_type_node, NULL_TREE);
11791 tree void_ftype_void
11792 = build_function_type_list (void_type_node, NULL_TREE);
11793 tree void_ftype_int
11794 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
11796 tree opaque_ftype_long_pcvoid
11797 = build_function_type_list (opaque_V4SI_type_node,
11798 long_integer_type_node, pcvoid_type_node,
11799 NULL_TREE);
11800 tree v16qi_ftype_long_pcvoid
11801 = build_function_type_list (V16QI_type_node,
11802 long_integer_type_node, pcvoid_type_node,
11803 NULL_TREE);
11804 tree v8hi_ftype_long_pcvoid
11805 = build_function_type_list (V8HI_type_node,
11806 long_integer_type_node, pcvoid_type_node,
11807 NULL_TREE);
11808 tree v4si_ftype_long_pcvoid
11809 = build_function_type_list (V4SI_type_node,
11810 long_integer_type_node, pcvoid_type_node,
11811 NULL_TREE);
11812 tree v4sf_ftype_long_pcvoid
11813 = build_function_type_list (V4SF_type_node,
11814 long_integer_type_node, pcvoid_type_node,
11815 NULL_TREE);
11816 tree v2df_ftype_long_pcvoid
11817 = build_function_type_list (V2DF_type_node,
11818 long_integer_type_node, pcvoid_type_node,
11819 NULL_TREE);
11820 tree v2di_ftype_long_pcvoid
11821 = build_function_type_list (V2DI_type_node,
11822 long_integer_type_node, pcvoid_type_node,
11823 NULL_TREE);
11825 tree void_ftype_opaque_long_pvoid
11826 = build_function_type_list (void_type_node,
11827 opaque_V4SI_type_node, long_integer_type_node,
11828 pvoid_type_node, NULL_TREE);
11829 tree void_ftype_v4si_long_pvoid
11830 = build_function_type_list (void_type_node,
11831 V4SI_type_node, long_integer_type_node,
11832 pvoid_type_node, NULL_TREE);
11833 tree void_ftype_v16qi_long_pvoid
11834 = build_function_type_list (void_type_node,
11835 V16QI_type_node, long_integer_type_node,
11836 pvoid_type_node, NULL_TREE);
11837 tree void_ftype_v8hi_long_pvoid
11838 = build_function_type_list (void_type_node,
11839 V8HI_type_node, long_integer_type_node,
11840 pvoid_type_node, NULL_TREE);
11841 tree void_ftype_v4sf_long_pvoid
11842 = build_function_type_list (void_type_node,
11843 V4SF_type_node, long_integer_type_node,
11844 pvoid_type_node, NULL_TREE);
11845 tree void_ftype_v2df_long_pvoid
11846 = build_function_type_list (void_type_node,
11847 V2DF_type_node, long_integer_type_node,
11848 pvoid_type_node, NULL_TREE);
11849 tree void_ftype_v2di_long_pvoid
11850 = build_function_type_list (void_type_node,
11851 V2DI_type_node, long_integer_type_node,
11852 pvoid_type_node, NULL_TREE);
11853 tree int_ftype_int_v8hi_v8hi
11854 = build_function_type_list (integer_type_node,
11855 integer_type_node, V8HI_type_node,
11856 V8HI_type_node, NULL_TREE);
11857 tree int_ftype_int_v16qi_v16qi
11858 = build_function_type_list (integer_type_node,
11859 integer_type_node, V16QI_type_node,
11860 V16QI_type_node, NULL_TREE);
11861 tree int_ftype_int_v4sf_v4sf
11862 = build_function_type_list (integer_type_node,
11863 integer_type_node, V4SF_type_node,
11864 V4SF_type_node, NULL_TREE);
11865 tree int_ftype_int_v2df_v2df
11866 = build_function_type_list (integer_type_node,
11867 integer_type_node, V2DF_type_node,
11868 V2DF_type_node, NULL_TREE);
11869 tree v4si_ftype_v4si
11870 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
11871 tree v8hi_ftype_v8hi
11872 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
11873 tree v16qi_ftype_v16qi
11874 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
11875 tree v4sf_ftype_v4sf
11876 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
11877 tree v2df_ftype_v2df
11878 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
11879 tree void_ftype_pcvoid_int_int
11880 = build_function_type_list (void_type_node,
11881 pcvoid_type_node, integer_type_node,
11882 integer_type_node, NULL_TREE);
11884 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
11885 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
11886 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
11887 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
11888 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
11889 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
11890 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
11891 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
11892 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
11893 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
11894 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
11895 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
11896 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
11897 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
11898 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
11899 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
11900 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
11901 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
11902 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
11903 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
11904 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
11905 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
11906 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
11907 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
11908 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
11909 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
11910 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
11911 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
11912 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
11913 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
11915 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
11916 VSX_BUILTIN_LXVD2X_V2DF);
11917 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
11918 VSX_BUILTIN_LXVD2X_V2DI);
11919 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
11920 VSX_BUILTIN_LXVW4X_V4SF);
11921 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
11922 VSX_BUILTIN_LXVW4X_V4SI);
11923 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
11924 VSX_BUILTIN_LXVW4X_V8HI);
11925 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
11926 VSX_BUILTIN_LXVW4X_V16QI);
11927 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
11928 VSX_BUILTIN_STXVD2X_V2DF);
11929 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
11930 VSX_BUILTIN_STXVD2X_V2DI);
11931 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
11932 VSX_BUILTIN_STXVW4X_V4SF);
11933 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
11934 VSX_BUILTIN_STXVW4X_V4SI);
11935 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
11936 VSX_BUILTIN_STXVW4X_V8HI);
11937 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
11938 VSX_BUILTIN_STXVW4X_V16QI);
11939 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
11940 VSX_BUILTIN_VEC_LD);
11941 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
11942 VSX_BUILTIN_VEC_ST);
11944 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
11945 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
11946 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
11948 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
11949 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
11950 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
11951 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
11952 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
11953 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
11954 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
11955 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
11956 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
11957 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
11958 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
11959 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
11961 /* Cell builtins. */
11962 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
11963 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
11964 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
11965 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
11967 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
11968 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
11969 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
11970 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
11972 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
11973 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
11974 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
11975 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
11977 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
11978 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
11979 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
11980 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
11982 /* Add the DST variants. */
11983 d = bdesc_dst;
11984 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
11985 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
11987 /* Initialize the predicates. */
11988 d = bdesc_altivec_preds;
11989 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
11991 enum machine_mode mode1;
11992 tree type;
11994 if (rs6000_overloaded_builtin_p (d->code))
11995 mode1 = VOIDmode;
11996 else
11997 mode1 = insn_data[d->icode].operand[1].mode;
11999 switch (mode1)
12001 case VOIDmode:
12002 type = int_ftype_int_opaque_opaque;
12003 break;
12004 case V4SImode:
12005 type = int_ftype_int_v4si_v4si;
12006 break;
12007 case V8HImode:
12008 type = int_ftype_int_v8hi_v8hi;
12009 break;
12010 case V16QImode:
12011 type = int_ftype_int_v16qi_v16qi;
12012 break;
12013 case V4SFmode:
12014 type = int_ftype_int_v4sf_v4sf;
12015 break;
12016 case V2DFmode:
12017 type = int_ftype_int_v2df_v2df;
12018 break;
12019 default:
12020 gcc_unreachable ();
12023 def_builtin (d->name, type, d->code);
12026 /* Initialize the abs* operators. */
12027 d = bdesc_abs;
12028 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
12030 enum machine_mode mode0;
12031 tree type;
12033 mode0 = insn_data[d->icode].operand[0].mode;
12035 switch (mode0)
12037 case V4SImode:
12038 type = v4si_ftype_v4si;
12039 break;
12040 case V8HImode:
12041 type = v8hi_ftype_v8hi;
12042 break;
12043 case V16QImode:
12044 type = v16qi_ftype_v16qi;
12045 break;
12046 case V4SFmode:
12047 type = v4sf_ftype_v4sf;
12048 break;
12049 case V2DFmode:
12050 type = v2df_ftype_v2df;
12051 break;
12052 default:
12053 gcc_unreachable ();
12056 def_builtin (d->name, type, d->code);
12059 /* Initialize target builtin that implements
12060 targetm.vectorize.builtin_mask_for_load. */
12062 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
12063 v16qi_ftype_long_pcvoid,
12064 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
12065 BUILT_IN_MD, NULL, NULL_TREE);
12066 TREE_READONLY (decl) = 1;
12067 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
12068 altivec_builtin_mask_for_load = decl;
12070 /* Access to the vec_init patterns. */
12071 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
12072 integer_type_node, integer_type_node,
12073 integer_type_node, NULL_TREE);
12074 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
12076 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
12077 short_integer_type_node,
12078 short_integer_type_node,
12079 short_integer_type_node,
12080 short_integer_type_node,
12081 short_integer_type_node,
12082 short_integer_type_node,
12083 short_integer_type_node, NULL_TREE);
12084 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
12086 ftype = build_function_type_list (V16QI_type_node, char_type_node,
12087 char_type_node, char_type_node,
12088 char_type_node, char_type_node,
12089 char_type_node, char_type_node,
12090 char_type_node, char_type_node,
12091 char_type_node, char_type_node,
12092 char_type_node, char_type_node,
12093 char_type_node, char_type_node,
12094 char_type_node, NULL_TREE);
12095 def_builtin ("__builtin_vec_init_v16qi", ftype,
12096 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
12098 ftype = build_function_type_list (V4SF_type_node, float_type_node,
12099 float_type_node, float_type_node,
12100 float_type_node, NULL_TREE);
12101 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
12103 /* VSX builtins. */
12104 ftype = build_function_type_list (V2DF_type_node, double_type_node,
12105 double_type_node, NULL_TREE);
12106 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
12108 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
12109 intDI_type_node, NULL_TREE);
12110 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
12112 /* Access to the vec_set patterns. */
12113 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
12114 intSI_type_node,
12115 integer_type_node, NULL_TREE);
12116 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
12118 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
12119 intHI_type_node,
12120 integer_type_node, NULL_TREE);
12121 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
12123 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
12124 intQI_type_node,
12125 integer_type_node, NULL_TREE);
12126 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
12128 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
12129 float_type_node,
12130 integer_type_node, NULL_TREE);
12131 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
12133 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
12134 double_type_node,
12135 integer_type_node, NULL_TREE);
12136 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
12138 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
12139 intDI_type_node,
12140 integer_type_node, NULL_TREE);
12141 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
12143 /* Access to the vec_extract patterns. */
12144 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
12145 integer_type_node, NULL_TREE);
12146 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
12148 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
12149 integer_type_node, NULL_TREE);
12150 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
12152 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
12153 integer_type_node, NULL_TREE);
12154 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
12156 ftype = build_function_type_list (float_type_node, V4SF_type_node,
12157 integer_type_node, NULL_TREE);
12158 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
12160 ftype = build_function_type_list (double_type_node, V2DF_type_node,
12161 integer_type_node, NULL_TREE);
12162 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
12164 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
12165 integer_type_node, NULL_TREE);
12166 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
12169 /* Hash function for builtin functions with up to 3 arguments and a return
12170 type. */
12171 static unsigned
12172 builtin_hash_function (const void *hash_entry)
12174 unsigned ret = 0;
12175 int i;
12176 const struct builtin_hash_struct *bh =
12177 (const struct builtin_hash_struct *) hash_entry;
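/* Mixed-radix encoding of the key: each mode is a digit in base
   MAX_MACHINE_MODE and each signedness flag a digit in base 2.  */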
12179 for (i = 0; i < 4; i++)
12181 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
12182 ret = (ret * 2) + bh->uns_p[i];
12185 return ret;
12188 /* Compare builtin hash entries H1 and H2 for equivalence. */
12189 static int
12190 builtin_hash_eq (const void *h1, const void *h2)
12192 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
12193 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
12195 return ((p1->mode[0] == p2->mode[0])
12196 && (p1->mode[1] == p2->mode[1])
12197 && (p1->mode[2] == p2->mode[2])
12198 && (p1->mode[3] == p2->mode[3])
12199 && (p1->uns_p[0] == p2->uns_p[0])
12200 && (p1->uns_p[1] == p2->uns_p[1])
12201 && (p1->uns_p[2] == p2->uns_p[2])
12202 && (p1->uns_p[3] == p2->uns_p[3]));
12205 /* Map types for builtin functions with an explicit return type and up to 3
12206 arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
12207 of the missing arguments.  */
12208 static tree
12209 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
12210 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
12211 enum rs6000_builtins builtin, const char *name)
12213 struct builtin_hash_struct h;
12214 struct builtin_hash_struct *h2;
12215 void **found;
12216 int num_args = 3;
12217 int i;
12218 tree ret_type = NULL_TREE;
12219 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
12221 /* Create builtin_hash_table. */
12222 if (builtin_hash_table == NULL)
12223 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
12224 builtin_hash_eq, NULL);
12226 h.type = NULL_TREE;
12227 h.mode[0] = mode_ret;
12228 h.mode[1] = mode_arg0;
12229 h.mode[2] = mode_arg1;
12230 h.mode[3] = mode_arg2;
12231 h.uns_p[0] = 0;
12232 h.uns_p[1] = 0;
12233 h.uns_p[2] = 0;
12234 h.uns_p[3] = 0;
12236 /* If the builtin produces unsigned results or takes unsigned
12237 arguments, and it is returned as a decl for the vectorizer (such as
12238 widening multiplies, permute), make sure the arguments and return value
12239 are type correct. */
12240 switch (builtin)
12242 /* unsigned 2 argument functions. */
12243 case ALTIVEC_BUILTIN_VMULEUB_UNS:
12244 case ALTIVEC_BUILTIN_VMULEUH_UNS:
12245 case ALTIVEC_BUILTIN_VMULOUB_UNS:
12246 case ALTIVEC_BUILTIN_VMULOUH_UNS:
12247 h.uns_p[0] = 1;
12248 h.uns_p[1] = 1;
12249 h.uns_p[2] = 1;
12250 break;
12252 /* unsigned 3 argument functions. */
12253 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
12254 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
12255 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
12256 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
12257 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
12258 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
12259 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
12260 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
12261 case VSX_BUILTIN_VPERM_16QI_UNS:
12262 case VSX_BUILTIN_VPERM_8HI_UNS:
12263 case VSX_BUILTIN_VPERM_4SI_UNS:
12264 case VSX_BUILTIN_VPERM_2DI_UNS:
12265 case VSX_BUILTIN_XXSEL_16QI_UNS:
12266 case VSX_BUILTIN_XXSEL_8HI_UNS:
12267 case VSX_BUILTIN_XXSEL_4SI_UNS:
12268 case VSX_BUILTIN_XXSEL_2DI_UNS:
12269 h.uns_p[0] = 1;
12270 h.uns_p[1] = 1;
12271 h.uns_p[2] = 1;
12272 h.uns_p[3] = 1;
12273 break;
12275 /* signed permute functions with unsigned char mask. */
12276 case ALTIVEC_BUILTIN_VPERM_16QI:
12277 case ALTIVEC_BUILTIN_VPERM_8HI:
12278 case ALTIVEC_BUILTIN_VPERM_4SI:
12279 case ALTIVEC_BUILTIN_VPERM_4SF:
12280 case ALTIVEC_BUILTIN_VPERM_2DI:
12281 case ALTIVEC_BUILTIN_VPERM_2DF:
12282 case VSX_BUILTIN_VPERM_16QI:
12283 case VSX_BUILTIN_VPERM_8HI:
12284 case VSX_BUILTIN_VPERM_4SI:
12285 case VSX_BUILTIN_VPERM_4SF:
12286 case VSX_BUILTIN_VPERM_2DI:
12287 case VSX_BUILTIN_VPERM_2DF:
12288 h.uns_p[3] = 1;
12289 break;
12291 /* unsigned args, signed return. */
12292 case VSX_BUILTIN_XVCVUXDDP_UNS:
12293 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
12294 h.uns_p[1] = 1;
12295 break;
12297 /* signed args, unsigned return. */
12298 case VSX_BUILTIN_XVCVDPUXDS_UNS:
12299 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
12300 h.uns_p[0] = 1;
12301 break;
12303 default:
12304 break;
12307 /* Figure out how many args are present. */
12308 while (num_args > 0 && h.mode[num_args] == VOIDmode)
12309 num_args--;
12311 if (num_args == 0)
12312 fatal_error ("internal error: builtin function %s had no type", name);
12314 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
12315 if (!ret_type && h.uns_p[0])
12316 ret_type = builtin_mode_to_type[h.mode[0]][0];
12318 if (!ret_type)
12319 fatal_error ("internal error: builtin function %s had an unexpected "
12320 "return type %s", name, GET_MODE_NAME (h.mode[0]));
12322 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
12323 arg_type[i] = NULL_TREE;
12325 for (i = 0; i < num_args; i++)
12327 int m = (int) h.mode[i+1];
12328 int uns_p = h.uns_p[i+1];
12330 arg_type[i] = builtin_mode_to_type[m][uns_p];
12331 if (!arg_type[i] && uns_p)
12332 arg_type[i] = builtin_mode_to_type[m][0];
12334 if (!arg_type[i])
12335 fatal_error ("internal error: builtin function %s, argument %d "
12336 "had unexpected argument type %s", name, i,
12337 GET_MODE_NAME (m));
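/* Function types are memoized: look the signature up in the hash table
   and only build a new FUNCTION_TYPE node on a miss.  */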
12340 found = htab_find_slot (builtin_hash_table, &h, INSERT);
12341 if (*found == NULL)
12343 h2 = ggc_alloc_builtin_hash_struct ();
12344 *h2 = h;
12345 *found = (void *)h2;
12347 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
12348 arg_type[2], NULL_TREE);
12351 return ((struct builtin_hash_struct *)(*found))->type;
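/* Declare the builtins shared between AltiVec, VSX, SPE and paired float,
   walking the unary, binary and ternary operator tables.  */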
12354 static void
12355 rs6000_common_init_builtins (void)
12357 const struct builtin_description *d;
12358 size_t i;
12360 tree opaque_ftype_opaque = NULL_TREE;
12361 tree opaque_ftype_opaque_opaque = NULL_TREE;
12362 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
12363 tree v2si_ftype_qi = NULL_TREE;
12364 tree v2si_ftype_v2si_qi = NULL_TREE;
12365 tree v2si_ftype_int_qi = NULL_TREE;
12366 unsigned builtin_mask = rs6000_builtin_mask;
12368 if (!TARGET_PAIRED_FLOAT)
12370 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
12371 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
12374 /* Paired and SPE builtins are only available if the compiler was built
12375 with the appropriate options, so only create them when the corresponding
12376 compiler option is enabled.  Create Altivec and VSX builtins on machines
12377 with at least the general purpose extensions (970 and newer) to allow the
12378 use of the target attribute.  */
12380 if (TARGET_EXTRA_BUILTINS)
12381 builtin_mask |= RS6000_BTM_COMMON;
12383 /* Add the ternary operators. */
12384 d = bdesc_3arg;
12385 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
12387 tree type;
12388 unsigned mask = d->mask;
12390 if ((mask & builtin_mask) != mask)
12392 if (TARGET_DEBUG_BUILTIN)
12393 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
12394 continue;
12397 if (rs6000_overloaded_builtin_p (d->code))
12399 if (! (type = opaque_ftype_opaque_opaque_opaque))
12400 type = opaque_ftype_opaque_opaque_opaque
12401 = build_function_type_list (opaque_V4SI_type_node,
12402 opaque_V4SI_type_node,
12403 opaque_V4SI_type_node,
12404 opaque_V4SI_type_node,
12405 NULL_TREE);
12407 else
12409 enum insn_code icode = d->icode;
12410 if (d->name == 0 || icode == CODE_FOR_nothing)
12411 continue;
12413 type = builtin_function_type (insn_data[icode].operand[0].mode,
12414 insn_data[icode].operand[1].mode,
12415 insn_data[icode].operand[2].mode,
12416 insn_data[icode].operand[3].mode,
12417 d->code, d->name);
12420 def_builtin (d->name, type, d->code);
12423 /* Add the binary operators. */
12424 d = bdesc_2arg;
12425 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12427 enum machine_mode mode0, mode1, mode2;
12428 tree type;
12429 unsigned mask = d->mask;
12431 if ((mask & builtin_mask) != mask)
12433 if (TARGET_DEBUG_BUILTIN)
12434 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
12435 continue;
12438 if (rs6000_overloaded_builtin_p (d->code))
12440 if (! (type = opaque_ftype_opaque_opaque))
12441 type = opaque_ftype_opaque_opaque
12442 = build_function_type_list (opaque_V4SI_type_node,
12443 opaque_V4SI_type_node,
12444 opaque_V4SI_type_node,
12445 NULL_TREE);
12447 else
12449 enum insn_code icode = d->icode;
12450 if (d->name == 0 || icode == CODE_FOR_nothing)
12451 continue;
12453 mode0 = insn_data[icode].operand[0].mode;
12454 mode1 = insn_data[icode].operand[1].mode;
12455 mode2 = insn_data[icode].operand[2].mode;
12457 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
12459 if (! (type = v2si_ftype_v2si_qi))
12460 type = v2si_ftype_v2si_qi
12461 = build_function_type_list (opaque_V2SI_type_node,
12462 opaque_V2SI_type_node,
12463 char_type_node,
12464 NULL_TREE);
12467 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
12468 && mode2 == QImode)
12470 if (! (type = v2si_ftype_int_qi))
12471 type = v2si_ftype_int_qi
12472 = build_function_type_list (opaque_V2SI_type_node,
12473 integer_type_node,
12474 char_type_node,
12475 NULL_TREE);
12478 else
12479 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
12480 d->code, d->name);
12483 def_builtin (d->name, type, d->code);
12486 /* Add the simple unary operators. */
12487 d = bdesc_1arg;
12488 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12490 enum machine_mode mode0, mode1;
12491 tree type;
12492 unsigned mask = d->mask;
12494 if ((mask & builtin_mask) != mask)
12496 if (TARGET_DEBUG_BUILTIN)
12497 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
12498 continue;
12501 if (rs6000_overloaded_builtin_p (d->code))
12503 if (! (type = opaque_ftype_opaque))
12504 type = opaque_ftype_opaque
12505 = build_function_type_list (opaque_V4SI_type_node,
12506 opaque_V4SI_type_node,
12507 NULL_TREE);
12509 else
12511 enum insn_code icode = d->icode;
12512 if (d->name == 0 || icode == CODE_FOR_nothing)
12513 continue;
12515 mode0 = insn_data[icode].operand[0].mode;
12516 mode1 = insn_data[icode].operand[1].mode;
12518 if (mode0 == V2SImode && mode1 == QImode)
12520 if (! (type = v2si_ftype_qi))
12521 type = v2si_ftype_qi
12522 = build_function_type_list (opaque_V2SI_type_node,
12523 char_type_node,
12524 NULL_TREE);
12527 else
12528 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
12529 d->code, d->name);
12532 def_builtin (d->name, type, d->code);
12536 static void
12537 rs6000_init_libfuncs (void)
12539 if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
12540 && !TARGET_POWER2 && !TARGET_POWERPC)
12542 /* AIX library routines for float->int conversion. */
12543 set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
12544 set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
12545 set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
12546 set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
12549 if (!TARGET_IEEEQUAD)
12550 /* AIX/Darwin/64-bit Linux quad floating point routines. */
12551 if (!TARGET_XL_COMPAT)
12553 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
12554 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
12555 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
12556 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
12558 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
12560 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
12561 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
12562 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
12563 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
12564 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
12565 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
12566 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
12568 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
12569 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
12570 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
12571 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
12572 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
12573 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
12574 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
12575 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
12578 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
12579 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
12581 else
12583 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
12584 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
12585 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
12586 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
12588 else
12590 /* 32-bit SVR4 quad floating point routines. */
12592 set_optab_libfunc (add_optab, TFmode, "_q_add");
12593 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
12594 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
12595 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
12596 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
12597 if (TARGET_PPC_GPOPT || TARGET_POWER2)
12598 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
12600 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
12601 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
12602 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
12603 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
12604 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
12605 set_optab_libfunc (le_optab, TFmode, "_q_fle");
12607 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
12608 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
12609 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
12610 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
12611 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
12612 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
12613 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
12614 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
12619 /* Expand a block clear operation, and return 1 if successful. Return 0
12620 if we should let the compiler generate normal code.
12622 operands[0] is the destination
12623 operands[1] is the length
12624 operands[3] is the alignment */
12627 expand_block_clear (rtx operands[])
12629 rtx orig_dest = operands[0];
12630 rtx bytes_rtx = operands[1];
12631 rtx align_rtx = operands[3];
12632 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
12633 HOST_WIDE_INT align;
12634 HOST_WIDE_INT bytes;
12635 int offset;
12636 int clear_bytes;
12637 int clear_step;
12639 /* If this is not a fixed size clear, just call memset. */
12640 if (! constp)
12641 return 0;
12643 /* This must be a fixed size alignment. */
12644 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12645 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12647 /* Anything to clear? */
12648 bytes = INTVAL (bytes_rtx);
12649 if (bytes <= 0)
12650 return 1;
12652 /* Use the builtin memset after a point, to avoid huge code bloat.
12653 When optimize_size, avoid any significant code bloat; calling
12654 memset is about 4 instructions, so allow for one instruction to
12655 load zero and three to do clearing. */
12656 if (TARGET_ALTIVEC && align >= 128)
12657 clear_step = 16;
12658 else if (TARGET_POWERPC64 && align >= 32)
12659 clear_step = 8;
12660 else if (TARGET_SPE && align >= 64)
12661 clear_step = 8;
12662 else
12663 clear_step = 4;
12665 if (optimize_size && bytes > 3 * clear_step)
12666 return 0;
12667 if (! optimize_size && bytes > 8 * clear_step)
12668 return 0;
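  /* As an illustration of the limits above: when none of the AltiVec,
     SPE, or 64-bit cases apply, clear_step is 4, so clears of up to
     32 bytes (8 * 4) are expanded inline, but only up to 12 bytes
     (3 * 4) when optimizing for size.  With AltiVec and 128-bit
     alignment, clear_step is 16 and the inline limit becomes 128
     bytes.  */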
12670 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
12672 enum machine_mode mode = BLKmode;
12673 rtx dest;
12675 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
12677 clear_bytes = 16;
12678 mode = V4SImode;
12680 else if (bytes >= 8 && TARGET_SPE && align >= 64)
12682 clear_bytes = 8;
12683 mode = V2SImode;
12685 else if (bytes >= 8 && TARGET_POWERPC64
12686 /* 64-bit loads and stores require word-aligned
12687 displacements. */
12688 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12690 clear_bytes = 8;
12691 mode = DImode;
12693 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12694 { /* clear 4 bytes */
12695 clear_bytes = 4;
12696 mode = SImode;
12698 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12699 { /* clear 2 bytes */
12700 clear_bytes = 2;
12701 mode = HImode;
12703 else /* clear 1 byte at a time */
12705 clear_bytes = 1;
12706 mode = QImode;
12709 dest = adjust_address (orig_dest, mode, offset);
12711 emit_move_insn (dest, CONST0_RTX (mode));
12714 return 1;
12718 /* Expand a block move operation, and return 1 if successful. Return 0
12719 if we should let the compiler generate normal code.
12721 operands[0] is the destination
12722 operands[1] is the source
12723 operands[2] is the length
12724 operands[3] is the alignment */
12726 #define MAX_MOVE_REG 4
12729 expand_block_move (rtx operands[])
12731 rtx orig_dest = operands[0];
12732 rtx orig_src = operands[1];
12733 rtx bytes_rtx = operands[2];
12734 rtx align_rtx = operands[3];
12735 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
12736 int align;
12737 int bytes;
12738 int offset;
12739 int move_bytes;
12740 rtx stores[MAX_MOVE_REG];
12741 int num_reg = 0;
12743 /* If this is not a fixed size move, just call memcpy. */
12744 if (! constp)
12745 return 0;
12747 /* This must be a fixed size alignment. */
12748 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
12749 align = INTVAL (align_rtx) * BITS_PER_UNIT;
12751 /* Anything to move? */
12752 bytes = INTVAL (bytes_rtx);
12753 if (bytes <= 0)
12754 return 1;
12756 if (bytes > rs6000_block_move_inline_limit)
12757 return 0;
12759 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
12761 union {
12762 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
12763 rtx (*mov) (rtx, rtx);
12764 } gen_func;
12765 enum machine_mode mode = BLKmode;
12766 rtx src, dest;
12768 /* Altivec first, since it will be faster than a string move
12769 when it applies, and usually not significantly larger. */
12770 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
12772 move_bytes = 16;
12773 mode = V4SImode;
12774 gen_func.mov = gen_movv4si;
12776 else if (TARGET_SPE && bytes >= 8 && align >= 64)
12778 move_bytes = 8;
12779 mode = V2SImode;
12780 gen_func.mov = gen_movv2si;
12782 else if (TARGET_STRING
12783 && bytes > 24 /* move up to 32 bytes at a time */
12784 && ! fixed_regs[5]
12785 && ! fixed_regs[6]
12786 && ! fixed_regs[7]
12787 && ! fixed_regs[8]
12788 && ! fixed_regs[9]
12789 && ! fixed_regs[10]
12790 && ! fixed_regs[11]
12791 && ! fixed_regs[12])
12793 move_bytes = (bytes > 32) ? 32 : bytes;
12794 gen_func.movmemsi = gen_movmemsi_8reg;
12796 else if (TARGET_STRING
12797 && bytes > 16 /* move up to 24 bytes at a time */
12798 && ! fixed_regs[5]
12799 && ! fixed_regs[6]
12800 && ! fixed_regs[7]
12801 && ! fixed_regs[8]
12802 && ! fixed_regs[9]
12803 && ! fixed_regs[10])
12805 move_bytes = (bytes > 24) ? 24 : bytes;
12806 gen_func.movmemsi = gen_movmemsi_6reg;
12808 else if (TARGET_STRING
12809 && bytes > 8 /* move up to 16 bytes at a time */
12810 && ! fixed_regs[5]
12811 && ! fixed_regs[6]
12812 && ! fixed_regs[7]
12813 && ! fixed_regs[8])
12815 move_bytes = (bytes > 16) ? 16 : bytes;
12816 gen_func.movmemsi = gen_movmemsi_4reg;
12818 else if (bytes >= 8 && TARGET_POWERPC64
12819 /* 64-bit loads and stores require word-aligned
12820 displacements. */
12821 && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
12823 move_bytes = 8;
12824 mode = DImode;
12825 gen_func.mov = gen_movdi;
12827 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
12828 { /* move up to 8 bytes at a time */
12829 move_bytes = (bytes > 8) ? 8 : bytes;
12830 gen_func.movmemsi = gen_movmemsi_2reg;
12832 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
12833 { /* move 4 bytes */
12834 move_bytes = 4;
12835 mode = SImode;
12836 gen_func.mov = gen_movsi;
12838 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
12839 { /* move 2 bytes */
12840 move_bytes = 2;
12841 mode = HImode;
12842 gen_func.mov = gen_movhi;
12844 else if (TARGET_STRING && bytes > 1)
12845 { /* move up to 4 bytes at a time */
12846 move_bytes = (bytes > 4) ? 4 : bytes;
12847 gen_func.movmemsi = gen_movmemsi_1reg;
12849 else /* move 1 byte at a time */
12851 move_bytes = 1;
12852 mode = QImode;
12853 gen_func.mov = gen_movqi;
12856 src = adjust_address (orig_src, mode, offset);
12857 dest = adjust_address (orig_dest, mode, offset);
12859 if (mode != BLKmode)
12861 rtx tmp_reg = gen_reg_rtx (mode);
12863 emit_insn ((*gen_func.mov) (tmp_reg, src));
12864 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
12867 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
12869 int i;
12870 for (i = 0; i < num_reg; i++)
12871 emit_insn (stores[i]);
12872 num_reg = 0;
12875 if (mode == BLKmode)
12877 /* Move the address into scratch registers. The movmemsi
12878 patterns require zero offset. */
12879 if (!REG_P (XEXP (src, 0)))
12881 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
12882 src = replace_equiv_address (src, src_reg);
12884 set_mem_size (src, move_bytes);
12886 if (!REG_P (XEXP (dest, 0)))
12888 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
12889 dest = replace_equiv_address (dest, dest_reg);
12891 set_mem_size (dest, move_bytes);
12893 emit_insn ((*gen_func.movmemsi) (dest, src,
12894 GEN_INT (move_bytes & 31),
12895 align_rtx));
12899 return 1;
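/* For example, a 32-byte copy with TARGET_ALTIVEC and 128-bit alignment
   is expanded as two V4SImode load/store pairs, while a 30-byte copy
   with TARGET_STRING (assuming r5..r12 are not fixed) is handled in a
   single iteration by gen_movmemsi_8reg as one lswi/stswi pair.  */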
12903 /* Return a string to perform a load_multiple operation.
12904 operands[0] is the vector.
12905 operands[1] is the source address.
12906 operands[2] is the first destination register. */
12908 const char *
12909 rs6000_output_load_multiple (rtx operands[3])
12911 /* We have to handle the case where the pseudo holding the address
12912 is assigned to one of the output registers. */
12913 int i, j;
12914 int words = XVECLEN (operands[0], 0);
12915 rtx xop[10];
12917 if (XVECLEN (operands[0], 0) == 1)
12918 return "{l|lwz} %2,0(%1)";
12920 for (i = 0; i < words; i++)
12921 if (refers_to_regno_p (REGNO (operands[2]) + i,
12922 REGNO (operands[2]) + i + 1, operands[1], 0))
12924 if (i == words-1)
12926 xop[0] = GEN_INT (4 * (words-1));
12927 xop[1] = operands[1];
12928 xop[2] = operands[2];
12929 output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
12930 return "";
12932 else if (i == 0)
12934 xop[0] = GEN_INT (4 * (words-1));
12935 xop[1] = operands[1];
12936 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
12937 output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
12938 return "";
12940 else
12942 for (j = 0; j < words; j++)
12943 if (j != i)
12945 xop[0] = GEN_INT (j * 4);
12946 xop[1] = operands[1];
12947 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
12948 output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
12950 xop[0] = GEN_INT (i * 4);
12951 xop[1] = operands[1];
12952 output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
12953 return "";
12957 return "{lsi|lswi} %2,%1,%N0";
12961 /* A validation routine: say whether CODE, a condition code, and MODE
12962 match. The other alternatives either don't make sense or should
12963 never be generated. */
12965 void
12966 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
12968 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
12969 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
12970 && GET_MODE_CLASS (mode) == MODE_CC);
12972 /* These don't make sense. */
12973 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
12974 || mode != CCUNSmode);
12976 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
12977 || mode == CCUNSmode);
12979 gcc_assert (mode == CCFPmode
12980 || (code != ORDERED && code != UNORDERED
12981 && code != UNEQ && code != LTGT
12982 && code != UNGT && code != UNLT
12983 && code != UNGE && code != UNLE));
12985 /* These should never be generated except for
12986 flag_finite_math_only. */
12987 gcc_assert (mode != CCFPmode
12988 || flag_finite_math_only
12989 || (code != LE && code != GE
12990 && code != UNEQ && code != LTGT
12991 && code != UNGT && code != UNLT));
12993 /* These are invalid; the information is not there. */
12994 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
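/* For instance, a signed comparison such as GT must not appear with
   CCUNSmode, and an unsigned one such as GTU is only valid with
   CCUNSmode; the asserts above reject the mismatched pairings.  */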
12998 /* Return 1 if ANDOP is a mask that sets no bits outside of the
12999 mask required to convert the result of a rotate insn into a shift
13000 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
13003 includes_lshift_p (rtx shiftop, rtx andop)
13005 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13007 shift_mask <<= INTVAL (shiftop);
13009 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
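/* For example, with SHIFTOP == 8 the mask 0xffffff00 qualifies, since
   (x rotated left by 8) & 0xffffff00 equals x << 8 in 32 bits, whereas
   0xffffff80 does not: bit 7 would keep a rotated-in bit.  */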
13012 /* Similar, but for right shift. */
13015 includes_rshift_p (rtx shiftop, rtx andop)
13017 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
13019 shift_mask >>= INTVAL (shiftop);
13021 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
13024 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
13025 to perform a left shift. It must have exactly SHIFTOP least
13026 significant 0's, then one or more 1's, then zero or more 0's. */
13029 includes_rldic_lshift_p (rtx shiftop, rtx andop)
13031 if (GET_CODE (andop) == CONST_INT)
13033 HOST_WIDE_INT c, lsb, shift_mask;
13035 c = INTVAL (andop);
13036 if (c == 0 || c == ~0)
13037 return 0;
13039 shift_mask = ~0;
13040 shift_mask <<= INTVAL (shiftop);
13042 /* Find the least significant one bit. */
13043 lsb = c & -c;
13045 /* It must coincide with the LSB of the shift mask. */
13046 if (-lsb != shift_mask)
13047 return 0;
13049 /* Invert to look for the next transition (if any). */
13050 c = ~c;
13052 /* Remove the low group of ones (originally low group of zeros). */
13053 c &= -lsb;
13055 /* Again find the lsb, and check we have all 1's above. */
13056 lsb = c & -c;
13057 return c == -lsb;
13059 else if (GET_CODE (andop) == CONST_DOUBLE
13060 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13062 HOST_WIDE_INT low, high, lsb;
13063 HOST_WIDE_INT shift_mask_low, shift_mask_high;
13065 low = CONST_DOUBLE_LOW (andop);
13066 if (HOST_BITS_PER_WIDE_INT < 64)
13067 high = CONST_DOUBLE_HIGH (andop);
13069 if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
13070 || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
13071 return 0;
13073 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13075 shift_mask_high = ~0;
13076 if (INTVAL (shiftop) > 32)
13077 shift_mask_high <<= INTVAL (shiftop) - 32;
13079 lsb = high & -high;
13081 if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
13082 return 0;
13084 high = ~high;
13085 high &= -lsb;
13087 lsb = high & -high;
13088 return high == -lsb;
13091 shift_mask_low = ~0;
13092 shift_mask_low <<= INTVAL (shiftop);
13094 lsb = low & -low;
13096 if (-lsb != shift_mask_low)
13097 return 0;
13099 if (HOST_BITS_PER_WIDE_INT < 64)
13100 high = ~high;
13101 low = ~low;
13102 low &= -lsb;
13104 if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
13106 lsb = high & -high;
13107 return high == -lsb;
13110 lsb = low & -low;
13111 return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
13113 else
13114 return 0;
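/* A worked example for the CONST_INT case above: with SHIFTOP == 4 and
   ANDOP == 0xff0 (exactly four low zeros, then eight ones, then zeros)
   the lsb is 0x10, which matches -lsb == shift_mask, and after
   inverting and stripping the low ones the remaining bits form a
   single contiguous run, so the function returns 1.  A mask such as
   0xfe0 fails, since its five low zeros do not match SHIFTOP.  */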
13117 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
13118 to perform a left shift. It must have SHIFTOP or more least
13119 significant 0's, with the remainder of the word 1's. */
13122 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
13124 if (GET_CODE (andop) == CONST_INT)
13126 HOST_WIDE_INT c, lsb, shift_mask;
13128 shift_mask = ~0;
13129 shift_mask <<= INTVAL (shiftop);
13130 c = INTVAL (andop);
13132 /* Find the least significant one bit. */
13133 lsb = c & -c;
13135 /* It must be covered by the shift mask.
13136 This test also rejects c == 0. */
13137 if ((lsb & shift_mask) == 0)
13138 return 0;
13140 /* Check we have all 1's above the transition, and reject all 1's. */
13141 return c == -lsb && lsb != 1;
13143 else if (GET_CODE (andop) == CONST_DOUBLE
13144 && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
13146 HOST_WIDE_INT low, lsb, shift_mask_low;
13148 low = CONST_DOUBLE_LOW (andop);
13150 if (HOST_BITS_PER_WIDE_INT < 64)
13152 HOST_WIDE_INT high, shift_mask_high;
13154 high = CONST_DOUBLE_HIGH (andop);
13156 if (low == 0)
13158 shift_mask_high = ~0;
13159 if (INTVAL (shiftop) > 32)
13160 shift_mask_high <<= INTVAL (shiftop) - 32;
13162 lsb = high & -high;
13164 if ((lsb & shift_mask_high) == 0)
13165 return 0;
13167 return high == -lsb;
13169 if (high != ~0)
13170 return 0;
13173 shift_mask_low = ~0;
13174 shift_mask_low <<= INTVAL (shiftop);
13176 lsb = low & -low;
13178 if ((lsb & shift_mask_low) == 0)
13179 return 0;
13181 return low == -lsb && lsb != 1;
13183 else
13184 return 0;
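/* For illustration: with SHIFTOP == 4, a mask such as
   0xfffffffffffffff0 (or any mask with at least four trailing zeros
   and all ones above them, e.g. 0xffffffffffffff00) returns 1, while
   0xff0 fails because the bits above the run are not all ones.  */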
13187 /* Return 1 if the operands will generate valid arguments to the rlwimi
13188 instruction for an insert with right shift in 64-bit mode. The mask may
13189 not start on the first bit or stop on the last bit because the
13190 wrap-around effects of the instruction do not match the RTL insn's semantics. */
13193 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
13195 if (INTVAL (startop) > 32
13196 && INTVAL (startop) < 64
13197 && INTVAL (sizeop) > 1
13198 && INTVAL (sizeop) + INTVAL (startop) < 64
13199 && INTVAL (shiftop) > 0
13200 && INTVAL (sizeop) + INTVAL (shiftop) < 32
13201 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
13202 return 1;
13204 return 0;
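/* E.g. SIZEOP == 8, STARTOP == 40, SHIFTOP == 16 satisfies every test
   (the mask stays off the first and last bits, and 8 + 16 < 32 keeps
   the rotate within rlwimi's 32-bit range), so rlwimi may be used;
   STARTOP == 32 or SIZEOP + STARTOP == 64 would be rejected.  */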
13207 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
13208 for lfq and stfq insns, iff the registers are hard registers. */
13211 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
13213 /* We might have been passed a SUBREG. */
13214 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
13215 return 0;
13217 /* We might have been passed non-floating-point registers. */
13218 if (!FP_REGNO_P (REGNO (reg1))
13219 || !FP_REGNO_P (REGNO (reg2)))
13220 return 0;
13222 return (REGNO (reg1) == REGNO (reg2) - 1);
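/* For example, the pair (fr10, fr11) is a candidate, while
   (fr11, fr10) and (fr10, fr12) are not, since the registers must be
   consecutive and in ascending order.  */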
13225 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
13226 addr1 and addr2 must be in consecutive memory locations
13227 (addr2 == addr1 + 8). */
13230 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
13232 rtx addr1, addr2;
13233 unsigned int reg1, reg2;
13234 int offset1, offset2;
13236 /* The mems cannot be volatile. */
13237 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
13238 return 0;
13240 addr1 = XEXP (mem1, 0);
13241 addr2 = XEXP (mem2, 0);
13243 /* Extract an offset (if used) from the first addr. */
13244 if (GET_CODE (addr1) == PLUS)
13246 /* If not a REG, return zero. */
13247 if (GET_CODE (XEXP (addr1, 0)) != REG)
13248 return 0;
13249 else
13251 reg1 = REGNO (XEXP (addr1, 0));
13252 /* The offset must be constant! */
13253 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
13254 return 0;
13255 offset1 = INTVAL (XEXP (addr1, 1));
13258 else if (GET_CODE (addr1) != REG)
13259 return 0;
13260 else
13262 reg1 = REGNO (addr1);
13263 /* This was a simple (mem (reg)) expression. Offset is 0. */
13264 offset1 = 0;
13267 /* And now for the second addr. */
13268 if (GET_CODE (addr2) == PLUS)
13270 /* If not a REG, return zero. */
13271 if (GET_CODE (XEXP (addr2, 0)) != REG)
13272 return 0;
13273 else
13275 reg2 = REGNO (XEXP (addr2, 0));
13276 /* The offset must be constant. */
13277 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
13278 return 0;
13279 offset2 = INTVAL (XEXP (addr2, 1));
13282 else if (GET_CODE (addr2) != REG)
13283 return 0;
13284 else
13286 reg2 = REGNO (addr2);
13287 /* This was a simple (mem (reg)) expression. Offset is 0. */
13288 offset2 = 0;
13291 /* Both of these must have the same base register. */
13292 if (reg1 != reg2)
13293 return 0;
13295 /* The offset for the second addr must be 8 more than the first addr. */
13296 if (offset2 != offset1 + 8)
13297 return 0;
13299 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
13300 instructions. */
13301 return 1;
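/* For example (register numbers illustrative), the pair 8(r3) and
   16(r3) passes: same base register, second offset exactly 8 greater.
   The pairs 8(r3)/16(r4) and 8(r3)/20(r3) would both fail.  */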
13306 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
13308 static bool eliminated = false;
13309 rtx ret;
13311 if (mode != SDmode)
13312 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
13313 else
13315 rtx mem = cfun->machine->sdmode_stack_slot;
13316 gcc_assert (mem != NULL_RTX);
13318 if (!eliminated)
13320 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
13321 cfun->machine->sdmode_stack_slot = mem;
13322 eliminated = true;
13324 ret = mem;
13327 if (TARGET_DEBUG_ADDR)
13329 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
13330 GET_MODE_NAME (mode));
13331 if (!ret)
13332 fprintf (stderr, "\tNULL_RTX\n");
13333 else
13334 debug_rtx (ret);
13337 return ret;
13340 static tree
13341 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
13343 /* Don't walk into types. */
13344 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
13346 *walk_subtrees = 0;
13347 return NULL_TREE;
13350 switch (TREE_CODE (*tp))
13352 case VAR_DECL:
13353 case PARM_DECL:
13354 case FIELD_DECL:
13355 case RESULT_DECL:
13356 case SSA_NAME:
13357 case REAL_CST:
13358 case MEM_REF:
13359 case VIEW_CONVERT_EXPR:
13360 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
13361 return *tp;
13362 break;
13363 default:
13364 break;
13367 return NULL_TREE;
13370 enum reload_reg_type {
13371 GPR_REGISTER_TYPE,
13372 VECTOR_REGISTER_TYPE,
13373 OTHER_REGISTER_TYPE
13376 static enum reload_reg_type
13377 rs6000_reload_register_type (enum reg_class rclass)
13379 switch (rclass)
13381 case GENERAL_REGS:
13382 case BASE_REGS:
13383 return GPR_REGISTER_TYPE;
13385 case FLOAT_REGS:
13386 case ALTIVEC_REGS:
13387 case VSX_REGS:
13388 return VECTOR_REGISTER_TYPE;
13390 default:
13391 return OTHER_REGISTER_TYPE;
13395 /* Inform reload about cases where moving X with a mode MODE to a register in
13396 RCLASS requires an extra scratch or immediate register. Return the class
13397 needed for the immediate register.
13399 For VSX and Altivec, we may need a register to convert sp+offset into
13400 reg+sp.
13402 For misaligned 64-bit gpr loads and stores we need a register to
13403 convert an offset address to indirect. */
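/* For example (register numbers illustrative): a DImode access at
   6(r4) is invalid because the DS instruction format requires the
   displacement to be a multiple of 4, so the hook below requests a
   scratch register and the access is rewritten to go through it,
   roughly "addi scratch,r4,6" followed by "ld r3,0(scratch)".  */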
13405 static reg_class_t
13406 rs6000_secondary_reload (bool in_p,
13407 rtx x,
13408 reg_class_t rclass_i,
13409 enum machine_mode mode,
13410 secondary_reload_info *sri)
13412 enum reg_class rclass = (enum reg_class) rclass_i;
13413 reg_class_t ret = ALL_REGS;
13414 enum insn_code icode;
13415 bool default_p = false;
13417 sri->icode = CODE_FOR_nothing;
13419 /* Convert vector loads and stores into gprs to use an additional base
13420 register. */
13421 icode = rs6000_vector_reload[mode][in_p != false];
13422 if (icode != CODE_FOR_nothing)
13424 ret = NO_REGS;
13425 sri->icode = CODE_FOR_nothing;
13426 sri->extra_cost = 0;
13428 if (GET_CODE (x) == MEM)
13430 rtx addr = XEXP (x, 0);
13432 /* Loads to and stores from gprs can do reg+offset, and don't need
13433 an extra register in that case, but they do need an extra
13434 register if the addressing is reg+reg or (reg+reg)&(-16). */
13435 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
13437 if (!legitimate_indirect_address_p (addr, false)
13438 && !rs6000_legitimate_offset_address_p (TImode, addr, false))
13440 sri->icode = icode;
13441 /* Account for splitting the loads, and converting the
13442 address from reg+reg to reg. */
13443 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
13444 + ((GET_CODE (addr) == AND) ? 1 : 0));
13447 /* Loads to and stores from vector registers can only do reg+reg
13448 addressing. Altivec registers can also do (reg+reg)&(-16). */
13449 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
13450 || rclass == FLOAT_REGS || rclass == NO_REGS)
13452 if (!VECTOR_MEM_ALTIVEC_P (mode)
13453 && GET_CODE (addr) == AND
13454 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13455 && INTVAL (XEXP (addr, 1)) == -16
13456 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
13457 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
13459 sri->icode = icode;
13460 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
13461 ? 2 : 1);
13463 else if (!legitimate_indirect_address_p (addr, false)
13464 && (rclass == NO_REGS
13465 || !legitimate_indexed_address_p (addr, false)))
13467 sri->icode = icode;
13468 sri->extra_cost = 1;
13470 else
13471 icode = CODE_FOR_nothing;
13473 /* Any other loads, including those to pseudo registers which haven't been
13474 assigned to a hard register yet, default to requiring a scratch
13475 register. */
13476 else
13478 sri->icode = icode;
13479 sri->extra_cost = 2;
13482 else if (REG_P (x))
13484 int regno = true_regnum (x);
13486 icode = CODE_FOR_nothing;
13487 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
13488 default_p = true;
13489 else
13491 enum reg_class xclass = REGNO_REG_CLASS (regno);
13492 enum reload_reg_type rtype1 = rs6000_reload_register_type (rclass);
13493 enum reload_reg_type rtype2 = rs6000_reload_register_type (xclass);
13495 /* If memory is needed, use default_secondary_reload to create the
13496 stack slot. */
13497 if (rtype1 != rtype2 || rtype1 == OTHER_REGISTER_TYPE)
13498 default_p = true;
13499 else
13500 ret = NO_REGS;
13503 else
13504 default_p = true;
13506 else if (TARGET_POWERPC64
13507 && rs6000_reload_register_type (rclass) == GPR_REGISTER_TYPE
13508 && MEM_P (x)
13509 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
13511 rtx addr = XEXP (x, 0);
13513 if (GET_CODE (addr) == PRE_MODIFY)
13514 addr = XEXP (addr, 1);
13515 else if (GET_CODE (addr) == LO_SUM
13516 && GET_CODE (XEXP (addr, 0)) == REG
13517 && GET_CODE (XEXP (addr, 1)) == CONST)
13518 addr = XEXP (XEXP (addr, 1), 0);
13520 if (GET_CODE (addr) == PLUS
13521 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13522 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
13524 if (in_p)
13525 sri->icode = CODE_FOR_reload_di_load;
13526 else
13527 sri->icode = CODE_FOR_reload_di_store;
13528 sri->extra_cost = 2;
13529 ret = NO_REGS;
13531 else
13532 default_p = true;
13534 else
13535 default_p = true;
13537 if (default_p)
13538 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
13540 gcc_assert (ret != ALL_REGS);
13542 if (TARGET_DEBUG_ADDR)
13544 fprintf (stderr,
13545 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
13546 "mode = %s",
13547 reg_class_names[ret],
13548 in_p ? "true" : "false",
13549 reg_class_names[rclass],
13550 GET_MODE_NAME (mode));
13552 if (default_p)
13553 fprintf (stderr, ", default secondary reload");
13555 if (sri->icode != CODE_FOR_nothing)
13556 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
13557 insn_data[sri->icode].name, sri->extra_cost);
13558 else
13559 fprintf (stderr, "\n");
13561 debug_rtx (x);
13564 return ret;
13567 /* Fix up reload addresses for Altivec or VSX loads/stores to change SP+offset
13568 to SP+reg addressing. */
13570 void
13571 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
13573 int regno = true_regnum (reg);
13574 enum machine_mode mode = GET_MODE (reg);
13575 enum reg_class rclass;
13576 rtx addr;
13577 rtx and_op2 = NULL_RTX;
13578 rtx addr_op1;
13579 rtx addr_op2;
13580 rtx scratch_or_premodify = scratch;
13581 rtx and_rtx;
13582 rtx cc_clobber;
13584 if (TARGET_DEBUG_ADDR)
13586 fprintf (stderr, "\nrs6000_secondary_reload_inner, type = %s\n",
13587 store_p ? "store" : "load");
13588 fprintf (stderr, "reg:\n");
13589 debug_rtx (reg);
13590 fprintf (stderr, "mem:\n");
13591 debug_rtx (mem);
13592 fprintf (stderr, "scratch:\n");
13593 debug_rtx (scratch);
13596 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13597 gcc_assert (GET_CODE (mem) == MEM);
13598 rclass = REGNO_REG_CLASS (regno);
13599 addr = XEXP (mem, 0);
13601 switch (rclass)
13603 /* GPRs can handle reg + small constant; all other addresses need to use
13604 the scratch register. */
13605 case GENERAL_REGS:
13606 case BASE_REGS:
13607 if (GET_CODE (addr) == AND)
13609 and_op2 = XEXP (addr, 1);
13610 addr = XEXP (addr, 0);
13613 if (GET_CODE (addr) == PRE_MODIFY)
13615 scratch_or_premodify = XEXP (addr, 0);
13616 gcc_assert (REG_P (scratch_or_premodify));
13617 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13618 addr = XEXP (addr, 1);
13621 if (GET_CODE (addr) == PLUS
13622 && (!rs6000_legitimate_offset_address_p (TImode, addr, false)
13623 || and_op2 != NULL_RTX))
13625 addr_op1 = XEXP (addr, 0);
13626 addr_op2 = XEXP (addr, 1);
13627 gcc_assert (legitimate_indirect_address_p (addr_op1, false));
13629 if (!REG_P (addr_op2)
13630 && (GET_CODE (addr_op2) != CONST_INT
13631 || !satisfies_constraint_I (addr_op2)))
13633 if (TARGET_DEBUG_ADDR)
13635 fprintf (stderr,
13636 "\nMove plus addr to register %s, mode = %s: ",
13637 rs6000_reg_names[REGNO (scratch)],
13638 GET_MODE_NAME (mode));
13639 debug_rtx (addr_op2);
13641 rs6000_emit_move (scratch, addr_op2, Pmode);
13642 addr_op2 = scratch;
13645 emit_insn (gen_rtx_SET (VOIDmode,
13646 scratch_or_premodify,
13647 gen_rtx_PLUS (Pmode,
13648 addr_op1,
13649 addr_op2)));
13651 addr = scratch_or_premodify;
13652 scratch_or_premodify = scratch;
13654 else if (!legitimate_indirect_address_p (addr, false)
13655 && !rs6000_legitimate_offset_address_p (TImode, addr, false))
13657 if (TARGET_DEBUG_ADDR)
13659 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13660 rs6000_reg_names[REGNO (scratch_or_premodify)],
13661 GET_MODE_NAME (mode));
13662 debug_rtx (addr);
13664 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13665 addr = scratch_or_premodify;
13666 scratch_or_premodify = scratch;
13668 break;
13670 /* Float/Altivec registers can only handle reg+reg addressing. Move
13671 other addresses into a scratch register. */
13672 case FLOAT_REGS:
13673 case VSX_REGS:
13674 case ALTIVEC_REGS:
13676 /* With float regs, we need to handle the AND ourselves, since we can't
13677 use the Altivec instruction with an implicit AND -16. Allow scalar
13678 loads to float registers to use reg+offset even if VSX. */
13679 if (GET_CODE (addr) == AND
13680 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
13681 || GET_CODE (XEXP (addr, 1)) != CONST_INT
13682 || INTVAL (XEXP (addr, 1)) != -16
13683 || !VECTOR_MEM_ALTIVEC_P (mode)))
13685 and_op2 = XEXP (addr, 1);
13686 addr = XEXP (addr, 0);
13689 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
13690 as the address later. */
13691 if (GET_CODE (addr) == PRE_MODIFY
13692 && (!VECTOR_MEM_VSX_P (mode)
13693 || and_op2 != NULL_RTX
13694 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
13696 scratch_or_premodify = XEXP (addr, 0);
13697 gcc_assert (legitimate_indirect_address_p (scratch_or_premodify,
13698 false));
13699 gcc_assert (GET_CODE (XEXP (addr, 1)) == PLUS);
13700 addr = XEXP (addr, 1);
13703 if (legitimate_indirect_address_p (addr, false) /* reg */
13704 || legitimate_indexed_address_p (addr, false) /* reg+reg */
13705 || GET_CODE (addr) == PRE_MODIFY /* VSX pre-modify */
13706 || (GET_CODE (addr) == AND /* Altivec memory */
13707 && GET_CODE (XEXP (addr, 1)) == CONST_INT
13708 && INTVAL (XEXP (addr, 1)) == -16
13709 && VECTOR_MEM_ALTIVEC_P (mode))
13710 || (rclass == FLOAT_REGS /* legacy float mem */
13711 && GET_MODE_SIZE (mode) == 8
13712 && and_op2 == NULL_RTX
13713 && scratch_or_premodify == scratch
13714 && rs6000_legitimate_offset_address_p (mode, addr, false)))
13717 else if (GET_CODE (addr) == PLUS)
13719 addr_op1 = XEXP (addr, 0);
13720 addr_op2 = XEXP (addr, 1);
13721 gcc_assert (REG_P (addr_op1));
13723 if (TARGET_DEBUG_ADDR)
13725 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
13726 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13727 debug_rtx (addr_op2);
13729 rs6000_emit_move (scratch, addr_op2, Pmode);
13730 emit_insn (gen_rtx_SET (VOIDmode,
13731 scratch_or_premodify,
13732 gen_rtx_PLUS (Pmode,
13733 addr_op1,
13734 scratch)));
13735 addr = scratch_or_premodify;
13736 scratch_or_premodify = scratch;
13739 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
13740 || GET_CODE (addr) == CONST_INT || REG_P (addr))
13742 if (TARGET_DEBUG_ADDR)
13744 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
13745 rs6000_reg_names[REGNO (scratch_or_premodify)],
13746 GET_MODE_NAME (mode));
13747 debug_rtx (addr);
13750 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13751 addr = scratch_or_premodify;
13752 scratch_or_premodify = scratch;
13755 else
13756 gcc_unreachable ();
13758 break;
13760 default:
13761 gcc_unreachable ();
13764 /* If the original address involved a pre-modify that we couldn't use with
13765 the VSX memory instruction with update, and we haven't already taken
13766 care of it, store the address in the pre-modify register and use that
13767 as the address. */
13768 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
13770 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
13771 addr = scratch_or_premodify;
13774 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
13775 memory instruction, recreate the AND now, including the clobber which is
13776 generated by the general ANDSI3/ANDDI3 patterns for the
13777 andi. instruction. */
13778 if (and_op2 != NULL_RTX)
13780 if (! legitimate_indirect_address_p (addr, false))
13782 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
13783 addr = scratch;
13786 if (TARGET_DEBUG_ADDR)
13788 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
13789 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
13790 debug_rtx (and_op2);
13793 and_rtx = gen_rtx_SET (VOIDmode,
13794 scratch,
13795 gen_rtx_AND (Pmode,
13796 addr,
13797 and_op2));
13799 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
13800 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13801 gen_rtvec (2, and_rtx, cc_clobber)));
13802 addr = scratch;
13805 /* Adjust the address if it changed. */
13806 if (addr != XEXP (mem, 0))
13808 mem = change_address (mem, mode, addr);
13809 if (TARGET_DEBUG_ADDR)
13810 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
13813 /* Now create the move. */
13814 if (store_p)
13815 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
13816 else
13817 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
13819 return;
13822 /* Convert reloads involving 64-bit gprs and misaligned offset
13823 addressing to use indirect addressing. */
13825 void
13826 rs6000_secondary_reload_ppc64 (rtx reg, rtx mem, rtx scratch, bool store_p)
13828 int regno = true_regnum (reg);
13829 enum reg_class rclass;
13830 rtx addr;
13831 rtx scratch_or_premodify = scratch;
13833 if (TARGET_DEBUG_ADDR)
13835 fprintf (stderr, "\nrs6000_secondary_reload_ppc64, type = %s\n",
13836 store_p ? "store" : "load");
13837 fprintf (stderr, "reg:\n");
13838 debug_rtx (reg);
13839 fprintf (stderr, "mem:\n");
13840 debug_rtx (mem);
13841 fprintf (stderr, "scratch:\n");
13842 debug_rtx (scratch);
13845 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
13846 gcc_assert (GET_CODE (mem) == MEM);
13847 rclass = REGNO_REG_CLASS (regno);
13848 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
13849 addr = XEXP (mem, 0);
13851 if (GET_CODE (addr) == PRE_MODIFY)
13853 scratch_or_premodify = XEXP (addr, 0);
13854 gcc_assert (REG_P (scratch_or_premodify));
13855 addr = XEXP (addr, 1);
13857 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
13859 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
13861 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
13863 /* Now create the move. */
13864 if (store_p)
13865 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
13866 else
13867 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
13869 return;
13872 /* Allocate a 64-bit stack slot to be used for copying SDmode
13873 values through if this function has any SDmode references. */
13875 static void
13876 rs6000_alloc_sdmode_stack_slot (void)
13878 tree t;
13879 basic_block bb;
13880 gimple_stmt_iterator gsi;
13882 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
13884 FOR_EACH_BB (bb)
13885 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
13887 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
13888 if (ret)
13890 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
13891 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
13892 SDmode, 0);
13893 return;
13897 /* Check for any SDmode parameters of the function. */
13898 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
13900 if (TREE_TYPE (t) == error_mark_node)
13901 continue;
13903 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
13904 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
13906 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
13907 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
13908 SDmode, 0);
13909 return;
13914 static void
13915 rs6000_instantiate_decls (void)
13917 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
13918 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
13921 /* Given an rtx X being reloaded into a reg required to be
13922 in class CLASS, return the class of reg to actually use.
13923 In general this is just CLASS; but on some machines
13924 in some cases it is preferable to use a more restrictive class.
13926 On the RS/6000, we have to return NO_REGS when we want to reload a
13927 floating-point CONST_DOUBLE to force it to be copied to memory.
13929 We also don't want to reload integer values into floating-point
13930 registers if we can at all help it. In fact, this can
13931 cause reload to die, if it tries to generate a reload of CTR
13932 into a FP register and discovers it doesn't have the memory location
13933 required.
13935 ??? Would it be a good idea to have reload do the converse, that is
13936 try to reload floating modes into FP registers if possible?
13939 static enum reg_class
13940 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
13942 enum machine_mode mode = GET_MODE (x);
13944 if (VECTOR_UNIT_VSX_P (mode)
13945 && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
13946 return rclass;
13948 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
13949 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
13950 && easy_vector_constant (x, mode))
13951 return ALTIVEC_REGS;
13953 if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
13954 return NO_REGS;
13956 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
13957 return GENERAL_REGS;
13959 /* For VSX, prefer the traditional registers for 64-bit values because we can
13960 use the non-VSX loads. Prefer the Altivec registers if Altivec is
13961 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
13962 prefer Altivec loads. */
13963 if (rclass == VSX_REGS)
13965 if (GET_MODE_SIZE (mode) <= 8)
13966 return FLOAT_REGS;
13968 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
13969 return ALTIVEC_REGS;
13971 return rclass;
13974 return rclass;
13977 /* Debug version of rs6000_preferred_reload_class. */
13978 static enum reg_class
13979 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
13981 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
13983 fprintf (stderr,
13984 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
13985 "mode = %s, x:\n",
13986 reg_class_names[ret], reg_class_names[rclass],
13987 GET_MODE_NAME (GET_MODE (x)));
13988 debug_rtx (x);
13990 return ret;
13993 /* If we are copying between FP or AltiVec registers and anything else, we need
13994 a memory location. The exception is when we are targeting ppc64 and the
13995 fpr to/from gpr move instructions are available. Also, under VSX, you
13996 can copy vector registers from the FP register set to the Altivec register
13997 set and vice versa. */
13999 static bool
14000 rs6000_secondary_memory_needed (enum reg_class class1,
14001 enum reg_class class2,
14002 enum machine_mode mode)
14004 if (class1 == class2)
14005 return false;
14007 /* Under VSX, there are 3 register classes that values could be in (VSX_REGS,
14008 ALTIVEC_REGS, and FLOAT_REGS). We don't need to use memory to copy
14009 between these classes. But we need memory for other things that can go in
14010 FLOAT_REGS like SFmode. */
14011 if (TARGET_VSX
14012 && (VECTOR_MEM_VSX_P (mode) || VECTOR_UNIT_VSX_P (mode))
14013 && (class1 == VSX_REGS || class1 == ALTIVEC_REGS
14014 || class1 == FLOAT_REGS))
14015 return (class2 != VSX_REGS && class2 != ALTIVEC_REGS
14016 && class2 != FLOAT_REGS);
14018 if (class1 == VSX_REGS || class2 == VSX_REGS)
14019 return true;
14021 if (class1 == FLOAT_REGS
14022 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14023 || ((mode != DFmode)
14024 && (mode != DDmode)
14025 && (mode != DImode))))
14026 return true;
14028 if (class2 == FLOAT_REGS
14029 && (!TARGET_MFPGPR || !TARGET_POWERPC64
14030 || ((mode != DFmode)
14031 && (mode != DDmode)
14032 && (mode != DImode))))
14033 return true;
14035 if (class1 == ALTIVEC_REGS || class2 == ALTIVEC_REGS)
14036 return true;
14038 return false;
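/* For example, copying a DFmode value between FLOAT_REGS and
   GENERAL_REGS normally needs a stack slot, unless TARGET_MFPGPR on a
   64-bit target supplies direct fpr/gpr move instructions; a V4SImode
   copy between FLOAT_REGS and ALTIVEC_REGS under VSX needs no memory
   at all.  */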
14041 /* Debug version of rs6000_secondary_memory_needed. */
14042 static bool
14043 rs6000_debug_secondary_memory_needed (enum reg_class class1,
14044 enum reg_class class2,
14045 enum machine_mode mode)
14047 bool ret = rs6000_secondary_memory_needed (class1, class2, mode);
14049 fprintf (stderr,
14050 "rs6000_secondary_memory_needed, return: %s, class1 = %s, "
14051 "class2 = %s, mode = %s\n",
14052 ret ? "true" : "false", reg_class_names[class1],
14053 reg_class_names[class2], GET_MODE_NAME (mode));
14055 return ret;
14058 /* Return the register class of a scratch register needed to copy IN into
14059 or out of a register in RCLASS in MODE. If it can be done directly,
14060 NO_REGS is returned. */
14062 static enum reg_class
14063 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
14064 rtx in)
14066 int regno;
14068 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
14069 #if TARGET_MACHO
14070 && MACHOPIC_INDIRECT
14071 #endif
14074 /* We cannot copy a symbolic operand directly into anything
14075 other than BASE_REGS for TARGET_ELF. So indicate that a
14076 register from BASE_REGS is needed as an intermediate
14077 register.
14079 On Darwin, pic addresses require a load from memory, which
14080 needs a base register. */
14081 if (rclass != BASE_REGS
14082 && (GET_CODE (in) == SYMBOL_REF
14083 || GET_CODE (in) == HIGH
14084 || GET_CODE (in) == LABEL_REF
14085 || GET_CODE (in) == CONST))
14086 return BASE_REGS;
14089 if (GET_CODE (in) == REG)
14091 regno = REGNO (in);
14092 if (regno >= FIRST_PSEUDO_REGISTER)
14094 regno = true_regnum (in);
14095 if (regno >= FIRST_PSEUDO_REGISTER)
14096 regno = -1;
14099 else if (GET_CODE (in) == SUBREG)
14101 regno = true_regnum (in);
14102 if (regno >= FIRST_PSEUDO_REGISTER)
14103 regno = -1;
14105 else
14106 regno = -1;
14108 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
14109 into anything. */
14110 if (rclass == GENERAL_REGS || rclass == BASE_REGS
14111 || (regno >= 0 && INT_REGNO_P (regno)))
14112 return NO_REGS;
14114 /* Constants, memory, and FP registers can go into FP registers. */
14115 if ((regno == -1 || FP_REGNO_P (regno))
14116 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
14117 return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
14119 /* Memory and FP/altivec registers can go into fp/altivec registers under
14120 VSX. */
14121 if (TARGET_VSX
14122 && (regno == -1 || VSX_REGNO_P (regno))
14123 && VSX_REG_CLASS_P (rclass))
14124 return NO_REGS;
14126 /* Memory and AltiVec registers can go into AltiVec registers. */
14127 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
14128 && rclass == ALTIVEC_REGS)
14129 return NO_REGS;
14131 /* We can copy among the CR registers. */
14132 if ((rclass == CR_REGS || rclass == CR0_REGS)
14133 && regno >= 0 && CR_REGNO_P (regno))
14134 return NO_REGS;
14136 /* Otherwise, we need GENERAL_REGS. */
14137 return GENERAL_REGS;
14140 /* Debug version of rs6000_secondary_reload_class. */
14141 static enum reg_class
14142 rs6000_debug_secondary_reload_class (enum reg_class rclass,
14143 enum machine_mode mode, rtx in)
14145 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
14146 fprintf (stderr,
14147 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
14148 "mode = %s, input rtx:\n",
14149 reg_class_names[ret], reg_class_names[rclass],
14150 GET_MODE_NAME (mode));
14151 debug_rtx (in);
14153 return ret;
14156 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
14158 static bool
14159 rs6000_cannot_change_mode_class (enum machine_mode from,
14160 enum machine_mode to,
14161 enum reg_class rclass)
14163 unsigned from_size = GET_MODE_SIZE (from);
14164 unsigned to_size = GET_MODE_SIZE (to);
14166 if (from_size != to_size)
14168 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
14169 return ((from_size < 8 || to_size < 8 || TARGET_IEEEQUAD)
14170 && reg_classes_intersect_p (xclass, rclass));
14173 if (TARGET_E500_DOUBLE
14174 && ((((to) == DFmode) + ((from) == DFmode)) == 1
14175 || (((to) == TFmode) + ((from) == TFmode)) == 1
14176 || (((to) == DDmode) + ((from) == DDmode)) == 1
14177 || (((to) == TDmode) + ((from) == TDmode)) == 1
14178 || (((to) == DImode) + ((from) == DImode)) == 1))
14179 return true;
14181 /* Since the VSX register set includes traditional floating point registers
14182 and altivec registers, just check for the size being different instead of
14183 trying to check whether the modes are vector modes. Otherwise it won't
14184 allow, say, DF and DI to change classes. */
14185 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
14186 return (from_size != 8 && from_size != 16);
14188 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
14189 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
14190 return true;
14192 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
14193 && reg_classes_intersect_p (GENERAL_REGS, rclass))
14194 return true;
14196 return false;
14199 /* Debug version of rs6000_cannot_change_mode_class. */
14200 static bool
14201 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
14202 enum machine_mode to,
14203 enum reg_class rclass)
14205 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
14207 fprintf (stderr,
14208 "rs6000_cannot_change_mode_class, return %s, from = %s, "
14209 "to = %s, rclass = %s\n",
14210 ret ? "true" : "false",
14211 GET_MODE_NAME (from), GET_MODE_NAME (to),
14212 reg_class_names[rclass]);
14214 return ret;
14217 /* Given a comparison operation, return the bit number in CCR to test. We
14218 know this is a valid comparison.
14220 SCC_P is 1 if this is for an scc. That means that %D will have been
14221 used instead of %C, so the bits will be in different places.
14223 Return -1 if OP isn't a valid comparison for some reason. */
14226 ccr_bit (rtx op, int scc_p)
14228 enum rtx_code code = GET_CODE (op);
14229 enum machine_mode cc_mode;
14230 int cc_regnum;
14231 int base_bit;
14232 rtx reg;
14234 if (!COMPARISON_P (op))
14235 return -1;
14237 reg = XEXP (op, 0);
14239 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
14241 cc_mode = GET_MODE (reg);
14242 cc_regnum = REGNO (reg);
14243 base_bit = 4 * (cc_regnum - CR0_REGNO);
14245 validate_condition_mode (code, cc_mode);
14247 /* When generating a sCOND operation, only positive conditions are
14248 allowed. */
14249 gcc_assert (!scc_p
14250 || code == EQ || code == GT || code == LT || code == UNORDERED
14251 || code == GTU || code == LTU);
14253 switch (code)
14255 case NE:
14256 return scc_p ? base_bit + 3 : base_bit + 2;
14257 case EQ:
14258 return base_bit + 2;
14259 case GT: case GTU: case UNLE:
14260 return base_bit + 1;
14261 case LT: case LTU: case UNGE:
14262 return base_bit;
14263 case ORDERED: case UNORDERED:
14264 return base_bit + 3;
14266 case GE: case GEU:
14267 /* If scc, we will have done a cror to put the bit in the
14268 unordered position. So test that bit. For integer, this is ! LT
14269 unless this is an scc insn. */
14270 return scc_p ? base_bit + 3 : base_bit;
14272 case LE: case LEU:
14273 return scc_p ? base_bit + 3 : base_bit + 1;
14275 default:
14276 gcc_unreachable ();
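/* A worked example: for a GT comparison against CR field 2, base_bit
   is 4 * 2 == 8 and the GT bit is base_bit + 1 == 9, the bit number
   tested by a conditional branch on that field.  */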
14280 /* Return the GOT register. */
14283 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
14285 /* The second flow pass currently (June 1999) can't update
14286 regs_ever_live without disturbing other parts of the compiler, so
14287 update it here to make the prolog/epilogue code happy. */
14288 if (!can_create_pseudo_p ()
14289 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
14290 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
14292 crtl->uses_pic_offset_table = 1;
14294 return pic_offset_table_rtx;
14297 static rs6000_stack_t stack_info;
14299 /* Function to init struct machine_function.
14300 This will be called, via a pointer variable,
14301 from push_function_context. */
14303 static struct machine_function *
14304 rs6000_init_machine_status (void)
14306 stack_info.reload_completed = 0;
14307 return ggc_alloc_cleared_machine_function ();
14310 /* These macros test for integers and extract the low-order bits. */
14311 #define INT_P(X) \
14312 ((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
14313 && GET_MODE (X) == VOIDmode)
14315 #define INT_LOWPART(X) \
14316 (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
14319 extract_MB (rtx op)
14321 int i;
14322 unsigned long val = INT_LOWPART (op);
14324 /* If the high bit is zero, the value is the first 1 bit we find
14325 from the left. */
14326 if ((val & 0x80000000) == 0)
14328 gcc_assert (val & 0xffffffff);
14330 i = 1;
14331 while (((val <<= 1) & 0x80000000) == 0)
14332 ++i;
14333 return i;
14336 /* If the high bit is set and the low bit is not, or the mask is all
14337 1's, the value is zero. */
14338 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
14339 return 0;
14341 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14342 from the right. */
14343 i = 31;
14344 while (((val >>= 1) & 1) != 0)
14345 --i;
14347 return i;
14351 extract_ME (rtx op)
14353 int i;
14354 unsigned long val = INT_LOWPART (op);
14356 /* If the low bit is zero, the value is the first 1 bit we find from
14357 the right. */
14358 if ((val & 1) == 0)
14360 gcc_assert (val & 0xffffffff);
14362 i = 30;
14363 while (((val >>= 1) & 1) == 0)
14364 --i;
14366 return i;
14369 /* If the low bit is set and the high bit is not, or the mask is all
14370 1's, the value is 31. */
14371 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
14372 return 31;
14374 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
14375 from the left. */
14376 i = 0;
14377 while (((val <<= 1) & 0x80000000) != 0)
14378 ++i;
14380 return i;
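/* As a worked example for the two extractors above: the mask
   0x00000ff0 has its first 1 bit 20 positions from the left and its
   last 1 bit at position 27, so extract_MB returns 20 and extract_ME
   returns 27; these are the MB and ME fields of an rlwinm that selects
   bits 4..11 counted from the right.  */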
14383 /* Locate some local-dynamic symbol still in use by this function
14384 so that we can print its name in some tls_ld pattern. */
14386 static const char *
14387 rs6000_get_some_local_dynamic_name (void)
14389 rtx insn;
14391 if (cfun->machine->some_ld_name)
14392 return cfun->machine->some_ld_name;
14394 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
14395 if (INSN_P (insn)
14396 && for_each_rtx (&PATTERN (insn),
14397 rs6000_get_some_local_dynamic_name_1, 0))
14398 return cfun->machine->some_ld_name;
14400 gcc_unreachable ();
14403 /* Helper function for rs6000_get_some_local_dynamic_name. */
14405 static int
14406 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
14408 rtx x = *px;
14410 if (GET_CODE (x) == SYMBOL_REF)
14412 const char *str = XSTR (x, 0);
14413 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
14415 cfun->machine->some_ld_name = str;
14416 return 1;
14420 return 0;
14423 /* Write out a function code label. */
14425 void
14426 rs6000_output_function_entry (FILE *file, const char *fname)
14428 if (fname[0] != '.')
14430 switch (DEFAULT_ABI)
14432 default:
14433 gcc_unreachable ();
14435 case ABI_AIX:
14436 if (DOT_SYMBOLS)
14437 putc ('.', file);
14438 else
14439 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
14440 break;
14442 case ABI_V4:
14443 case ABI_DARWIN:
14444 break;
14448 RS6000_OUTPUT_BASENAME (file, fname);
14451 /* Print an operand. Recognize special options, documented below. */
14453 #if TARGET_ELF
14454 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
14455 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
14456 #else
14457 #define SMALL_DATA_RELOC "sda21"
14458 #define SMALL_DATA_REG 0
14459 #endif
14461 void
14462 print_operand (FILE *file, rtx x, int code)
14464 int i;
14465 unsigned HOST_WIDE_INT uval;
14467 switch (code)
14469 case '.':
14470 /* Write out an instruction after the call which may be replaced
14471 with glue code by the loader. This depends on the AIX version. */
14472 asm_fprintf (file, RS6000_CALL_GLUE);
14473 return;
14475 /* %a is output_address. */
14477 case 'A':
14478 /* If X is a constant integer whose low-order 5 bits are zero,
14479 write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
14480 in the AIX assembler where "sri" with a zero shift count
14481 writes a trash instruction. */
14482 if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
14483 putc ('l', file);
14484 else
14485 putc ('r', file);
14486 return;
14488 case 'b':
14489 /* If constant, low-order 16 bits of constant, unsigned.
14490 Otherwise, write normally. */
14491 if (INT_P (x))
14492 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
14493 else
14494 print_operand (file, x, 0);
14495 return;
14497 case 'B':
14498 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
14499 for 64-bit mask direction. */
14500 putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
14501 return;
14503 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
14504 output_operand. */
14506 case 'c':
14507 /* X is a CR register. Print the number of the GT bit of the CR. */
14508 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14509 output_operand_lossage ("invalid %%c value");
14510 else
14511 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
14512 return;
14514 case 'D':
14515 /* Like 'J' but get to the GT bit only. */
14516 gcc_assert (REG_P (x));
14518 /* Bit 1 is GT bit. */
14519 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
14521 /* Add one for shift count in rlinm for scc. */
14522 fprintf (file, "%d", i + 1);
14523 return;
14525 case 'E':
14526 /* X is a CR register. Print the number of the EQ bit of the CR. */
14527 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14528 output_operand_lossage ("invalid %%E value");
14529 else
14530 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
14531 return;
14533 case 'f':
14534 /* X is a CR register. Print the shift count needed to move it
14535 to the high-order four bits. */
14536 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14537 output_operand_lossage ("invalid %%f value");
14538 else
14539 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
14540 return;
14542 case 'F':
14543 /* Similar, but print the count for the rotate in the opposite
14544 direction. */
14545 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14546 output_operand_lossage ("invalid %%F value");
14547 else
14548 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
14549 return;
14551 case 'G':
14552 /* X is a constant integer. If it is negative, print "m",
14553 otherwise print "z". This is to make an aze or ame insn. */
14554 if (GET_CODE (x) != CONST_INT)
14555 output_operand_lossage ("invalid %%G value");
14556 else if (INTVAL (x) >= 0)
14557 putc ('z', file);
14558 else
14559 putc ('m', file);
14560 return;
14562 case 'h':
14563 /* If constant, output low-order five bits. Otherwise, write
14564 normally. */
14565 if (INT_P (x))
14566 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
14567 else
14568 print_operand (file, x, 0);
14569 return;
14571 case 'H':
14572 /* If constant, output low-order six bits. Otherwise, write
14573 normally. */
14574 if (INT_P (x))
14575 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
14576 else
14577 print_operand (file, x, 0);
14578 return;
14580 case 'I':
14581 /* Print `i' if this is a constant, else nothing. */
14582 if (INT_P (x))
14583 putc ('i', file);
14584 return;
14586 case 'j':
14587 /* Write the bit number in CCR for jump. */
14588 i = ccr_bit (x, 0);
14589 if (i == -1)
14590 output_operand_lossage ("invalid %%j code");
14591 else
14592 fprintf (file, "%d", i);
14593 return;
14595 case 'J':
14596 /* Similar, but add one for shift count in rlinm for scc and pass
14597 scc flag to `ccr_bit'. */
14598 i = ccr_bit (x, 1);
14599 if (i == -1)
14600 output_operand_lossage ("invalid %%J code");
14601 else
14602 /* If we want bit 31, write a shift count of zero, not 32. */
14603 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14604 return;
14606 case 'k':
14607 /* X must be a constant. Write the 1's complement of the
14608 constant. */
14609 if (! INT_P (x))
14610 output_operand_lossage ("invalid %%k value");
14611 else
14612 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
14613 return;
14615 case 'K':
14616 /* X must be a symbolic constant on ELF. Write an
14617 expression suitable for an 'addi' that adds in the low 16
14618 bits of the MEM. */
14619 if (GET_CODE (x) == CONST)
14621 if (GET_CODE (XEXP (x, 0)) != PLUS
14622 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
14623 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
14624 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
14625 output_operand_lossage ("invalid %%K value");
14627 print_operand_address (file, x);
14628 fputs ("@l", file);
14629 return;
14631 /* %l is output_asm_label. */
14633 case 'L':
14634 /* Write second word of DImode or DFmode reference. Works on register
14635 or non-indexed memory only. */
14636 if (REG_P (x))
14637 fputs (reg_names[REGNO (x) + 1], file);
14638 else if (MEM_P (x))
14640 /* Handle possible auto-increment. Since it is pre-increment and
14641 we have already done it, we can just use an offset of one word. */
14642 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14643 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14644 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14645 UNITS_PER_WORD));
14646 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14647 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
14648 UNITS_PER_WORD));
14649 else
14650 output_address (XEXP (adjust_address_nv (x, SImode,
14651 UNITS_PER_WORD),
14652 0));
14654 if (small_data_operand (x, GET_MODE (x)))
14655 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14656 reg_names[SMALL_DATA_REG]);
14658 return;
14660 case 'm':
14661 /* MB value for a mask operand. */
14662 if (! mask_operand (x, SImode))
14663 output_operand_lossage ("invalid %%m value");
14665 fprintf (file, "%d", extract_MB (x));
14666 return;
14668 case 'M':
14669 /* ME value for a mask operand. */
14670 if (! mask_operand (x, SImode))
14671 output_operand_lossage ("invalid %%M value");
14673 fprintf (file, "%d", extract_ME (x));
14674 return;
14676 /* %n outputs the negative of its operand. */
14678 case 'N':
14679 /* Write the number of elements in the vector times 4. */
14680 if (GET_CODE (x) != PARALLEL)
14681 output_operand_lossage ("invalid %%N value");
14682 else
14683 fprintf (file, "%d", XVECLEN (x, 0) * 4);
14684 return;
14686 case 'O':
14687 /* Similar, but subtract 1 first. */
14688 if (GET_CODE (x) != PARALLEL)
14689 output_operand_lossage ("invalid %%O value");
14690 else
14691 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
14692 return;
14694 case 'p':
14695 /* X is a CONST_INT that is a power of two. Output the logarithm. */
14696 if (! INT_P (x)
14697 || INT_LOWPART (x) < 0
14698 || (i = exact_log2 (INT_LOWPART (x))) < 0)
14699 output_operand_lossage ("invalid %%p value");
14700 else
14701 fprintf (file, "%d", i);
14702 return;
14704 case 'P':
14705 /* The operand must be an indirect memory reference. The result
14706 is the register name. */
14707 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
14708 || REGNO (XEXP (x, 0)) >= 32)
14709 output_operand_lossage ("invalid %%P value");
14710 else
14711 fputs (reg_names[REGNO (XEXP (x, 0))], file);
14712 return;
14714 case 'q':
14715 /* This outputs the logical code corresponding to a boolean
14716 expression. The expression may have one or both operands
14717 negated (if one, only the first one). For condition register
14718 logical operations, it will also treat the negated
14719 CR codes as NOTs, but not handle NOTs of them. */
14721 const char *const *t = 0;
14722 const char *s;
14723 enum rtx_code code = GET_CODE (x);
14724 static const char * const tbl[3][3] = {
14725 { "and", "andc", "nor" },
14726 { "or", "orc", "nand" },
14727 { "xor", "eqv", "xor" } };
14729 if (code == AND)
14730 t = tbl[0];
14731 else if (code == IOR)
14732 t = tbl[1];
14733 else if (code == XOR)
14734 t = tbl[2];
14735 else
14736 output_operand_lossage ("invalid %%q value");
14738 if (GET_CODE (XEXP (x, 0)) != NOT)
14739 s = t[0];
14740 else
14742 if (GET_CODE (XEXP (x, 1)) == NOT)
14743 s = t[2];
14744 else
14745 s = t[1];
14748 fputs (s, file);
14750 return;
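/* For instance, (and (not A) B) selects "andc" from the table above,
   and (and (not A) (not B)) selects "nor"; a NOT of the whole
   expression is not handled here.  */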
14752 case 'Q':
14753 if (TARGET_MFCRF)
14754 fputc (',', file);
14755 /* FALLTHRU */
14756 else
14757 return;
14759 case 'R':
14760 /* X is a CR register. Print the mask for `mtcrf'. */
14761 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
14762 output_operand_lossage ("invalid %%R value");
14763 else
14764 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
14765 return;
14767 case 's':
14768 /* Low 5 bits of 32 - value */
14769 if (! INT_P (x))
14770 output_operand_lossage ("invalid %%s value");
14771 else
14772 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
14773 return;
14775 case 'S':
14776 /* PowerPC64 mask position. All 0's is excluded.
14777 CONST_INT 32-bit mask is considered sign-extended so any
14778 transition must occur within the CONST_INT, not on the boundary. */
14779 if (! mask64_operand (x, DImode))
14780 output_operand_lossage ("invalid %%S value");
14782 uval = INT_LOWPART (x);
14784 if (uval & 1) /* Clear Left */
14786 #if HOST_BITS_PER_WIDE_INT > 64
14787 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14788 #endif
14789 i = 64;
14791 else /* Clear Right */
14793 uval = ~uval;
14794 #if HOST_BITS_PER_WIDE_INT > 64
14795 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
14796 #endif
14797 i = 63;
14799 while (uval != 0)
14800 --i, uval >>= 1;
14801 gcc_assert (i >= 0);
14802 fprintf (file, "%d", i);
14803 return;
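/* Worked example: the "clear left" mask 0x00000000ffffffff (low bit
   set) prints 64 - 32 = 32, the MB position for rldicl, while the
   "clear right" mask 0xffffffff00000000 prints 63 - 32 = 31, the ME
   position for rldicr.  */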
14805 case 't':
14806 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
14807 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
14809 /* Bit 3 is OV bit. */
14810 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
14812 /* If we want bit 31, write a shift count of zero, not 32. */
14813 fprintf (file, "%d", i == 31 ? 0 : i + 1);
14814 return;
14816 case 'T':
14817 /* Print the symbolic name of a branch target register. */
14818 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
14819 && REGNO (x) != CTR_REGNO))
14820 output_operand_lossage ("invalid %%T value");
14821 else if (REGNO (x) == LR_REGNO)
14822 fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
14823 else
14824 fputs ("ctr", file);
14825 return;
14827 case 'u':
14828 /* High-order 16 bits of constant for use in unsigned operand. */
14829 if (! INT_P (x))
14830 output_operand_lossage ("invalid %%u value");
14831 else
14832 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
14833 (INT_LOWPART (x) >> 16) & 0xffff);
14834 return;
14836 case 'v':
14837 /* High-order 16 bits of constant for use in signed operand. */
14838 if (! INT_P (x))
14839 output_operand_lossage ("invalid %%v value");
14840 else
14841 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
14842 (INT_LOWPART (x) >> 16) & 0xffff);
14843 return;
14845 case 'U':
14846 /* Print `u' if this has an auto-increment or auto-decrement. */
14847 if (MEM_P (x)
14848 && (GET_CODE (XEXP (x, 0)) == PRE_INC
14849 || GET_CODE (XEXP (x, 0)) == PRE_DEC
14850 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
14851 putc ('u', file);
14852 return;
14854 case 'V':
14855 /* Print the trap code for this operand. */
14856 switch (GET_CODE (x))
14858 case EQ:
14859 fputs ("eq", file); /* 4 */
14860 break;
14861 case NE:
14862 fputs ("ne", file); /* 24 */
14863 break;
14864 case LT:
14865 fputs ("lt", file); /* 16 */
14866 break;
14867 case LE:
14868 fputs ("le", file); /* 20 */
14869 break;
14870 case GT:
14871 fputs ("gt", file); /* 8 */
14872 break;
14873 case GE:
14874 fputs ("ge", file); /* 12 */
14875 break;
14876 case LTU:
14877 fputs ("llt", file); /* 2 */
14878 break;
14879 case LEU:
14880 fputs ("lle", file); /* 6 */
14881 break;
14882 case GTU:
14883 fputs ("lgt", file); /* 1 */
14884 break;
14885 case GEU:
14886 fputs ("lge", file); /* 5 */
14887 break;
14888 default:
14889 gcc_unreachable ();
14891 break;
14893 case 'w':
14894 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
14895 normally. */
14896 if (INT_P (x))
14897 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
14898 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
14899 else
14900 print_operand (file, x, 0);
14901 return;
14903 case 'W':
14904 /* MB value for a PowerPC64 rldic operand. */
14905 i = clz_hwi (GET_CODE (x) == CONST_INT
14906 ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
14908 #if HOST_BITS_PER_WIDE_INT == 32
14909 if (GET_CODE (x) == CONST_INT && i > 0)
14910 i += 32; /* zero-extend high-part was all 0's */
14911 else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
14912 i = clz_hwi (CONST_DOUBLE_LOW (x)) + 32;
14913 #endif
14915 fprintf (file, "%d", i);
14916 return;
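/* E.g. on a 64-bit host the rldic mask 0x0000ffffffff0000 has 16
   leading zero bits, so "%W" prints 16 as the MB value.  */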
14918 case 'x':
14919 /* X is a FPR or Altivec register used in a VSX context. */
14920 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
14921 output_operand_lossage ("invalid %%x value");
14922 else
14924 int reg = REGNO (x);
14925 int vsx_reg = (FP_REGNO_P (reg)
14926 ? reg - 32
14927 : reg - FIRST_ALTIVEC_REGNO + 32);
14929 #ifdef TARGET_REGNAMES
14930 if (TARGET_REGNAMES)
14931 fprintf (file, "%%vs%d", vsx_reg);
14932 else
14933 #endif
14934 fprintf (file, "%d", vsx_reg);
14936 return;
14938 case 'X':
14939 if (MEM_P (x)
14940 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
14941 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
14942 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
14943 putc ('x', file);
14944 return;
14946 case 'Y':
14947 /* Like 'L', for third word of TImode */
14948 if (REG_P (x))
14949 fputs (reg_names[REGNO (x) + 2], file);
14950 else if (MEM_P (x))
14952 if (GET_CODE (XEXP (x, 0)) == PRE_INC
14953 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
14954 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
14955 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
14956 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
14957 else
14958 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
14959 if (small_data_operand (x, GET_MODE (x)))
14960 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
14961 reg_names[SMALL_DATA_REG]);
14963 return;
14965 case 'z':
14966 /* X is a SYMBOL_REF. Write out the name preceded by a
14967 period and without any trailing data in brackets. Used for function
14968 names. If we are configured for System V (or the embedded ABI) on
14969 the PowerPC, do not emit the period, since those systems do not use
14970 TOCs and the like. */
14971 gcc_assert (GET_CODE (x) == SYMBOL_REF);
14973 /* Mark the decl as referenced so that cgraph will output the
14974 function. */
14975 if (SYMBOL_REF_DECL (x))
14976 mark_decl_referenced (SYMBOL_REF_DECL (x));
14978 /* For macho, check to see if we need a stub. */
14979 if (TARGET_MACHO)
14981 const char *name = XSTR (x, 0);
14982 #if TARGET_MACHO
14983 if (darwin_emit_branch_islands
14984 && MACHOPIC_INDIRECT
14985 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
14986 name = machopic_indirection_name (x, /*stub_p=*/true);
14987 #endif
14988 assemble_name (file, name);
14990 else if (!DOT_SYMBOLS)
14991 assemble_name (file, XSTR (x, 0));
14992 else
14993 rs6000_output_function_entry (file, XSTR (x, 0));
14994 return;
14996 case 'Z':
14997 /* Like 'L', for last word of TImode. */
14998 if (REG_P (x))
14999 fputs (reg_names[REGNO (x) + 3], file);
15000 else if (MEM_P (x))
15002 if (GET_CODE (XEXP (x, 0)) == PRE_INC
15003 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
15004 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15005 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15006 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
15007 else
15008 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
15009 if (small_data_operand (x, GET_MODE (x)))
15010 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15011 reg_names[SMALL_DATA_REG]);
15013 return;
15015 /* Print AltiVec or SPE memory operand. */
15016 case 'y':
15018 rtx tmp;
15020 gcc_assert (MEM_P (x));
15022 tmp = XEXP (x, 0);
15024 /* Ugly hack because %y is overloaded. */
15025 if ((TARGET_SPE || TARGET_E500_DOUBLE)
15026 && (GET_MODE_SIZE (GET_MODE (x)) == 8
15027 || GET_MODE (x) == TFmode
15028 || GET_MODE (x) == TImode))
15030 /* Handle [reg]. */
15031 if (REG_P (tmp))
15033 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
15034 break;
15036 /* Handle [reg+UIMM]. */
15037 else if (GET_CODE (tmp) == PLUS &&
15038 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
15040 int x;
15042 gcc_assert (REG_P (XEXP (tmp, 0)));
15044 x = INTVAL (XEXP (tmp, 1));
15045 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
15046 break;
15049 /* Fall through. Must be [reg+reg]. */
15051 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
15052 && GET_CODE (tmp) == AND
15053 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
15054 && INTVAL (XEXP (tmp, 1)) == -16)
15055 tmp = XEXP (tmp, 0);
15056 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
15057 && GET_CODE (tmp) == PRE_MODIFY)
15058 tmp = XEXP (tmp, 1);
15059 if (REG_P (tmp))
15060 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
15061 else
15063 if (GET_CODE (tmp) != PLUS
15064 || !REG_P (XEXP (tmp, 0))
15065 || !REG_P (XEXP (tmp, 1)))
15067 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
15068 break;
15071 if (REGNO (XEXP (tmp, 0)) == 0)
15072 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
15073 reg_names[ REGNO (XEXP (tmp, 0)) ]);
15074 else
15075 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
15076 reg_names[ REGNO (XEXP (tmp, 1)) ]);
15078 break;
15081 case 0:
15082 if (REG_P (x))
15083 fprintf (file, "%s", reg_names[REGNO (x)]);
15084 else if (MEM_P (x))
15086 /* We need to handle PRE_INC and PRE_DEC here, since we need to
15087 know the width from the mode. */
15088 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
15089 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
15090 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15091 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
15092 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
15093 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
15094 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
15095 output_address (XEXP (XEXP (x, 0), 1));
15096 else
15097 output_address (XEXP (x, 0));
15099 else
15101 if (toc_relative_expr_p (x, false))
15102 /* This hack along with a corresponding hack in
15103 rs6000_output_addr_const_extra arranges to output addends
15104 where the assembler expects to find them. E.g.
15105 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
15106 without this hack would be output as "x@toc+4". We
15107 want "x+4@toc". */
15108 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15109 else
15110 output_addr_const (file, x);
15112 return;
15114 case '&':
15115 assemble_name (file, rs6000_get_some_local_dynamic_name ());
15116 return;
15118 default:
15119 output_operand_lossage ("invalid %%xn code");
15123 /* Print the address of an operand. */
15125 void
15126 print_operand_address (FILE *file, rtx x)
15128 if (REG_P (x))
15129 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
15130 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
15131 || GET_CODE (x) == LABEL_REF)
15133 output_addr_const (file, x);
15134 if (small_data_operand (x, GET_MODE (x)))
15135 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
15136 reg_names[SMALL_DATA_REG]);
15137 else
15138 gcc_assert (!TARGET_TOC);
15140 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15141 && REG_P (XEXP (x, 1)))
15143 if (REGNO (XEXP (x, 0)) == 0)
15144 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
15145 reg_names[ REGNO (XEXP (x, 0)) ]);
15146 else
15147 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
15148 reg_names[ REGNO (XEXP (x, 1)) ]);
15150 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
15151 && GET_CODE (XEXP (x, 1)) == CONST_INT)
15152 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
15153 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
15154 #if TARGET_MACHO
15155 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15156 && CONSTANT_P (XEXP (x, 1)))
15158 fprintf (file, "lo16(");
15159 output_addr_const (file, XEXP (x, 1));
15160 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15162 #endif
15163 #if TARGET_ELF
15164 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
15165 && CONSTANT_P (XEXP (x, 1)))
15167 output_addr_const (file, XEXP (x, 1));
15168 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
15170 #endif
15171 else if (toc_relative_expr_p (x, false))
15173 /* This hack along with a corresponding hack in
15174 rs6000_output_addr_const_extra arranges to output addends
15175 where the assembler expects to find them. E.g.
15176 (lo_sum (reg 9)
15177 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
15178 without this hack would be output as "x@toc+8@l(9)". We
15179 want "x+8@toc@l(9)". */
15180 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
15181 if (GET_CODE (x) == LO_SUM)
15182 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
15183 else
15184 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
15186 else
15187 gcc_unreachable ();
15190 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
15192 static bool
15193 rs6000_output_addr_const_extra (FILE *file, rtx x)
15195 if (GET_CODE (x) == UNSPEC)
15196 switch (XINT (x, 1))
15198 case UNSPEC_TOCREL:
15199 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
15200 && REG_P (XVECEXP (x, 0, 1))
15201 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
15202 output_addr_const (file, XVECEXP (x, 0, 0));
15203 if (x == tocrel_base && tocrel_offset != const0_rtx)
15205 if (INTVAL (tocrel_offset) >= 0)
15206 fprintf (file, "+");
15207 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
15209 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
15211 putc ('-', file);
15212 assemble_name (file, toc_label_name);
15214 else if (TARGET_ELF)
15215 fputs ("@toc", file);
15216 return true;
15218 #if TARGET_MACHO
15219 case UNSPEC_MACHOPIC_OFFSET:
15220 output_addr_const (file, XVECEXP (x, 0, 0));
15221 putc ('-', file);
15222 machopic_output_function_base_name (file);
15223 return true;
15224 #endif
15226 return false;
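/* Under this scheme a TOC reference such as
   (unspec [(symbol_ref ("x")) (reg 2)] tocrel) plus an offset of 4
   prints as "x+4@toc" on ELF, or as "x+4-<toc base label>" when the
   TOC base must be subtracted explicitly (minimal-TOC and other
   non-AIX configurations).  */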
15229 /* Target hook for assembling integer objects. The PowerPC version has
15230 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
15231 is defined. It also needs to handle DI-mode objects on 64-bit
15232 targets. */
15234 static bool
15235 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
15237 #ifdef RELOCATABLE_NEEDS_FIXUP
15238 /* Special handling for SI values. */
15239 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
15241 static int recurse = 0;
15243 /* For -mrelocatable, we mark all addresses that need to be fixed up in
15244 the .fixup section. Since the TOC section is already relocated, we
15245 don't need to mark it here. We used to skip the text section, but it
15246 should never be valid for relocated addresses to be placed in the text
15247 section. */
15248 if (TARGET_RELOCATABLE
15249 && in_section != toc_section
15250 && !recurse
15251 && GET_CODE (x) != CONST_INT
15252 && GET_CODE (x) != CONST_DOUBLE
15253 && CONSTANT_P (x))
15255 char buf[256];
15257 recurse = 1;
15258 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
15259 fixuplabelno++;
15260 ASM_OUTPUT_LABEL (asm_out_file, buf);
15261 fprintf (asm_out_file, "\t.long\t(");
15262 output_addr_const (asm_out_file, x);
15263 fprintf (asm_out_file, ")@fixup\n");
15264 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
15265 ASM_OUTPUT_ALIGN (asm_out_file, 2);
15266 fprintf (asm_out_file, "\t.long\t");
15267 assemble_name (asm_out_file, buf);
15268 fprintf (asm_out_file, "\n\t.previous\n");
15269 recurse = 0;
15270 return true;
15272 /* Remove initial .'s to turn a -mcall-aixdesc function
15273 address into the address of the descriptor, not the function
15274 itself. */
15275 else if (GET_CODE (x) == SYMBOL_REF
15276 && XSTR (x, 0)[0] == '.'
15277 && DEFAULT_ABI == ABI_AIX)
15279 const char *name = XSTR (x, 0);
15280 while (*name == '.')
15281 name++;
15283 fprintf (asm_out_file, "\t.long\t%s\n", name);
15284 return true;
15287 #endif /* RELOCATABLE_NEEDS_FIXUP */
15288 return default_assemble_integer (x, size, aligned_p);
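/* For -mrelocatable the emitted assembly for one fixed-up word looks
   roughly like

	.LCPn:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCPn
		.previous

   i.e. the word itself followed by a .fixup entry pointing back at
   its label so the loader can relocate it.  */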
15291 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
15292 /* Emit an assembler directive to set symbol visibility for DECL to
15293 VISIBILITY_TYPE. */
15295 static void
15296 rs6000_assemble_visibility (tree decl, int vis)
15298 /* Functions need to have their entry point symbol visibility set as
15299 well as their descriptor symbol visibility. */
15300 if (DEFAULT_ABI == ABI_AIX
15301 && DOT_SYMBOLS
15302 && TREE_CODE (decl) == FUNCTION_DECL)
15304 static const char * const visibility_types[] = {
15305 NULL, "internal", "hidden", "protected"
15308 const char *name, *type;
15310 name = ((* targetm.strip_name_encoding)
15311 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
15312 type = visibility_types[vis];
15314 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
15315 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
15317 else
15318 default_assemble_visibility (decl, vis);
15320 #endif
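/* E.g. marking an AIX function "foo" hidden emits both ".hidden foo"
   for the descriptor and ".hidden .foo" for the code entry point,
   keeping the two symbols' visibility in sync.  */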
15322 enum rtx_code
15323 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
15325 /* Reversal of FP compares takes special care -- an ordered compare
15326 becomes an unordered compare and vice versa. */
15327 if (mode == CCFPmode
15328 && (!flag_finite_math_only
15329 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
15330 || code == UNEQ || code == LTGT))
15331 return reverse_condition_maybe_unordered (code);
15332 else
15333 return reverse_condition (code);
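/* E.g. reversing GE in CCFPmode yields UNLT rather than LT, so a
   comparison whose operands include a NaN still takes the reversed
   branch, matching the IEEE semantics of the original test.  */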
15336 /* Generate a compare for CODE. Return a brand-new rtx that
15337 represents the result of the compare. */
15339 static rtx
15340 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
15342 enum machine_mode comp_mode;
15343 rtx compare_result;
15344 enum rtx_code code = GET_CODE (cmp);
15345 rtx op0 = XEXP (cmp, 0);
15346 rtx op1 = XEXP (cmp, 1);
15348 if (FLOAT_MODE_P (mode))
15349 comp_mode = CCFPmode;
15350 else if (code == GTU || code == LTU
15351 || code == GEU || code == LEU)
15352 comp_mode = CCUNSmode;
15353 else if ((code == EQ || code == NE)
15354 && unsigned_reg_p (op0)
15355 && (unsigned_reg_p (op1)
15356 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
15357 /* These are unsigned values, perhaps there will be a later
15358 ordering compare that can be shared with this one. */
15359 comp_mode = CCUNSmode;
15360 else
15361 comp_mode = CCmode;
15363 /* First, the compare. */
15364 compare_result = gen_reg_rtx (comp_mode);
15366 /* E500 FP compare instructions on the GPRs. Yuck! */
15367 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
15368 && FLOAT_MODE_P (mode))
15370 rtx cmp, or_result, compare_result2;
15371 enum machine_mode op_mode = GET_MODE (op0);
15373 if (op_mode == VOIDmode)
15374 op_mode = GET_MODE (op1);
15376 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
15377 This explains the following mess. */
15379 switch (code)
15381 case EQ: case UNEQ: case NE: case LTGT:
15382 switch (op_mode)
15384 case SFmode:
15385 cmp = (flag_finite_math_only && !flag_trapping_math)
15386 ? gen_tstsfeq_gpr (compare_result, op0, op1)
15387 : gen_cmpsfeq_gpr (compare_result, op0, op1);
15388 break;
15390 case DFmode:
15391 cmp = (flag_finite_math_only && !flag_trapping_math)
15392 ? gen_tstdfeq_gpr (compare_result, op0, op1)
15393 : gen_cmpdfeq_gpr (compare_result, op0, op1);
15394 break;
15396 case TFmode:
15397 cmp = (flag_finite_math_only && !flag_trapping_math)
15398 ? gen_tsttfeq_gpr (compare_result, op0, op1)
15399 : gen_cmptfeq_gpr (compare_result, op0, op1);
15400 break;
15402 default:
15403 gcc_unreachable ();
15405 break;
15407 case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
15408 switch (op_mode)
15410 case SFmode:
15411 cmp = (flag_finite_math_only && !flag_trapping_math)
15412 ? gen_tstsfgt_gpr (compare_result, op0, op1)
15413 : gen_cmpsfgt_gpr (compare_result, op0, op1);
15414 break;
15416 case DFmode:
15417 cmp = (flag_finite_math_only && !flag_trapping_math)
15418 ? gen_tstdfgt_gpr (compare_result, op0, op1)
15419 : gen_cmpdfgt_gpr (compare_result, op0, op1);
15420 break;
15422 case TFmode:
15423 cmp = (flag_finite_math_only && !flag_trapping_math)
15424 ? gen_tsttfgt_gpr (compare_result, op0, op1)
15425 : gen_cmptfgt_gpr (compare_result, op0, op1);
15426 break;
15428 default:
15429 gcc_unreachable ();
15431 break;
15433 case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
15434 switch (op_mode)
15436 case SFmode:
15437 cmp = (flag_finite_math_only && !flag_trapping_math)
15438 ? gen_tstsflt_gpr (compare_result, op0, op1)
15439 : gen_cmpsflt_gpr (compare_result, op0, op1);
15440 break;
15442 case DFmode:
15443 cmp = (flag_finite_math_only && !flag_trapping_math)
15444 ? gen_tstdflt_gpr (compare_result, op0, op1)
15445 : gen_cmpdflt_gpr (compare_result, op0, op1);
15446 break;
15448 case TFmode:
15449 cmp = (flag_finite_math_only && !flag_trapping_math)
15450 ? gen_tsttflt_gpr (compare_result, op0, op1)
15451 : gen_cmptflt_gpr (compare_result, op0, op1);
15452 break;
15454 default:
15455 gcc_unreachable ();
15457 break;
15458 default:
15459 gcc_unreachable ();
15462 /* Synthesize LE and GE from LT/GT || EQ. */
15463 if (code == LE || code == GE || code == LEU || code == GEU)
15465 emit_insn (cmp);
15467 switch (code)
15469 case LE: code = LT; break;
15470 case GE: code = GT; break;
15471 case LEU: code = LT; break;
15472 case GEU: code = GT; break;
15473 default: gcc_unreachable ();
15476 compare_result2 = gen_reg_rtx (CCFPmode);
15478 /* Do the EQ. */
15479 switch (op_mode)
15481 case SFmode:
15482 cmp = (flag_finite_math_only && !flag_trapping_math)
15483 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
15484 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
15485 break;
15487 case DFmode:
15488 cmp = (flag_finite_math_only && !flag_trapping_math)
15489 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
15490 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
15491 break;
15493 case TFmode:
15494 cmp = (flag_finite_math_only && !flag_trapping_math)
15495 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
15496 : gen_cmptfeq_gpr (compare_result2, op0, op1);
15497 break;
15499 default:
15500 gcc_unreachable ();
15502 emit_insn (cmp);
15504 /* OR them together. */
15505 or_result = gen_reg_rtx (CCFPmode);
15506 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
15507 compare_result2);
15508 compare_result = or_result;
15509 code = EQ;
15511 else
15513 if (code == NE || code == LTGT)
15514 code = NE;
15515 else
15516 code = EQ;
15519 emit_insn (cmp);
15521 else
15523 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
15524 CLOBBERs to match cmptf_internal2 pattern. */
15525 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
15526 && GET_MODE (op0) == TFmode
15527 && !TARGET_IEEEQUAD
15528 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
15529 emit_insn (gen_rtx_PARALLEL (VOIDmode,
15530 gen_rtvec (10,
15531 gen_rtx_SET (VOIDmode,
15532 compare_result,
15533 gen_rtx_COMPARE (comp_mode, op0, op1)),
15534 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15535 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15536 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15537 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15538 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15539 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15540 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15541 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
15542 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
15543 else if (GET_CODE (op1) == UNSPEC
15544 && XINT (op1, 1) == UNSPEC_SP_TEST)
15546 rtx op1b = XVECEXP (op1, 0, 0);
15547 comp_mode = CCEQmode;
15548 compare_result = gen_reg_rtx (CCEQmode);
15549 if (TARGET_64BIT)
15550 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
15551 else
15552 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
15554 else
15555 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
15556 gen_rtx_COMPARE (comp_mode, op0, op1)));
15559 /* Some kinds of FP comparisons need an OR operation;
15560 under flag_finite_math_only we don't bother. */
15561 if (FLOAT_MODE_P (mode)
15562 && !flag_finite_math_only
15563 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
15564 && (code == LE || code == GE
15565 || code == UNEQ || code == LTGT
15566 || code == UNGT || code == UNLT))
15568 enum rtx_code or1, or2;
15569 rtx or1_rtx, or2_rtx, compare2_rtx;
15570 rtx or_result = gen_reg_rtx (CCEQmode);
15572 switch (code)
15574 case LE: or1 = LT; or2 = EQ; break;
15575 case GE: or1 = GT; or2 = EQ; break;
15576 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
15577 case LTGT: or1 = LT; or2 = GT; break;
15578 case UNGT: or1 = UNORDERED; or2 = GT; break;
15579 case UNLT: or1 = UNORDERED; or2 = LT; break;
15580 default: gcc_unreachable ();
15582 validate_condition_mode (or1, comp_mode);
15583 validate_condition_mode (or2, comp_mode);
15584 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
15585 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
15586 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
15587 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
15588 const_true_rtx);
15589 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
15591 compare_result = or_result;
15592 code = EQ;
15595 validate_condition_mode (code, GET_MODE (compare_result));
15597 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
15601 /* Emit the RTL for an sISEL pattern. */
15603 void
15604 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
15606 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
15609 void
15610 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
15612 rtx condition_rtx;
15613 enum machine_mode op_mode;
15614 enum rtx_code cond_code;
15615 rtx result = operands[0];
15617 if (TARGET_ISEL && (mode == SImode || mode == DImode))
15619 rs6000_emit_sISEL (mode, operands);
15620 return;
15623 condition_rtx = rs6000_generate_compare (operands[1], mode);
15624 cond_code = GET_CODE (condition_rtx);
15626 if (FLOAT_MODE_P (mode)
15627 && !TARGET_FPRS && TARGET_HARD_FLOAT)
15629 rtx t;
15631 PUT_MODE (condition_rtx, SImode);
15632 t = XEXP (condition_rtx, 0);
15634 gcc_assert (cond_code == NE || cond_code == EQ);
15636 if (cond_code == NE)
15637 emit_insn (gen_e500_flip_gt_bit (t, t));
15639 emit_insn (gen_move_from_CR_gt_bit (result, t));
15640 return;
15643 if (cond_code == NE
15644 || cond_code == GE || cond_code == LE
15645 || cond_code == GEU || cond_code == LEU
15646 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
15648 rtx not_result = gen_reg_rtx (CCEQmode);
15649 rtx not_op, rev_cond_rtx;
15650 enum machine_mode cc_mode;
15652 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
15654 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
15655 SImode, XEXP (condition_rtx, 0), const0_rtx);
15656 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
15657 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
15658 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
15661 op_mode = GET_MODE (XEXP (operands[1], 0));
15662 if (op_mode == VOIDmode)
15663 op_mode = GET_MODE (XEXP (operands[1], 1));
15665 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
15667 PUT_MODE (condition_rtx, DImode);
15668 convert_move (result, condition_rtx, 0);
15670 else
15672 PUT_MODE (condition_rtx, SImode);
15673 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
15677 /* Emit a branch of kind CODE to location LOC. */
15679 void
15680 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
15682 rtx condition_rtx, loc_ref;
15684 condition_rtx = rs6000_generate_compare (operands[0], mode);
15685 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
15686 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
15687 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
15688 loc_ref, pc_rtx)));
15691 /* Return the string to output a conditional branch to LABEL, which is
15692 the operand number of the label, or -1 if the branch is really a
15693 conditional return.
15695 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
15696 condition code register and its mode specifies what kind of
15697 comparison we made.
15699 REVERSED is nonzero if we should reverse the sense of the comparison.
15701 INSN is the insn. */
15703 char *
15704 output_cbranch (rtx op, const char *label, int reversed, rtx insn)
15706 static char string[64];
15707 enum rtx_code code = GET_CODE (op);
15708 rtx cc_reg = XEXP (op, 0);
15709 enum machine_mode mode = GET_MODE (cc_reg);
15710 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
15711 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
15712 int really_reversed = reversed ^ need_longbranch;
15713 char *s = string;
15714 const char *ccode;
15715 const char *pred;
15716 rtx note;
15718 validate_condition_mode (code, mode);
15720 /* Work out which way this really branches. We could use
15721 reverse_condition_maybe_unordered here always but this
15722 makes the resulting assembler clearer. */
15723 if (really_reversed)
15725 /* Reversal of FP compares takes special care -- an ordered compare
15726 becomes an unordered compare and vice versa. */
15727 if (mode == CCFPmode)
15728 code = reverse_condition_maybe_unordered (code);
15729 else
15730 code = reverse_condition (code);
15733 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
15735 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
15736 to the GT bit. */
15737 switch (code)
15739 case EQ:
15740 /* Opposite of GT. */
15741 code = GT;
15742 break;
15744 case NE:
15745 code = UNLE;
15746 break;
15748 default:
15749 gcc_unreachable ();
15753 switch (code)
15755 /* Not all of these are actually distinct opcodes, but
15756 we distinguish them for clarity of the resulting assembler. */
15757 case NE: case LTGT:
15758 ccode = "ne"; break;
15759 case EQ: case UNEQ:
15760 ccode = "eq"; break;
15761 case GE: case GEU:
15762 ccode = "ge"; break;
15763 case GT: case GTU: case UNGT:
15764 ccode = "gt"; break;
15765 case LE: case LEU:
15766 ccode = "le"; break;
15767 case LT: case LTU: case UNLT:
15768 ccode = "lt"; break;
15769 case UNORDERED: ccode = "un"; break;
15770 case ORDERED: ccode = "nu"; break;
15771 case UNGE: ccode = "nl"; break;
15772 case UNLE: ccode = "ng"; break;
15773 default:
15774 gcc_unreachable ();
15777 /* Maybe we have a guess as to how likely the branch is.
15778 The old mnemonics don't have a way to specify this information. */
15779 pred = "";
15780 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
15781 if (note != NULL_RTX)
15783 /* PROB is the difference from 50%. */
15784 int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
15786 /* Only hint for highly probable/improbable branches on newer
15787 cpus as static prediction overrides processor dynamic
15788 prediction. For older cpus we may as well always hint, but
15789 assume not taken for branches that are very close to 50% as a
15790 mispredicted taken branch is more expensive than a
15791 mispredicted not-taken branch. */
15792 if (rs6000_always_hint
15793 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
15794 && br_prob_note_reliable_p (note)))
15796 if (abs (prob) > REG_BR_PROB_BASE / 20
15797 && ((prob > 0) ^ need_longbranch))
15798 pred = "+";
15799 else
15800 pred = "-";
15804 if (label == NULL)
15805 s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
15806 else
15807 s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
15809 /* We need to escape any '%' characters in the reg_names string.
15810 Assume they'd only be the first character.... */
15811 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
15812 *s++ = '%';
15813 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
15815 if (label != NULL)
15817 /* If the branch distance was too far, we may have to use an
15818 unconditional branch to go the distance. */
15819 if (need_longbranch)
15820 s += sprintf (s, ",$+8\n\tb %s", label);
15821 else
15822 s += sprintf (s, ",%s", label);
15825 return string;
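/* A typical result is "beq+ 0,.L5", a predicted-taken equality branch
   on cr0; an out-of-range target instead produces the long-branch
   form "bne 0,$+8\n\tb .L5", which reverses the test and skips over
   an unconditional branch to the real destination.  */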
15828 /* Return the string to flip the GT bit on a CR. */
15829 char *
15830 output_e500_flip_gt_bit (rtx dst, rtx src)
15832 static char string[64];
15833 int a, b;
15835 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
15836 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
15838 /* GT bit. */
15839 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
15840 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
15842 sprintf (string, "crnot %d,%d", a, b);
15843 return string;
15846 /* Return insn for VSX or Altivec comparisons. */
15848 static rtx
15849 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
15851 rtx mask;
15852 enum machine_mode mode = GET_MODE (op0);
15854 switch (code)
15856 default:
15857 break;
15859 case GE:
15860 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
15861 return NULL_RTX;
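/* FALLTHRU: floating-point GE has a direct instruction and is
   handled with the codes below.  */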
15863 case EQ:
15864 case GT:
15865 case GTU:
15866 case ORDERED:
15867 case UNORDERED:
15868 case UNEQ:
15869 case LTGT:
15870 mask = gen_reg_rtx (mode);
15871 emit_insn (gen_rtx_SET (VOIDmode,
15872 mask,
15873 gen_rtx_fmt_ee (code, mode, op0, op1)));
15874 return mask;
15877 return NULL_RTX;
15880 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
15881 DMODE is expected destination mode. This is a recursive function. */
15883 static rtx
15884 rs6000_emit_vector_compare (enum rtx_code rcode,
15885 rtx op0, rtx op1,
15886 enum machine_mode dmode)
15888 rtx mask;
15889 bool swap_operands = false;
15890 bool try_again = false;
15892 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
15893 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
15895 /* See if the comparison works as is. */
15896 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
15897 if (mask)
15898 return mask;
15900 switch (rcode)
15902 case LT:
15903 rcode = GT;
15904 swap_operands = true;
15905 try_again = true;
15906 break;
15907 case LTU:
15908 rcode = GTU;
15909 swap_operands = true;
15910 try_again = true;
15911 break;
15912 case NE:
15913 case UNLE:
15914 case UNLT:
15915 case UNGE:
15916 case UNGT:
15917 /* Invert condition and try again.
15918 e.g., A != B becomes ~(A==B). */
15920 enum rtx_code rev_code;
15921 enum insn_code nor_code;
15922 rtx mask2;
15924 rev_code = reverse_condition_maybe_unordered (rcode);
15925 if (rev_code == UNKNOWN)
15926 return NULL_RTX;
15928 nor_code = optab_handler (one_cmpl_optab, dmode);
15929 if (nor_code == CODE_FOR_nothing)
15930 return NULL_RTX;
15932 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
15933 if (!mask2)
15934 return NULL_RTX;
15936 mask = gen_reg_rtx (dmode);
15937 emit_insn (GEN_FCN (nor_code) (mask, mask2));
15938 return mask;
15940 break;
15941 case GE:
15942 case GEU:
15943 case LE:
15944 case LEU:
15945 /* Try GT/GTU/LT/LTU OR EQ */
15947 rtx c_rtx, eq_rtx;
15948 enum insn_code ior_code;
15949 enum rtx_code new_code;
15951 switch (rcode)
15953 case GE:
15954 new_code = GT;
15955 break;
15957 case GEU:
15958 new_code = GTU;
15959 break;
15961 case LE:
15962 new_code = LT;
15963 break;
15965 case LEU:
15966 new_code = LTU;
15967 break;
15969 default:
15970 gcc_unreachable ();
15973 ior_code = optab_handler (ior_optab, dmode);
15974 if (ior_code == CODE_FOR_nothing)
15975 return NULL_RTX;
15977 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
15978 if (!c_rtx)
15979 return NULL_RTX;
15981 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
15982 if (!eq_rtx)
15983 return NULL_RTX;
15985 mask = gen_reg_rtx (dmode);
15986 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
15987 return mask;
15989 break;
15990 default:
15991 return NULL_RTX;
15994 if (try_again)
15996 if (swap_operands)
15998 rtx tmp;
15999 tmp = op0;
16000 op0 = op1;
16001 op1 = tmp;
16004 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
16005 if (mask)
16006 return mask;
16009 /* You only get two chances. */
16010 return NULL_RTX;
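/* So, for example, a V4SI LE, which has no direct instruction, is
   emitted as GT OR-ed with EQ, LT is retried as GT with the operands
   swapped, and NE becomes an EQ compare followed by a one's
   complement of the mask.  */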
16013 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
16014 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
16015 operands for the relation operation COND. */
16017 int
16018 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
16019 rtx cond, rtx cc_op0, rtx cc_op1)
16021 enum machine_mode dest_mode = GET_MODE (dest);
16022 enum machine_mode mask_mode = GET_MODE (cc_op0);
16023 enum rtx_code rcode = GET_CODE (cond);
16024 enum machine_mode cc_mode = CCmode;
16025 rtx mask;
16026 rtx cond2;
16027 rtx tmp;
16028 bool invert_move = false;
16030 if (VECTOR_UNIT_NONE_P (dest_mode))
16031 return 0;
16033 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
16034 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
16036 switch (rcode)
16038 /* Swap operands if we can, and fall back to doing the operation as
16039 specified, and doing a NOR to invert the test. */
16040 case NE:
16041 case UNLE:
16042 case UNLT:
16043 case UNGE:
16044 case UNGT:
16045 /* Invert condition and try again.
16046 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
16047 invert_move = true;
16048 rcode = reverse_condition_maybe_unordered (rcode);
16049 if (rcode == UNKNOWN)
16050 return 0;
16051 break;
16053 /* Mark unsigned tests with CCUNSmode. */
16054 case GTU:
16055 case GEU:
16056 case LTU:
16057 case LEU:
16058 cc_mode = CCUNSmode;
16059 break;
16061 default:
16062 break;
16065 /* Get the vector mask for the given relational operations. */
16066 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
16068 if (!mask)
16069 return 0;
16071 if (invert_move)
16073 tmp = op_true;
16074 op_true = op_false;
16075 op_false = tmp;
16078 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
16079 CONST0_RTX (dest_mode));
16080 emit_insn (gen_rtx_SET (VOIDmode,
16081 dest,
16082 gen_rtx_IF_THEN_ELSE (dest_mode,
16083 cond2,
16084 op_true,
16085 op_false)));
16086 return 1;
16089 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
16090 operands of the last comparison is nonzero/true, FALSE_COND if it
16091 is zero/false. Return 0 if the hardware has no such operation. */
16093 int
16094 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16096 enum rtx_code code = GET_CODE (op);
16097 rtx op0 = XEXP (op, 0);
16098 rtx op1 = XEXP (op, 1);
16099 REAL_VALUE_TYPE c1;
16100 enum machine_mode compare_mode = GET_MODE (op0);
16101 enum machine_mode result_mode = GET_MODE (dest);
16102 rtx temp;
16103 bool is_against_zero;
16105 /* These modes should always match. */
16106 if (GET_MODE (op1) != compare_mode
16107 /* In the isel case however, we can use a compare immediate, so
16108 op1 may be a small constant. */
16109 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
16110 return 0;
16111 if (GET_MODE (true_cond) != result_mode)
16112 return 0;
16113 if (GET_MODE (false_cond) != result_mode)
16114 return 0;
16116 /* First, work out if the hardware can do this at all, or
16117 if it's too slow.... */
16118 if (!FLOAT_MODE_P (compare_mode))
16120 if (TARGET_ISEL)
16121 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
16122 return 0;
16124 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
16125 && SCALAR_FLOAT_MODE_P (compare_mode))
16126 return 0;
16128 is_against_zero = op1 == CONST0_RTX (compare_mode);
16130 /* A floating-point subtract might overflow, underflow, or produce
16131 an inexact result, thus changing the floating-point flags, so it
16132 can't be generated if we care about that. It's safe if one side
16133 of the construct is zero, since then no subtract will be
16134 generated. */
16135 if (SCALAR_FLOAT_MODE_P (compare_mode)
16136 && flag_trapping_math && ! is_against_zero)
16137 return 0;
16139 /* Eliminate half of the comparisons by switching operands, this
16140 makes the remaining code simpler. */
16141 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
16142 || code == LTGT || code == LT || code == UNLE)
16144 code = reverse_condition_maybe_unordered (code);
16145 temp = true_cond;
16146 true_cond = false_cond;
16147 false_cond = temp;
16150 /* UNEQ and LTGT take four instructions for a comparison with zero,
16151 so it'll probably be faster to use a branch here too. */
16152 if (code == UNEQ && HONOR_NANS (compare_mode))
16153 return 0;
16155 if (GET_CODE (op1) == CONST_DOUBLE)
16156 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
16158 /* We're going to try to implement comparisons by performing
16159 a subtract, then comparing against zero. Unfortunately,
16160 Inf - Inf is NaN which is not zero, and so if we don't
16161 know that the operand is finite and the comparison
16162 would treat EQ different to UNORDERED, we can't do it. */
16163 if (HONOR_INFINITIES (compare_mode)
16164 && code != GT && code != UNGE
16165 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
16166 /* Constructs of the form (a OP b ? a : b) are safe. */
16167 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
16168 || (! rtx_equal_p (op0, true_cond)
16169 && ! rtx_equal_p (op1, true_cond))))
16170 return 0;
16172 /* At this point we know we can use fsel. */
16174 /* Reduce the comparison to a comparison against zero. */
16175 if (! is_against_zero)
16177 temp = gen_reg_rtx (compare_mode);
16178 emit_insn (gen_rtx_SET (VOIDmode, temp,
16179 gen_rtx_MINUS (compare_mode, op0, op1)));
16180 op0 = temp;
16181 op1 = CONST0_RTX (compare_mode);
16184 /* If we don't care about NaNs we can reduce some of the comparisons
16185 down to faster ones. */
16186 if (! HONOR_NANS (compare_mode))
16187 switch (code)
16189 case GT:
16190 code = LE;
16191 temp = true_cond;
16192 true_cond = false_cond;
16193 false_cond = temp;
16194 break;
16195 case UNGE:
16196 code = GE;
16197 break;
16198 case UNEQ:
16199 code = EQ;
16200 break;
16201 default:
16202 break;
16205 /* Now, reduce everything down to a GE. */
16206 switch (code)
16208 case GE:
16209 break;
16211 case LE:
16212 temp = gen_reg_rtx (compare_mode);
16213 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16214 op0 = temp;
16215 break;
16217 case ORDERED:
16218 temp = gen_reg_rtx (compare_mode);
16219 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
16220 op0 = temp;
16221 break;
16223 case EQ:
16224 temp = gen_reg_rtx (compare_mode);
16225 emit_insn (gen_rtx_SET (VOIDmode, temp,
16226 gen_rtx_NEG (compare_mode,
16227 gen_rtx_ABS (compare_mode, op0))));
16228 op0 = temp;
16229 break;
16231 case UNGE:
16232 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
16233 temp = gen_reg_rtx (result_mode);
16234 emit_insn (gen_rtx_SET (VOIDmode, temp,
16235 gen_rtx_IF_THEN_ELSE (result_mode,
16236 gen_rtx_GE (VOIDmode,
16237 op0, op1),
16238 true_cond, false_cond)));
16239 false_cond = true_cond;
16240 true_cond = temp;
16242 temp = gen_reg_rtx (compare_mode);
16243 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16244 op0 = temp;
16245 break;
16247 case GT:
16248 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
16249 temp = gen_reg_rtx (result_mode);
16250 emit_insn (gen_rtx_SET (VOIDmode, temp,
16251 gen_rtx_IF_THEN_ELSE (result_mode,
16252 gen_rtx_GE (VOIDmode,
16253 op0, op1),
16254 true_cond, false_cond)));
16255 true_cond = false_cond;
16256 false_cond = temp;
16258 temp = gen_reg_rtx (compare_mode);
16259 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
16260 op0 = temp;
16261 break;
16263 default:
16264 gcc_unreachable ();
16267 emit_insn (gen_rtx_SET (VOIDmode, dest,
16268 gen_rtx_IF_THEN_ELSE (result_mode,
16269 gen_rtx_GE (VOIDmode,
16270 op0, op1),
16271 true_cond, false_cond)));
16272 return 1;
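/* The reduction to GE above is what makes fsel applicable: e.g. an
   EQ test "a == b" becomes "-fabs (a - b) >= 0", which holds exactly
   when the subtraction yields zero.  */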
16275 /* Same as above, but for ints (isel). */
16277 static int
16278 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
16280 rtx condition_rtx, cr;
16281 enum machine_mode mode = GET_MODE (dest);
16282 enum rtx_code cond_code;
16283 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
16284 bool signedp;
16286 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
16287 return 0;
16289 /* We still have to do the compare, because isel doesn't do a
16290 compare, it just looks at the CRx bits set by a previous compare
16291 instruction. */
16292 condition_rtx = rs6000_generate_compare (op, mode);
16293 cond_code = GET_CODE (condition_rtx);
16294 cr = XEXP (condition_rtx, 0);
16295 signedp = GET_MODE (cr) == CCmode;
16297 isel_func = (mode == SImode
16298 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
16299 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
16301 switch (cond_code)
16303 case LT: case GT: case LTU: case GTU: case EQ:
16304 /* isel handles these directly. */
16305 break;
16307 default:
16308 /* We need to swap the sense of the comparison. */
16310 rtx t = true_cond;
16311 true_cond = false_cond;
16312 false_cond = t;
16313 PUT_CODE (condition_rtx, reverse_condition (cond_code));
16315 break;
16318 false_cond = force_reg (mode, false_cond);
16319 if (true_cond != const0_rtx)
16320 true_cond = force_reg (mode, true_cond);
16322 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
16324 return 1;
16327 const char *
16328 output_isel (rtx *operands)
16330 enum rtx_code code;
16332 code = GET_CODE (operands[1]);
16334 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
16336 gcc_assert (GET_CODE (operands[2]) == REG
16337 && GET_CODE (operands[3]) == REG);
16338 PUT_CODE (operands[1], reverse_condition (code));
16339 return "isel %0,%3,%2,%j1";
16342 return "isel %0,%2,%3,%j1";
16345 void
16346 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
16348 enum machine_mode mode = GET_MODE (op0);
16349 enum rtx_code c;
16350 rtx target;
16352 /* VSX/altivec have direct min/max insns. */
16353 if ((code == SMAX || code == SMIN)
16354 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
16355 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
16357 emit_insn (gen_rtx_SET (VOIDmode,
16358 dest,
16359 gen_rtx_fmt_ee (code, mode, op0, op1)));
16360 return;
16363 if (code == SMAX || code == SMIN)
16364 c = GE;
16365 else
16366 c = GEU;
16368 if (code == SMAX || code == UMAX)
16369 target = emit_conditional_move (dest, c, op0, op1, mode,
16370 op0, op1, mode, 0);
16371 else
16372 target = emit_conditional_move (dest, c, op0, op1, mode,
16373 op1, op0, mode, 0);
16374 gcc_assert (target);
16375 if (target != dest)
16376 emit_move_insn (dest, target);
16379 /* A subroutine of the atomic operation splitters. Jump to LABEL if
16380 COND is true. Mark the jump as unlikely to be taken. */
16382 static void
16383 emit_unlikely_jump (rtx cond, rtx label)
16385 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
16386 rtx x;
16388 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
16389 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
16390 add_reg_note (x, REG_BR_PROB, very_unlikely);
16393 /* A subroutine of the atomic operation splitters. Emit a load-locked
16394 instruction in MODE. */
16396 static void
16397 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
16399 rtx (*fn) (rtx, rtx) = NULL;
16401 switch (mode)
16403 case SImode:
16404 fn = gen_load_lockedsi;
16405 break;
16406 case DImode:
16407 fn = gen_load_lockeddi;
16408 break;
16409 default:
16410 gcc_unreachable ();
16412 emit_insn (fn (reg, mem));
16415 /* A subroutine of the atomic operation splitters. Emit a store-conditional
16416 instruction in MODE. */
16418 static void
16419 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
16421 rtx (*fn) (rtx, rtx, rtx) = NULL;
16423 switch (mode)
16425 case SImode:
16426 fn = gen_store_conditionalsi;
16427 break;
16428 case DImode:
16429 fn = gen_store_conditionaldi;
16430 break;
16431 default:
16432 gcc_unreachable ();
16435 /* Emit sync before stwcx. to address PPC405 Erratum. */
16436 if (PPC405_ERRATUM77)
16437 emit_insn (gen_hwsync ());
16439 emit_insn (fn (res, mem, val));
16442 /* Expand barriers before and after a load_locked/store_cond sequence. */
16444 static void
16445 rs6000_pre_atomic_barrier (enum memmodel model)
16447 switch (model)
16449 case MEMMODEL_RELAXED:
16450 case MEMMODEL_CONSUME:
16451 case MEMMODEL_ACQUIRE:
16452 break;
16453 case MEMMODEL_RELEASE:
16454 case MEMMODEL_ACQ_REL:
16455 emit_insn (gen_lwsync ());
16456 break;
16457 case MEMMODEL_SEQ_CST:
16458 emit_insn (gen_hwsync ());
16459 break;
16460 default:
16461 gcc_unreachable ();
16465 static void
16466 rs6000_post_atomic_barrier (enum memmodel model)
16468 switch (model)
16470 case MEMMODEL_RELAXED:
16471 case MEMMODEL_CONSUME:
16472 case MEMMODEL_RELEASE:
16473 break;
16474 case MEMMODEL_ACQUIRE:
16475 case MEMMODEL_ACQ_REL:
16476 case MEMMODEL_SEQ_CST:
16477 emit_insn (gen_isync ());
16478 break;
16479 default:
16480 gcc_unreachable ();
16484 /* A subroutine of the various atomic expanders. For sub-word operations,
16485 we must adjust things to operate on SImode. Given the original MEM,
16486 return a new aligned memory. Also build and return the quantities by
16487 which to shift and mask. */
16489 static rtx
16490 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
16492 rtx addr, align, shift, mask, mem;
16493 HOST_WIDE_INT shift_mask;
16494 enum machine_mode mode = GET_MODE (orig_mem);
16496 /* For smaller modes, we have to implement this via SImode. */
16497 shift_mask = (mode == QImode ? 0x18 : 0x10);
16499 addr = XEXP (orig_mem, 0);
16500 addr = force_reg (GET_MODE (addr), addr);
16502 /* Aligned memory containing subword. Generate a new memory. We
16503 do not want any of the existing MEM_ATTR data, as we're now
16504 accessing memory outside the original object. */
16505 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
16506 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16507 mem = gen_rtx_MEM (SImode, align);
16508 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
16509 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
16510 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
16512 /* Shift amount for subword relative to aligned word. */
16513 shift = gen_reg_rtx (SImode);
16514 addr = gen_lowpart (SImode, addr);
16515 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
16516 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
16517 shift, 1, OPTAB_LIB_WIDEN);
16518 *pshift = shift;
16520 /* Mask for insertion. */
16521 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
16522 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
16523 *pmask = mask;
16525 return mem;
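/* Worked example (big-endian): for a QImode access at address A,
   the aligned word is at A & -4, and the rlwinm/xor pair computes
   SHIFT = ((A & 3) << 3) ^ 24, so byte offset 0 -- the most
   significant byte of the word -- gets SHIFT = 24 and
   MASK = 0xff << 24.  */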
16528 /* A subroutine of the various atomic expanders. For sub-word operands,
16529 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
16531 static rtx
16532 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
16534 rtx x;
16536 x = gen_reg_rtx (SImode);
16537 emit_insn (gen_rtx_SET (VOIDmode, x,
16538 gen_rtx_AND (SImode,
16539 gen_rtx_NOT (SImode, mask),
16540 oldval)));
16542 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
16544 return x;
16547 /* A subroutine of the various atomic expanders. For sub-word operands,
16548 extract WIDE to NARROW via SHIFT. */
16550 static void
16551 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
16553 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
16554 wide, 1, OPTAB_LIB_WIDEN);
16555 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
16558 /* Expand an atomic compare and swap operation. */
16560 void
16561 rs6000_expand_atomic_compare_and_swap (rtx operands[])
16563 rtx boolval, retval, mem, oldval, newval, cond;
16564 rtx label1, label2, x, mask, shift;
16565 enum machine_mode mode;
16566 enum memmodel mod_s, mod_f;
16567 bool is_weak;
16569 boolval = operands[0];
16570 retval = operands[1];
16571 mem = operands[2];
16572 oldval = operands[3];
16573 newval = operands[4];
16574 is_weak = (INTVAL (operands[5]) != 0);
16575 mod_s = (enum memmodel) INTVAL (operands[6]);
16576 mod_f = (enum memmodel) INTVAL (operands[7]);
16577 mode = GET_MODE (mem);
16579 mask = shift = NULL_RTX;
16580 if (mode == QImode || mode == HImode)
16582 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16584 /* Shift and mask OLDVAL into position within the word. */
16585 oldval = convert_modes (SImode, mode, oldval, 1);
16586 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
16587 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16589 /* Shift and mask NEWVAL into position within the word. */
16590 newval = convert_modes (SImode, mode, newval, 1);
16591 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
16592 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16594 /* Prepare to adjust the return value. */
16595 retval = gen_reg_rtx (SImode);
16596 mode = SImode;
16598 else if (reg_overlap_mentioned_p (retval, oldval))
16599 oldval = copy_to_reg (oldval);
16601 rs6000_pre_atomic_barrier (mod_s);
16603 label1 = NULL_RTX;
16604 if (!is_weak)
16606 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16607 emit_label (XEXP (label1, 0));
16609 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16611 emit_load_locked (mode, retval, mem);
16613 x = retval;
16614 if (mask)
16616 x = expand_simple_binop (SImode, AND, retval, mask,
16617 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16620 cond = gen_reg_rtx (CCmode);
16621 x = gen_rtx_COMPARE (CCmode, x, oldval);
16622 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
16624 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16625 emit_unlikely_jump (x, label2);
16627 x = newval;
16628 if (mask)
16629 x = rs6000_mask_atomic_subword (retval, newval, mask);
16631 emit_store_conditional (mode, cond, mem, x);
16633 if (!is_weak)
16635 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16636 emit_unlikely_jump (x, label1);
16639 if (mod_f != MEMMODEL_RELAXED)
16640 emit_label (XEXP (label2, 0));
16642 rs6000_post_atomic_barrier (mod_s);
16644 if (mod_f == MEMMODEL_RELAXED)
16645 emit_label (XEXP (label2, 0));
16647 if (shift)
16648 rs6000_finish_atomic_subword (operands[1], retval, shift);
16650 /* In all cases, CR0 contains EQ on success, and NE on failure. */
16651 x = gen_rtx_EQ (SImode, cond, const0_rtx);
16652 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
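/* Usage sketch (ours): a strong SImode compare-and-swap such as

     __atomic_compare_exchange_n (p, &expected, desired, 0,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

   reaches this expander and yields, roughly:

       hwsync                      # pre-barrier for SEQ_CST
     1: lwarx  rR, 0, rP           # load-reserved
       cmpw   cr0, rR, rOLD
       bne-   cr0, 2f              # values differ: fail
       stwcx. rNEW, 0, rP          # store-conditional
       bne-   cr0, 1b              # reservation lost: retry (strong only)
     2: isync                      # post-barrier for SEQ_CST

   with CR0 holding EQ on success and NE on failure, as noted above.  */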
16655 /* Expand an atomic exchange operation. */
16657 void
16658 rs6000_expand_atomic_exchange (rtx operands[])
16660 rtx retval, mem, val, cond;
16661 enum machine_mode mode;
16662 enum memmodel model;
16663 rtx label, x, mask, shift;
16665 retval = operands[0];
16666 mem = operands[1];
16667 val = operands[2];
16668 model = (enum memmodel) INTVAL (operands[3]);
16669 mode = GET_MODE (mem);
16671 mask = shift = NULL_RTX;
16672 if (mode == QImode || mode == HImode)
16674 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16676 /* Shift and mask VAL into position within the word. */
16677 val = convert_modes (SImode, mode, val, 1);
16678 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16679 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16681 /* Prepare to adjust the return value. */
16682 retval = gen_reg_rtx (SImode);
16683 mode = SImode;
16686 rs6000_pre_atomic_barrier (model);
16688 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
16689 emit_label (XEXP (label, 0));
16691 emit_load_locked (mode, retval, mem);
16693 x = val;
16694 if (mask)
16695 x = rs6000_mask_atomic_subword (retval, val, mask);
16697 cond = gen_reg_rtx (CCmode);
16698 emit_store_conditional (mode, cond, mem, x);
16700 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16701 emit_unlikely_jump (x, label);
16703 rs6000_post_atomic_barrier (model);
16705 if (shift)
16706 rs6000_finish_atomic_subword (operands[0], retval, shift);
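/* Usage sketch (ours): __atomic_exchange_n (p, v, __ATOMIC_ACQ_REL)
   lands here and becomes, roughly,

       lwsync
     1: lwarx  rR, 0, rP
       stwcx. rV, 0, rP
       bne-   cr0, 1b
       isync

   leaving the previous value of *p in RETVAL.  */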
16709 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
16710 to perform. MEM is the memory on which to operate. VAL is the second
16711 operand of the binary operator. BEFORE and AFTER are optional locations to
16712 return the value of MEM either before or after the operation. MODEL_RTX
16713 is a CONST_INT containing the memory model to use. */
16715 void
16716 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
16717 rtx orig_before, rtx orig_after, rtx model_rtx)
16719 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
16720 enum machine_mode mode = GET_MODE (mem);
16721 rtx label, x, cond, mask, shift;
16722 rtx before = orig_before, after = orig_after;
16724 mask = shift = NULL_RTX;
16725 if (mode == QImode || mode == HImode)
16727 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
16729 /* Shift and mask VAL into position within the word. */
16730 val = convert_modes (SImode, mode, val, 1);
16731 val = expand_simple_binop (SImode, ASHIFT, val, shift,
16732 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16734 switch (code)
16736 case IOR:
16737 case XOR:
16738 /* We've already zero-extended VAL. That is sufficient to
16739 make certain that it does not affect other bits. */
16740 mask = NULL;
16741 break;
16743 case AND:
16744 /* If we make certain that all of the other bits in VAL are
16745 set, that will be sufficient to not affect other bits. */
16746 x = gen_rtx_NOT (SImode, mask);
16747 x = gen_rtx_IOR (SImode, x, val);
16748 emit_insn (gen_rtx_SET (VOIDmode, val, x));
16749 mask = NULL;
16750 break;
16752 case NOT:
16753 case PLUS:
16754 case MINUS:
16755 /* These will all affect bits outside the field and need
16756 adjustment via MASK within the loop. */
16757 break;
16759 default:
16760 gcc_unreachable ();
16763 /* Prepare to adjust the return value. */
16764 before = gen_reg_rtx (SImode);
16765 if (after)
16766 after = gen_reg_rtx (SImode);
16767 mode = SImode;
16770 rs6000_pre_atomic_barrier (model);
16772 label = gen_label_rtx ();
16773 emit_label (label);
16774 label = gen_rtx_LABEL_REF (VOIDmode, label);
16776 if (before == NULL_RTX)
16777 before = gen_reg_rtx (mode);
16779 emit_load_locked (mode, before, mem);
16781 if (code == NOT)
16783 x = expand_simple_binop (mode, AND, before, val,
16784 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16785 after = expand_simple_unop (mode, NOT, x, after, 1);
16787 else
16789 after = expand_simple_binop (mode, code, before, val,
16790 after, 1, OPTAB_LIB_WIDEN);
16793 x = after;
16794 if (mask)
16796 x = expand_simple_binop (SImode, AND, after, mask,
16797 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16798 x = rs6000_mask_atomic_subword (before, x, mask);
16801 cond = gen_reg_rtx (CCmode);
16802 emit_store_conditional (mode, cond, mem, x);
16804 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
16805 emit_unlikely_jump (x, label);
16807 rs6000_post_atomic_barrier (model);
16809 if (shift)
16811 if (orig_before)
16812 rs6000_finish_atomic_subword (orig_before, before, shift);
16813 if (orig_after)
16814 rs6000_finish_atomic_subword (orig_after, after, shift);
16816 else if (orig_after && after != orig_after)
16817 emit_move_insn (orig_after, after);
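/* Usage sketch (ours): __atomic_fetch_add (p, 1, m) asks for ORIG_BEFORE,
   while __atomic_add_fetch (p, 1, m) asks for ORIG_AFTER.  For a sub-word
   AND, e.g. a QImode operand at shift 8 with mask 0xFF00, the AND case
   above rewrites VAL as (VAL << 8) | ~0xFF00 so that the full SImode AND
   in the loop leaves the bytes outside the field untouched.  */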
16820 /* Emit instructions to move SRC to DST. Called by splitters for
16821 multi-register moves. It will emit at most one instruction for
16822 each register that is accessed; that is, it won't emit li/lis pairs
16823 (or equivalent for 64-bit code). One of SRC or DST must be a hard
16824 register. */
16826 void
16827 rs6000_split_multireg_move (rtx dst, rtx src)
16829 /* The register number of the first register being moved. */
16830 int reg;
16831 /* The mode that is to be moved. */
16832 enum machine_mode mode;
16833 /* The mode that the move is being done in, and its size. */
16834 enum machine_mode reg_mode;
16835 int reg_mode_size;
16836 /* The number of registers that will be moved. */
16837 int nregs;
16839 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
16840 mode = GET_MODE (dst);
16841 nregs = hard_regno_nregs[reg][mode];
16842 if (FP_REGNO_P (reg))
16843 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
16844 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
16845 else if (ALTIVEC_REGNO_P (reg))
16846 reg_mode = V16QImode;
16847 else if (TARGET_E500_DOUBLE && mode == TFmode)
16848 reg_mode = DFmode;
16849 else
16850 reg_mode = word_mode;
16851 reg_mode_size = GET_MODE_SIZE (reg_mode);
16853 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
16855 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
16857 /* Move register range backwards, if we might have destructive
16858 overlap. */
16859 int i;
16860 for (i = nregs - 1; i >= 0; i--)
16861 emit_insn (gen_rtx_SET (VOIDmode,
16862 simplify_gen_subreg (reg_mode, dst, mode,
16863 i * reg_mode_size),
16864 simplify_gen_subreg (reg_mode, src, mode,
16865 i * reg_mode_size)));
16867 else
16869 int i;
16870 int j = -1;
16871 bool used_update = false;
16872 rtx restore_basereg = NULL_RTX;
16874 if (MEM_P (src) && INT_REGNO_P (reg))
16876 rtx breg;
16878 if (GET_CODE (XEXP (src, 0)) == PRE_INC
16879 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
16881 rtx delta_rtx;
16882 breg = XEXP (XEXP (src, 0), 0);
16883 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
16884 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
16885 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
16886 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
16887 src = replace_equiv_address (src, breg);
16889 else if (! rs6000_offsettable_memref_p (src))
16891 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
16893 rtx basereg = XEXP (XEXP (src, 0), 0);
16894 if (TARGET_UPDATE)
16896 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
16897 emit_insn (gen_rtx_SET (VOIDmode, ndst,
16898 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
16899 used_update = true;
16901 else
16902 emit_insn (gen_rtx_SET (VOIDmode, basereg,
16903 XEXP (XEXP (src, 0), 1)));
16904 src = replace_equiv_address (src, basereg);
16906 else
16908 rtx basereg = gen_rtx_REG (Pmode, reg);
16909 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
16910 src = replace_equiv_address (src, basereg);
16914 breg = XEXP (src, 0);
16915 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
16916 breg = XEXP (breg, 0);
16918 /* If the base register we are using to address memory is
16919 also a destination reg, then change that register last. */
16920 if (REG_P (breg)
16921 && REGNO (breg) >= REGNO (dst)
16922 && REGNO (breg) < REGNO (dst) + nregs)
16923 j = REGNO (breg) - REGNO (dst);
16925 else if (MEM_P (dst) && INT_REGNO_P (reg))
16927 rtx breg;
16929 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
16930 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
16932 rtx delta_rtx;
16933 breg = XEXP (XEXP (dst, 0), 0);
16934 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
16935 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
16936 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
16938 /* We have to update the breg before doing the store.
16939 Use store with update, if available. */
16941 if (TARGET_UPDATE)
16943 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
16944 emit_insn (TARGET_32BIT
16945 ? (TARGET_POWERPC64
16946 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
16947 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
16948 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
16949 used_update = true;
16951 else
16952 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
16953 dst = replace_equiv_address (dst, breg);
16955 else if (!rs6000_offsettable_memref_p (dst)
16956 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
16958 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
16960 rtx basereg = XEXP (XEXP (dst, 0), 0);
16961 if (TARGET_UPDATE)
16963 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
16964 emit_insn (gen_rtx_SET (VOIDmode,
16965 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
16966 used_update = true;
16968 else
16969 emit_insn (gen_rtx_SET (VOIDmode, basereg,
16970 XEXP (XEXP (dst, 0), 1)));
16971 dst = replace_equiv_address (dst, basereg);
16973 else
16975 rtx basereg = XEXP (XEXP (dst, 0), 0);
16976 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
16977 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
16978 && REG_P (basereg)
16979 && REG_P (offsetreg)
16980 && REGNO (basereg) != REGNO (offsetreg));
16981 if (REGNO (basereg) == 0)
16983 rtx tmp = offsetreg;
16984 offsetreg = basereg;
16985 basereg = tmp;
16987 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
16988 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
16989 dst = replace_equiv_address (dst, basereg);
16992 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
16993 gcc_assert (rs6000_offsettable_memref_p (dst));
16996 for (i = 0; i < nregs; i++)
16998 /* Calculate index to next subword. */
16999 ++j;
17000 if (j == nregs)
17001 j = 0;
17003 /* If the compiler has already emitted the move of the first word
17004 by a store with update, there is no need to do anything. */
17005 if (j == 0 && used_update)
17006 continue;
17008 emit_insn (gen_rtx_SET (VOIDmode,
17009 simplify_gen_subreg (reg_mode, dst, mode,
17010 j * reg_mode_size),
17011 simplify_gen_subreg (reg_mode, src, mode,
17012 j * reg_mode_size)));
17014 if (restore_basereg != NULL_RTX)
17015 emit_insn (restore_basereg);
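/* Worked example (ours): splitting a 32-bit DImode copy from (r4,r5)
   to (r5,r6) forwards would emit "mr r5,r4" first and destroy the r5
   that r6 still needs; since REGNO (src) < REGNO (dst), the first loop
   above instead emits

     mr r6, r5
     mr r5, r4

   i.e. the range is moved backwards to avoid the destructive overlap.  */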
17020 /* This page contains routines that are used to determine what the
17021 function prologue and epilogue code will do and write them out. */
17023 static inline bool
17024 save_reg_p (int r)
17026 return !call_used_regs[r] && df_regs_ever_live_p (r);
17029 /* Return the first fixed-point register that is required to be
17030 saved. 32 if none. */
17032 static int
17033 first_reg_to_save (void)
17035 int first_reg;
17037 /* Find lowest numbered live register. */
17038 for (first_reg = 13; first_reg <= 31; first_reg++)
17039 if (save_reg_p (first_reg))
17040 break;
17042 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
17043 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
17044 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
17045 || (TARGET_TOC && TARGET_MINIMAL_TOC))
17046 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17047 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
17049 #if TARGET_MACHO
17050 if (flag_pic
17051 && crtl->uses_pic_offset_table
17052 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
17053 return RS6000_PIC_OFFSET_TABLE_REGNUM;
17054 #endif
17056 return first_reg;
17059 /* Similar, for FP regs. */
17061 static int
17062 first_fp_reg_to_save (void)
17064 int first_reg;
17066 /* Find lowest numbered live register. */
17067 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
17068 if (save_reg_p (first_reg))
17069 break;
17071 return first_reg;
17074 /* Similar, for AltiVec regs. */
17076 static int
17077 first_altivec_reg_to_save (void)
17079 int i;
17081 /* Stack frame remains as is unless we are in AltiVec ABI. */
17082 if (! TARGET_ALTIVEC_ABI)
17083 return LAST_ALTIVEC_REGNO + 1;
17085 /* On Darwin, the unwind routines are compiled without
17086 TARGET_ALTIVEC, and use save_world to save/restore the
17087 altivec registers when necessary. */
17088 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17089 && ! TARGET_ALTIVEC)
17090 return FIRST_ALTIVEC_REGNO + 20;
17092 /* Find lowest numbered live register. */
17093 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
17094 if (save_reg_p (i))
17095 break;
17097 return i;
17100 /* Return a 32-bit mask of the AltiVec registers we need to set in
17101 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
17102 the 32-bit word is 0. */
17104 static unsigned int
17105 compute_vrsave_mask (void)
17107 unsigned int i, mask = 0;
17109 /* On Darwin, the unwind routines are compiled without
17110 TARGET_ALTIVEC, and use save_world to save/restore the
17111 call-saved altivec registers when necessary. */
17112 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
17113 && ! TARGET_ALTIVEC)
17114 mask |= 0xFFF;
17116 /* First, find out if we use _any_ altivec registers. */
17117 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
17118 if (df_regs_ever_live_p (i))
17119 mask |= ALTIVEC_REG_BIT (i);
17121 if (mask == 0)
17122 return mask;
17124 /* Next, remove the argument registers from the set. These must
17125 be in the VRSAVE mask set by the caller, so we don't need to add
17126 them in again. More importantly, the mask we compute here is
17127 used to generate CLOBBERs in the set_vrsave insn, and we do not
17128 wish the argument registers to die. */
17129 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
17130 mask &= ~ALTIVEC_REG_BIT (i);
17132 /* Similarly, remove the return value from the set. */
17134 bool yes = false;
17135 diddle_return_value (is_altivec_return_reg, &yes);
17136 if (yes)
17137 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
17140 return mask;
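/* Worked example (ours), using the bit numbering stated above (bit 0 is
   the MSB of the 32-bit word): if only V20 and V31 are live, the mask is

     (0x80000000 >> 20) | (0x80000000 >> 31) = 0x00000800 | 0x00000001
                                             = 0x00000801.  */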
17143 /* For a very restricted set of circumstances, we can cut down the
17144 size of prologues/epilogues by calling our own save/restore-the-world
17145 routines. */
17147 static void
17148 compute_save_world_info (rs6000_stack_t *info_ptr)
17150 info_ptr->world_save_p = 1;
17151 info_ptr->world_save_p
17152 = (WORLD_SAVE_P (info_ptr)
17153 && DEFAULT_ABI == ABI_DARWIN
17154 && !cfun->has_nonlocal_label
17155 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
17156 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
17157 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
17158 && info_ptr->cr_save_p);
17160 /* This will not work in conjunction with sibcalls. Make sure there
17161 are none. (This check is expensive, but seldom executed.) */
17162 if (WORLD_SAVE_P (info_ptr))
17164 rtx insn;
17165 for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
17166 if ( GET_CODE (insn) == CALL_INSN
17167 && SIBLING_CALL_P (insn))
17169 info_ptr->world_save_p = 0;
17170 break;
17174 if (WORLD_SAVE_P (info_ptr))
17176 /* Even if we're not touching VRsave, make sure there's room on the
17177 stack for it, if it looks like we're calling SAVE_WORLD, which
17178 will attempt to save it. */
17179 info_ptr->vrsave_size = 4;
17181 /* If we are going to save the world, we need to save the link register too. */
17182 info_ptr->lr_save_p = 1;
17184 /* "Save" the VRsave register too if we're saving the world. */
17185 if (info_ptr->vrsave_mask == 0)
17186 info_ptr->vrsave_mask = compute_vrsave_mask ();
17188 /* Because the Darwin register save/restore routines only handle
17189 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
17190 check. */
17191 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
17192 && (info_ptr->first_altivec_reg_save
17193 >= FIRST_SAVED_ALTIVEC_REGNO));
17195 return;
17199 static void
17200 is_altivec_return_reg (rtx reg, void *xyes)
17202 bool *yes = (bool *) xyes;
17203 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
17204 *yes = true;
17208 /* Look for user-defined global regs in the range FIRST to LAST-1.
17209 We should not restore these, and so cannot use lmw or out-of-line
17210 restore functions if there are any. We also can't save them
17211 (well, emit frame notes for them), because frame unwinding during
17212 exception handling will restore saved registers. */
17214 static bool
17215 global_regs_p (unsigned first, unsigned last)
17217 while (first < last)
17218 if (global_regs[first++])
17219 return true;
17220 return false;
17223 /* Determine the strategy for saving/restoring registers. */
17225 enum {
17226 SAVRES_MULTIPLE = 0x1,
17227 SAVE_INLINE_FPRS = 0x2,
17228 SAVE_INLINE_GPRS = 0x4,
17229 REST_INLINE_FPRS = 0x8,
17230 REST_INLINE_GPRS = 0x10,
17231 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
17232 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
17233 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
17234 SAVE_INLINE_VRS = 0x100,
17235 REST_INLINE_VRS
17236 };
17238 static int
17239 rs6000_savres_strategy (rs6000_stack_t *info,
17240 bool using_static_chain_p)
17242 int strategy = 0;
17243 bool lr_save_p;
17245 if (TARGET_MULTIPLE
17246 && !TARGET_POWERPC64
17247 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
17248 && info->first_gp_reg_save < 31
17249 && !global_regs_p (info->first_gp_reg_save, 32))
17250 strategy |= SAVRES_MULTIPLE;
17252 if (crtl->calls_eh_return
17253 || cfun->machine->ra_need_lr)
17254 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
17255 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
17256 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17258 if (info->first_fp_reg_save == 64
17259 /* The out-of-line FP routines use double-precision stores;
17260 we can't use those routines if we don't have such stores. */
17261 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
17262 || global_regs_p (info->first_fp_reg_save, 64))
17263 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17265 if (info->first_gp_reg_save == 32
17266 || (!(strategy & SAVRES_MULTIPLE)
17267 && global_regs_p (info->first_gp_reg_save, 32)))
17268 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17270 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
17271 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
17272 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17274 /* Define cutoff for using out-of-line functions to save registers. */
17275 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
17277 if (!optimize_size)
17279 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17280 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17281 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17283 else
17285 /* Prefer out-of-line restore if it will exit. */
17286 if (info->first_fp_reg_save > 61)
17287 strategy |= SAVE_INLINE_FPRS;
17288 if (info->first_gp_reg_save > 29)
17290 if (info->first_fp_reg_save == 64)
17291 strategy |= SAVE_INLINE_GPRS;
17292 else
17293 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17295 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
17296 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17299 else if (DEFAULT_ABI == ABI_DARWIN)
17301 if (info->first_fp_reg_save > 60)
17302 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17303 if (info->first_gp_reg_save > 29)
17304 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17305 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17307 else
17309 gcc_checking_assert (DEFAULT_ABI == ABI_AIX);
17310 if (info->first_fp_reg_save > 61)
17311 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
17312 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
17313 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
17316 /* Don't bother to try to save things out-of-line if r11 is occupied
17317 by the static chain. It would require too much fiddling and the
17318 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
17319 pointer on Darwin, and AIX uses r1 or r12. */
17320 if (using_static_chain_p && DEFAULT_ABI != ABI_AIX)
17321 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
17322 | SAVE_INLINE_GPRS
17323 | SAVE_INLINE_VRS | REST_INLINE_VRS);
17325 /* We can only use the out-of-line routines to restore if we've
17326 saved all the registers from first_fp_reg_save in the prologue.
17327 Otherwise, we risk loading garbage. */
17328 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
17330 int i;
17332 for (i = info->first_fp_reg_save; i < 64; i++)
17333 if (!save_reg_p (i))
17335 strategy |= REST_INLINE_FPRS;
17336 break;
17340 /* If we are going to use store multiple, then don't even bother
17341 with the out-of-line routines, since the store-multiple
17342 instruction will always be smaller. */
17343 if ((strategy & SAVRES_MULTIPLE))
17344 strategy |= SAVE_INLINE_GPRS;
17346 /* info->lr_save_p isn't yet set if the only reason lr needs to be
17347 saved is an out-of-line save or restore. Set up the value for
17348 the next test (excluding out-of-line gpr restore). */
17349 lr_save_p = (info->lr_save_p
17350 || !(strategy & SAVE_INLINE_GPRS)
17351 || !(strategy & SAVE_INLINE_FPRS)
17352 || !(strategy & SAVE_INLINE_VRS)
17353 || !(strategy & REST_INLINE_FPRS)
17354 || !(strategy & REST_INLINE_VRS));
17356 /* The situation is more complicated with load multiple. We'd
17357 prefer to use the out-of-line routines for restores, since the
17358 "exit" out-of-line routines can handle the restore of LR and the
17359 frame teardown. However it doesn't make sense to use the
17360 out-of-line routine if that is the only reason we'd need to save
17361 LR, and we can't use the "exit" out-of-line gpr restore if we
17362 have saved some fprs; in those cases it is advantageous to use
17363 load multiple when available. */
17364 if ((strategy & SAVRES_MULTIPLE)
17365 && (!lr_save_p
17366 || info->first_fp_reg_save != 64))
17367 strategy |= REST_INLINE_GPRS;
17369 /* Saving CR interferes with the exit routines used on the SPE, so
17370 just punt here. */
17371 if (TARGET_SPE_ABI
17372 && info->spe_64bit_regs_used
17373 && info->cr_save_p)
17374 strategy |= REST_INLINE_GPRS;
17376 /* We can only use load multiple or the out-of-line routines to
17377 restore if we've used store multiple or out-of-line routines
17378 in the prologue, i.e. if we've saved all the registers from
17379 first_gp_reg_save. Otherwise, we risk loading garbage. */
17380 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
17381 == SAVE_INLINE_GPRS)
17383 int i;
17385 for (i = info->first_gp_reg_save; i < 32; i++)
17386 if (!save_reg_p (i))
17388 strategy |= REST_INLINE_GPRS;
17389 break;
17393 if (TARGET_ELF && TARGET_64BIT)
17395 if (!(strategy & SAVE_INLINE_FPRS))
17396 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17397 else if (!(strategy & SAVE_INLINE_GPRS)
17398 && info->first_fp_reg_save == 64)
17399 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
17401 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
17402 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
17404 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
17405 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
17407 return strategy;
17410 /* Calculate the stack information for the current function. This is
17411 complicated by having two separate calling sequences, the AIX calling
17412 sequence and the V.4 calling sequence.
17414 AIX (and Darwin/Mac OS X) stack frames look like:
17415 32-bit 64-bit
17416 SP----> +---------------------------------------+
17417 | back chain to caller | 0 0
17418 +---------------------------------------+
17419 | saved CR | 4 8 (8-11)
17420 +---------------------------------------+
17421 | saved LR | 8 16
17422 +---------------------------------------+
17423 | reserved for compilers | 12 24
17424 +---------------------------------------+
17425 | reserved for binders | 16 32
17426 +---------------------------------------+
17427 | saved TOC pointer | 20 40
17428 +---------------------------------------+
17429 | Parameter save area (P) | 24 48
17430 +---------------------------------------+
17431 | Alloca space (A) | 24+P etc.
17432 +---------------------------------------+
17433 | Local variable space (L) | 24+P+A
17434 +---------------------------------------+
17435 | Float/int conversion temporary (X) | 24+P+A+L
17436 +---------------------------------------+
17437 | Save area for AltiVec registers (W) | 24+P+A+L+X
17438 +---------------------------------------+
17439 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
17440 +---------------------------------------+
17441 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
17442 +---------------------------------------+
17443 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
17444 +---------------------------------------+
17445 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
17446 +---------------------------------------+
17447 old SP->| back chain to caller's caller |
17448 +---------------------------------------+
17450 The required alignment for AIX configurations is two words (i.e., 8
17451 or 16 bytes).
17454 V.4 stack frames look like:
17456 SP----> +---------------------------------------+
17457 | back chain to caller | 0
17458 +---------------------------------------+
17459 | caller's saved LR | 4
17460 +---------------------------------------+
17461 | Parameter save area (P) | 8
17462 +---------------------------------------+
17463 | Alloca space (A) | 8+P
17464 +---------------------------------------+
17465 | Varargs save area (V) | 8+P+A
17466 +---------------------------------------+
17467 | Local variable space (L) | 8+P+A+V
17468 +---------------------------------------+
17469 | Float/int conversion temporary (X) | 8+P+A+V+L
17470 +---------------------------------------+
17471 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
17472 +---------------------------------------+
17473 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
17474 +---------------------------------------+
17475 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
17476 +---------------------------------------+
17477 | SPE: area for 64-bit GP registers |
17478 +---------------------------------------+
17479 | SPE alignment padding |
17480 +---------------------------------------+
17481 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
17482 +---------------------------------------+
17483 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
17484 +---------------------------------------+
17485 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
17486 +---------------------------------------+
17487 old SP->| back chain to caller's caller |
17488 +---------------------------------------+
17490 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
17491 given. (But note below and in sysv4.h that we require only 8 and
17492 may round up the size of our stack frame anyway. The historical
17493 reason is early versions of powerpc-linux which didn't properly
17494 align the stack at program startup. A happy side-effect is that
17495 -mno-eabi libraries can be used with -meabi programs.)
17497 The EABI configuration defaults to the V.4 layout. However,
17498 the stack alignment requirements may differ. If -mno-eabi is not
17499 given, the required stack alignment is 8 bytes; if -mno-eabi is
17500 given, the required alignment is 16 bytes. (But see V.4 comment
17501 above.) */
17503 #ifndef ABI_STACK_BOUNDARY
17504 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
17505 #endif
17507 static rs6000_stack_t *
17508 rs6000_stack_info (void)
17510 rs6000_stack_t *info_ptr = &stack_info;
17511 int reg_size = TARGET_32BIT ? 4 : 8;
17512 int ehrd_size;
17513 int save_align;
17514 int first_gp;
17515 HOST_WIDE_INT non_fixed_size;
17516 bool using_static_chain_p;
17518 if (reload_completed && info_ptr->reload_completed)
17519 return info_ptr;
17521 memset (info_ptr, 0, sizeof (*info_ptr));
17522 info_ptr->reload_completed = reload_completed;
17524 if (TARGET_SPE)
17526 /* Cache value so we don't rescan instruction chain over and over. */
17527 if (cfun->machine->insn_chain_scanned_p == 0)
17528 cfun->machine->insn_chain_scanned_p
17529 = spe_func_has_64bit_regs_p () + 1;
17530 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
17533 /* Select which calling sequence. */
17534 info_ptr->abi = DEFAULT_ABI;
17536 /* Calculate which registers need to be saved & save area size. */
17537 info_ptr->first_gp_reg_save = first_reg_to_save ();
17538 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
17539 even if it currently looks like we won't. Reload may need it to
17540 get at a constant; if so, it will have already created a constant
17541 pool entry for it. */
17542 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
17543 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
17544 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
17545 && crtl->uses_const_pool
17546 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
17547 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
17548 else
17549 first_gp = info_ptr->first_gp_reg_save;
17551 info_ptr->gp_size = reg_size * (32 - first_gp);
17553 /* For the SPE, we have an additional upper 32-bits on each GPR.
17554 Ideally we should save the entire 64-bits only when the upper
17555 half is used in SIMD instructions. Since we only record
17556 registers live (not the size they are used in), this proves
17557 difficult because we'd have to traverse the instruction chain at
17558 the right time, taking reload into account. This is a real pain,
17559 so we opt to always save the GPRs in 64 bits if even one register
17560 gets used in 64 bits. Otherwise, all the registers in the frame
17561 get saved in 32 bits.
17563 So, since saving all GPRs (except the SP) in 64 bits leaves the
17564 traditional GP save area empty, gp_size is zeroed below. */
17565 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17566 info_ptr->gp_size = 0;
17568 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
17569 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
17571 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
17572 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
17573 - info_ptr->first_altivec_reg_save);
17575 /* Does this function call anything? */
17576 info_ptr->calls_p = (! current_function_is_leaf
17577 || cfun->machine->ra_needs_full_frame);
17579 /* Determine if we need to save the condition code registers. */
17580 if (df_regs_ever_live_p (CR2_REGNO)
17581 || df_regs_ever_live_p (CR3_REGNO)
17582 || df_regs_ever_live_p (CR4_REGNO))
17584 info_ptr->cr_save_p = 1;
17585 if (DEFAULT_ABI == ABI_V4)
17586 info_ptr->cr_size = reg_size;
17589 /* If the current function calls __builtin_eh_return, then we need
17590 to allocate stack space for registers that will hold data for
17591 the exception handler. */
17592 if (crtl->calls_eh_return)
17594 unsigned int i;
17595 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
17596 continue;
17598 /* SPE saves EH registers in 64-bits. */
17599 ehrd_size = i * (TARGET_SPE_ABI
17600 && info_ptr->spe_64bit_regs_used != 0
17601 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
17603 else
17604 ehrd_size = 0;
17606 /* Determine various sizes. */
17607 info_ptr->reg_size = reg_size;
17608 info_ptr->fixed_size = RS6000_SAVE_AREA;
17609 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
17610 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
17611 TARGET_ALTIVEC ? 16 : 8);
17612 if (FRAME_GROWS_DOWNWARD)
17613 info_ptr->vars_size
17614 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
17615 + info_ptr->parm_size,
17616 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
17617 - (info_ptr->fixed_size + info_ptr->vars_size
17618 + info_ptr->parm_size);
17620 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17621 info_ptr->spe_gp_size = 8 * (32 - first_gp);
17622 else
17623 info_ptr->spe_gp_size = 0;
17625 if (TARGET_ALTIVEC_ABI)
17626 info_ptr->vrsave_mask = compute_vrsave_mask ();
17627 else
17628 info_ptr->vrsave_mask = 0;
17630 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
17631 info_ptr->vrsave_size = 4;
17632 else
17633 info_ptr->vrsave_size = 0;
17635 compute_save_world_info (info_ptr);
17637 /* Calculate the offsets. */
17638 switch (DEFAULT_ABI)
17640 case ABI_NONE:
17641 default:
17642 gcc_unreachable ();
17644 case ABI_AIX:
17645 case ABI_DARWIN:
17646 info_ptr->fp_save_offset = - info_ptr->fp_size;
17647 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17649 if (TARGET_ALTIVEC_ABI)
17651 info_ptr->vrsave_save_offset
17652 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
17654 /* Align stack so vector save area is on a quadword boundary.
17655 The padding goes above the vectors. */
17656 if (info_ptr->altivec_size != 0)
17657 info_ptr->altivec_padding_size
17658 = info_ptr->vrsave_save_offset & 0xF;
17659 else
17660 info_ptr->altivec_padding_size = 0;
17662 info_ptr->altivec_save_offset
17663 = info_ptr->vrsave_save_offset
17664 - info_ptr->altivec_padding_size
17665 - info_ptr->altivec_size;
17666 gcc_assert (info_ptr->altivec_size == 0
17667 || info_ptr->altivec_save_offset % 16 == 0);
17669 /* Adjust for AltiVec case. */
17670 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
17672 else
17673 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
17674 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
17675 info_ptr->lr_save_offset = 2*reg_size;
17676 break;
17678 case ABI_V4:
17679 info_ptr->fp_save_offset = - info_ptr->fp_size;
17680 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
17681 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
17683 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
17685 /* Align stack so SPE GPR save area is aligned on a
17686 double-word boundary. */
17687 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
17688 info_ptr->spe_padding_size
17689 = 8 - (-info_ptr->cr_save_offset % 8);
17690 else
17691 info_ptr->spe_padding_size = 0;
17693 info_ptr->spe_gp_save_offset
17694 = info_ptr->cr_save_offset
17695 - info_ptr->spe_padding_size
17696 - info_ptr->spe_gp_size;
17698 /* Adjust for SPE case. */
17699 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
17701 else if (TARGET_ALTIVEC_ABI)
17703 info_ptr->vrsave_save_offset
17704 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
17706 /* Align stack so vector save area is on a quadword boundary. */
17707 if (info_ptr->altivec_size != 0)
17708 info_ptr->altivec_padding_size
17709 = 16 - (-info_ptr->vrsave_save_offset % 16);
17710 else
17711 info_ptr->altivec_padding_size = 0;
17713 info_ptr->altivec_save_offset
17714 = info_ptr->vrsave_save_offset
17715 - info_ptr->altivec_padding_size
17716 - info_ptr->altivec_size;
17718 /* Adjust for AltiVec case. */
17719 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
17721 else
17722 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
17723 info_ptr->ehrd_offset -= ehrd_size;
17724 info_ptr->lr_save_offset = reg_size;
17725 break;
17728 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
17729 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
17730 + info_ptr->gp_size
17731 + info_ptr->altivec_size
17732 + info_ptr->altivec_padding_size
17733 + info_ptr->spe_gp_size
17734 + info_ptr->spe_padding_size
17735 + ehrd_size
17736 + info_ptr->cr_size
17737 + info_ptr->vrsave_size,
17738 save_align);
17740 non_fixed_size = (info_ptr->vars_size
17741 + info_ptr->parm_size
17742 + info_ptr->save_size);
17744 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
17745 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
17747 /* Determine if we need to save the link register. */
17748 if (info_ptr->calls_p
17749 || (DEFAULT_ABI == ABI_AIX
17750 && crtl->profile
17751 && !TARGET_PROFILE_KERNEL)
17752 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
17753 #ifdef TARGET_RELOCATABLE
17754 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
17755 #endif
17756 || rs6000_ra_ever_killed ())
17757 info_ptr->lr_save_p = 1;
17759 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
17760 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
17761 && call_used_regs[STATIC_CHAIN_REGNUM]);
17762 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
17763 using_static_chain_p);
17765 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
17766 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
17767 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
17768 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
17769 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
17770 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
17771 info_ptr->lr_save_p = 1;
17773 if (info_ptr->lr_save_p)
17774 df_set_regs_ever_live (LR_REGNO, true);
17776 /* Determine if we need to allocate any stack frame:
17778 For AIX we need to push the stack if a frame pointer is needed
17779 (because the stack might be dynamically adjusted), if we are
17780 debugging, if we make calls, or if the sum of fp_save, gp_save,
17781 and local variables are more than the space needed to save all
17782 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
17783 + 18*8 = 288 (GPR13 reserved).
17785 For V.4 we don't have the stack cushion that AIX uses, but assume
17786 that the debugger can handle stackless frames. */
17788 if (info_ptr->calls_p)
17789 info_ptr->push_p = 1;
17791 else if (DEFAULT_ABI == ABI_V4)
17792 info_ptr->push_p = non_fixed_size != 0;
17794 else if (frame_pointer_needed)
17795 info_ptr->push_p = 1;
17797 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
17798 info_ptr->push_p = 1;
17800 else
17801 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
17803 /* Zero offsets if we're not saving those registers. */
17804 if (info_ptr->fp_size == 0)
17805 info_ptr->fp_save_offset = 0;
17807 if (info_ptr->gp_size == 0)
17808 info_ptr->gp_save_offset = 0;
17810 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
17811 info_ptr->altivec_save_offset = 0;
17813 if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
17814 info_ptr->vrsave_save_offset = 0;
17816 if (! TARGET_SPE_ABI
17817 || info_ptr->spe_64bit_regs_used == 0
17818 || info_ptr->spe_gp_size == 0)
17819 info_ptr->spe_gp_save_offset = 0;
17821 if (! info_ptr->lr_save_p)
17822 info_ptr->lr_save_offset = 0;
17824 if (! info_ptr->cr_save_p)
17825 info_ptr->cr_save_offset = 0;
17827 return info_ptr;
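/* Worked example (ours; 32-bit V.4, AltiVec and SPE off): a function
   that saves r28-r31, f30-f31 and the CR, and needs LR, gets

     gp_size        = 4 * (32 - 28) = 16
     fp_size        = 8 * (64 - 62) = 16
     fp_save_offset = -16
     gp_save_offset = -16 - 16 = -32
     cr_save_offset = -32 - 4  = -36
     lr_save_offset = reg_size = 4
     save_size      = RS6000_ALIGN (16 + 16 + 4, 8) = 40.  */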
17830 /* Return true if the current function uses any GPRs in 64-bit SIMD
17831 mode. */
17833 static bool
17834 spe_func_has_64bit_regs_p (void)
17836 rtx insns, insn;
17838 /* Functions that save and restore all the call-saved registers will
17839 need to save/restore the registers in 64-bits. */
17840 if (crtl->calls_eh_return
17841 || cfun->calls_setjmp
17842 || crtl->has_nonlocal_goto)
17843 return true;
17845 insns = get_insns ();
17847 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
17849 if (INSN_P (insn))
17851 rtx i;
17853 /* FIXME: This should be implemented with attributes...
17855 (set_attr "spe64" "true") ... then,
17856 if (get_spe64(insn)) return true;
17858 It's the only reliable way to do the stuff below. */
17860 i = PATTERN (insn);
17861 if (GET_CODE (i) == SET)
17863 enum machine_mode mode = GET_MODE (SET_SRC (i));
17865 if (SPE_VECTOR_MODE (mode))
17866 return true;
17867 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
17868 return true;
17873 return false;
17876 static void
17877 debug_stack_info (rs6000_stack_t *info)
17879 const char *abi_string;
17881 if (! info)
17882 info = rs6000_stack_info ();
17884 fprintf (stderr, "\nStack information for function %s:\n",
17885 ((current_function_decl && DECL_NAME (current_function_decl))
17886 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
17887 : "<unknown>"));
17889 switch (info->abi)
17891 default: abi_string = "Unknown"; break;
17892 case ABI_NONE: abi_string = "NONE"; break;
17893 case ABI_AIX: abi_string = "AIX"; break;
17894 case ABI_DARWIN: abi_string = "Darwin"; break;
17895 case ABI_V4: abi_string = "V.4"; break;
17898 fprintf (stderr, "\tABI = %5s\n", abi_string);
17900 if (TARGET_ALTIVEC_ABI)
17901 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
17903 if (TARGET_SPE_ABI)
17904 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
17906 if (info->first_gp_reg_save != 32)
17907 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
17909 if (info->first_fp_reg_save != 64)
17910 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
17912 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
17913 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
17914 info->first_altivec_reg_save);
17916 if (info->lr_save_p)
17917 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
17919 if (info->cr_save_p)
17920 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
17922 if (info->vrsave_mask)
17923 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
17925 if (info->push_p)
17926 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
17928 if (info->calls_p)
17929 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
17931 if (info->gp_save_offset)
17932 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
17934 if (info->fp_save_offset)
17935 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
17937 if (info->altivec_save_offset)
17938 fprintf (stderr, "\taltivec_save_offset = %5d\n",
17939 info->altivec_save_offset);
17941 if (info->spe_gp_save_offset)
17942 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
17943 info->spe_gp_save_offset);
17945 if (info->vrsave_save_offset)
17946 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
17947 info->vrsave_save_offset);
17949 if (info->lr_save_offset)
17950 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
17952 if (info->cr_save_offset)
17953 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
17955 if (info->varargs_save_offset)
17956 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
17958 if (info->total_size)
17959 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
17960 info->total_size);
17962 if (info->vars_size)
17963 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
17964 info->vars_size);
17966 if (info->parm_size)
17967 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
17969 if (info->fixed_size)
17970 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
17972 if (info->gp_size)
17973 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
17975 if (info->spe_gp_size)
17976 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
17978 if (info->fp_size)
17979 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
17981 if (info->altivec_size)
17982 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
17984 if (info->vrsave_size)
17985 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
17987 if (info->altivec_padding_size)
17988 fprintf (stderr, "\taltivec_padding_size= %5d\n",
17989 info->altivec_padding_size);
17991 if (info->spe_padding_size)
17992 fprintf (stderr, "\tspe_padding_size = %5d\n",
17993 info->spe_padding_size);
17995 if (info->cr_size)
17996 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
17998 if (info->save_size)
17999 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
18001 if (info->reg_size != 4)
18002 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
18004 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
18006 fprintf (stderr, "\n");
18009 rtx
18010 rs6000_return_addr (int count, rtx frame)
18012 /* Currently we don't optimize very well between prolog and body
18013 code, and for PIC code the result can actually be quite bad, so
18014 don't try to be too clever here. */
18015 if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
18017 cfun->machine->ra_needs_full_frame = 1;
18019 return
18020 gen_rtx_MEM
18021 (Pmode,
18022 memory_address
18023 (Pmode,
18024 plus_constant (Pmode,
18025 copy_to_reg
18026 (gen_rtx_MEM (Pmode,
18027 memory_address (Pmode, frame))),
18028 RETURN_ADDRESS_OFFSET)));
18031 cfun->machine->ra_need_lr = 1;
18032 return get_hard_reg_initial_val (Pmode, LR_REGNO);
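/* Usage sketch (ours): __builtin_return_address (0) arrives here with
   COUNT == 0 and, for non-PIC AIX code, reduces to the entry value of
   LR via get_hard_reg_initial_val; any deeper COUNT (or PIC non-AIX
   code) instead loads the slot at RETURN_ADDRESS_OFFSET from the given
   FRAME, forcing ra_needs_full_frame.  */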
18035 /* Say whether a function is a candidate for sibcall handling or not. */
18037 static bool
18038 rs6000_function_ok_for_sibcall (tree decl, tree exp)
18040 tree fntype;
18042 if (decl)
18043 fntype = TREE_TYPE (decl);
18044 else
18045 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
18047 /* We can't do it if the called function has more vector parameters
18048 than the current function; there's nowhere to put the VRsave code. */
18049 if (TARGET_ALTIVEC_ABI
18050 && TARGET_ALTIVEC_VRSAVE
18051 && !(decl && decl == current_function_decl))
18053 function_args_iterator args_iter;
18054 tree type;
18055 int nvreg = 0;
18057 /* Functions with vector parameters are required to have a
18058 prototype, so the argument type info must be available
18059 here. */
18060 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
18061 if (TREE_CODE (type) == VECTOR_TYPE
18062 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18063 nvreg++;
18065 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
18066 if (TREE_CODE (type) == VECTOR_TYPE
18067 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
18068 nvreg--;
18070 if (nvreg > 0)
18071 return false;
18074 /* Under the AIX ABI we can't allow calls to non-local functions,
18075 because the callee may have a different TOC pointer from the
18076 caller and there's no way to ensure we restore the TOC when we
18077 return. With the secure-plt SYSV ABI we can't make non-local
18078 calls when -fpic/PIC because the plt call stubs use r30. */
18079 if (DEFAULT_ABI == ABI_DARWIN
18080 || (DEFAULT_ABI == ABI_AIX
18081 && decl
18082 && !DECL_EXTERNAL (decl)
18083 && (*targetm.binds_local_p) (decl))
18084 || (DEFAULT_ABI == ABI_V4
18085 && (!TARGET_SECURE_PLT
18086 || !flag_pic
18087 || (decl
18088 && (*targetm.binds_local_p) (decl)))))
18090 tree attr_list = TYPE_ATTRIBUTES (fntype);
18092 if (!lookup_attribute ("longcall", attr_list)
18093 || lookup_attribute ("shortcall", attr_list))
18094 return true;
18097 return false;
18100 /* Return NULL if INSN is valid within a low-overhead loop.
18101 Otherwise return a string describing why doloop cannot be applied.
18102 PowerPC uses the COUNT register for branch on table instructions. */
18104 static const char *
18105 rs6000_invalid_within_doloop (const_rtx insn)
18107 if (CALL_P (insn))
18108 return "Function call in the loop.";
18110 if (JUMP_P (insn)
18111 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
18112 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
18113 return "Computed branch in the loop.";
18115 return NULL;
18118 static int
18119 rs6000_ra_ever_killed (void)
18121 rtx top;
18122 rtx reg;
18123 rtx insn;
18125 if (cfun->is_thunk)
18126 return 0;
18128 if (cfun->machine->lr_save_state)
18129 return cfun->machine->lr_save_state - 1;
18131 /* regs_ever_live has LR marked as used if any sibcalls are present,
18132 but this should not force saving and restoring in the
18133 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
18134 clobbers LR, so that is inappropriate. */
18136 /* Also, the prologue can generate a store into LR that
18137 doesn't really count, like this:
18139 move LR->R0
18140 bcl to set PIC register
18141 move LR->R31
18142 move R0->LR
18144 When we're called from the epilogue, we need to avoid counting
18145 this as a store. */
18147 push_topmost_sequence ();
18148 top = get_insns ();
18149 pop_topmost_sequence ();
18150 reg = gen_rtx_REG (Pmode, LR_REGNO);
18152 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
18154 if (INSN_P (insn))
18156 if (CALL_P (insn))
18158 if (!SIBLING_CALL_P (insn))
18159 return 1;
18161 else if (find_regno_note (insn, REG_INC, LR_REGNO))
18162 return 1;
18163 else if (set_of (reg, insn) != NULL_RTX
18164 && !prologue_epilogue_contains (insn))
18165 return 1;
18168 return 0;
18171 /* Emit instructions needed to load the TOC register.
18172 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
18173 and there is a constant pool, or for SVR4 -fpic. */
18175 void
18176 rs6000_emit_load_toc_table (int fromprolog)
18178 rtx dest;
18179 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
18181 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
18183 char buf[30];
18184 rtx lab, tmp1, tmp2, got;
18186 lab = gen_label_rtx ();
18187 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
18188 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18189 if (flag_pic == 2)
18190 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18191 else
18192 got = rs6000_got_sym ();
18193 tmp1 = tmp2 = dest;
18194 if (!fromprolog)
18196 tmp1 = gen_reg_rtx (Pmode);
18197 tmp2 = gen_reg_rtx (Pmode);
18199 emit_insn (gen_load_toc_v4_PIC_1 (lab));
18200 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
18201 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
18202 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
18204 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
18206 emit_insn (gen_load_toc_v4_pic_si ());
18207 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18209 else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
18211 char buf[30];
18212 rtx temp0 = (fromprolog
18213 ? gen_rtx_REG (Pmode, 0)
18214 : gen_reg_rtx (Pmode));
18216 if (fromprolog)
18218 rtx symF, symL;
18220 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
18221 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18223 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
18224 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18226 emit_insn (gen_load_toc_v4_PIC_1 (symF));
18227 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18228 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
18230 else
18232 rtx tocsym, lab;
18234 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
18235 lab = gen_label_rtx ();
18236 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
18237 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
18238 if (TARGET_LINK_STACK)
18239 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
18240 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
18242 emit_insn (gen_addsi3 (dest, temp0, dest));
18244 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
18246 /* This is for AIX code running in non-PIC ELF32. */
18247 char buf[30];
18248 rtx realsym;
18249 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
18250 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
18252 emit_insn (gen_elf_high (dest, realsym));
18253 emit_insn (gen_elf_low (dest, dest, realsym));
18255 else
18257 gcc_assert (DEFAULT_ABI == ABI_AIX);
18259 if (TARGET_32BIT)
18260 emit_insn (gen_load_toc_aix_si (dest));
18261 else
18262 emit_insn (gen_load_toc_aix_di (dest));
18266 /* Emit instructions to restore the link register after determining where
18267 its value has been stored. */
18269 void
18270 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
18272 rs6000_stack_t *info = rs6000_stack_info ();
18273 rtx operands[2];
18275 operands[0] = source;
18276 operands[1] = scratch;
18278 if (info->lr_save_p)
18280 rtx frame_rtx = stack_pointer_rtx;
18281 HOST_WIDE_INT sp_offset = 0;
18282 rtx tmp;
18284 if (frame_pointer_needed
18285 || cfun->calls_alloca
18286 || info->total_size > 32767)
18288 tmp = gen_frame_mem (Pmode, frame_rtx);
18289 emit_move_insn (operands[1], tmp);
18290 frame_rtx = operands[1];
18292 else if (info->push_p)
18293 sp_offset = info->total_size;
18295 tmp = plus_constant (Pmode, frame_rtx,
18296 info->lr_save_offset + sp_offset);
18297 tmp = gen_frame_mem (Pmode, tmp);
18298 emit_move_insn (tmp, operands[0]);
18300 else
18301 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
18303 /* Freeze lr_save_p. We've just emitted rtl that depends on the
18304 state of lr_save_p so any change from here on would be a bug. In
18305 particular, stop rs6000_ra_ever_killed from considering the SET
18306 of lr we may have added just above. */
18307 cfun->machine->lr_save_state = info->lr_save_p + 1;
18310 static GTY(()) alias_set_type set = -1;
18312 alias_set_type
18313 get_TOC_alias_set (void)
18315 if (set == -1)
18316 set = new_alias_set ();
18317 return set;
18320 /* This returns nonzero if the current function uses the TOC. This is
18321 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
18322 is generated by the ABI_V4 load_toc_* patterns. */
18323 #if TARGET_ELF
18324 static int
18325 uses_TOC (void)
18327 rtx insn;
18329 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18330 if (INSN_P (insn))
18332 rtx pat = PATTERN (insn);
18333 int i;
18335 if (GET_CODE (pat) == PARALLEL)
18336 for (i = 0; i < XVECLEN (pat, 0); i++)
18338 rtx sub = XVECEXP (pat, 0, i);
18339 if (GET_CODE (sub) == USE)
18341 sub = XEXP (sub, 0);
18342 if (GET_CODE (sub) == UNSPEC
18343 && XINT (sub, 1) == UNSPEC_TOC)
18344 return 1;
18348 return 0;
18350 #endif
18352 rtx
18353 create_TOC_reference (rtx symbol, rtx largetoc_reg)
18355 rtx tocrel, tocreg, hi;
18357 if (TARGET_DEBUG_ADDR)
18359 if (GET_CODE (symbol) == SYMBOL_REF)
18360 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
18361 XSTR (symbol, 0));
18362 else
18364 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
18365 GET_RTX_NAME (GET_CODE (symbol)));
18366 debug_rtx (symbol);
18370 if (!can_create_pseudo_p ())
18371 df_set_regs_ever_live (TOC_REGISTER, true);
18373 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
18374 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
18375 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
18376 return tocrel;
18378 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
18379 if (largetoc_reg != NULL)
18381 emit_move_insn (largetoc_reg, hi);
18382 hi = largetoc_reg;
18384 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
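/* For illustration (ours): with -mcmodel=small, or while pseudos can
   still be created, the bare UNSPEC_TOCREL above suffices and ends up
   as a single sym@toc(2) reference; for the medium/large code models
   the HIGH/LO_SUM pair corresponds, roughly, to

     addis rX, r2, sym@toc@ha
     ld    rY, sym@toc@l(rX)

   (or lwz/addi as appropriate for the access).  */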
18387 /* Issue assembly directives that create a reference to the given DWARF
18388 FRAME_TABLE_LABEL from the current function section. */
18389 void
18390 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
18392 fprintf (asm_out_file, "\t.ref %s\n",
18393 (* targetm.strip_name_encoding) (frame_table_label));
18396 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
18397 and the change to the stack pointer. */
18399 static void
18400 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
18402 rtvec p;
18403 int i;
18404 rtx regs[3];
18406 i = 0;
18407 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18408 if (hard_frame_needed)
18409 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
18410 if (!(REGNO (fp) == STACK_POINTER_REGNUM
18411 || (hard_frame_needed
18412 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
18413 regs[i++] = fp;
18415 p = rtvec_alloc (i);
18416 while (--i >= 0)
18418 rtx mem = gen_frame_mem (BLKmode, regs[i]);
18419 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
18422 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
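/* For illustration only: the tie emitted above is a blockage of
   roughly this RTL shape, assuming both r1 and the hard frame
   pointer participate:

       (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                  (set (mem:BLK (reg 31)) (const_int 0))])

   It generates no machine code; its only job is to present a memory
   dependence that keeps the scheduler from moving frame saves or
   restores across the stack pointer adjustment.  */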
18425 /* Emit the correct code for allocating stack space, as insns.
18426 If COPY_REG, leave a copy of the old stack pointer (offset by COPY_OFF) in it.
18427 The generated code may use hard register 0 as a temporary. */
18429 static void
18430 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
18432 rtx insn;
18433 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
18434 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
18435 rtx todec = gen_int_mode (-size, Pmode);
18436 rtx par, set, mem;
18438 if (INTVAL (todec) != -size)
18440 warning (0, "stack frame too large");
18441 emit_insn (gen_trap ());
18442 return;
18445 if (crtl->limit_stack)
18447 if (REG_P (stack_limit_rtx)
18448 && REGNO (stack_limit_rtx) > 1
18449 && REGNO (stack_limit_rtx) <= 31)
18451 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
18452 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18453 const0_rtx));
18455 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
18456 && TARGET_32BIT
18457 && DEFAULT_ABI == ABI_V4)
18459 rtx toload = gen_rtx_CONST (VOIDmode,
18460 gen_rtx_PLUS (Pmode,
18461 stack_limit_rtx,
18462 GEN_INT (size)));
18464 emit_insn (gen_elf_high (tmp_reg, toload));
18465 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
18466 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
18467 const0_rtx));
18469 else
18470 warning (0, "stack limit expression is not supported");
18473 if (copy_reg)
18475 if (copy_off != 0)
18476 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
18477 else
18478 emit_move_insn (copy_reg, stack_reg);
18481 if (size > 32767)
18483 /* Need a note here so that try_split doesn't get confused. */
18484 if (get_last_insn () == NULL_RTX)
18485 emit_note (NOTE_INSN_DELETED);
18486 insn = emit_move_insn (tmp_reg, todec);
18487 try_split (PATTERN (insn), insn, 0);
18488 todec = tmp_reg;
18491 insn = emit_insn (TARGET_32BIT
18492 ? gen_movsi_update_stack (stack_reg, stack_reg,
18493 todec, stack_reg)
18494 : gen_movdi_di_update_stack (stack_reg, stack_reg,
18495 todec, stack_reg));
18496 /* Since we didn't use gen_frame_mem to generate the MEM, grab
18497 it now and set the alias set/attributes. The above gen_*_update
18498 calls will generate a PARALLEL with the MEM set being the first
18499 operation. */
18500 par = PATTERN (insn);
18501 gcc_assert (GET_CODE (par) == PARALLEL);
18502 set = XVECEXP (par, 0, 0);
18503 gcc_assert (GET_CODE (set) == SET);
18504 mem = SET_DEST (set);
18505 gcc_assert (MEM_P (mem));
18506 MEM_NOTRAP_P (mem) = 1;
18507 set_mem_alias_set (mem, get_frame_alias_set ());
18509 RTX_FRAME_RELATED_P (insn) = 1;
18510 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
18511 gen_rtx_SET (VOIDmode, stack_reg,
18512 gen_rtx_PLUS (Pmode, stack_reg,
18513 GEN_INT (-size))));
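/* Illustrative example: for a 64-byte frame on a 32-bit target the
   update insn emitted above assembles to the store-with-update form

       stwu r1,-64(r1)

   which allocates the frame and stores the back chain in a single
   instruction (stdu on 64-bit).  For frames larger than 32767 bytes
   the negated size is first moved into r0 and the indexed form
   stwux/stdux is used instead.  */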
18516 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
18518 #if PROBE_INTERVAL > 32768
18519 #error Cannot use indexed addressing mode for stack probing
18520 #endif
18522 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
18523 inclusive. These are offsets from the current stack pointer. */
18525 static void
18526 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
18528 /* See if we have a constant small number of probes to generate. If so,
18529 that's the easy case. */
18530 if (first + size <= 32768)
18532 HOST_WIDE_INT i;
18534 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
18535 it exceeds SIZE. If only one probe is needed, this will not
18536 generate any code. Then probe at FIRST + SIZE. */
18537 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
18538 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18539 -(first + i)));
18541 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
18542 -(first + size)));
18545 /* Otherwise, do the same as above, but in a loop. Note that we must be
18546 extra careful with variables wrapping around because we might be at
18547 the very top (or the very bottom) of the address space and we have
18548 to be able to handle this case properly; in particular, we use an
18549 equality test for the loop condition. */
18550 else
18552 HOST_WIDE_INT rounded_size;
18553 rtx r12 = gen_rtx_REG (Pmode, 12);
18554 rtx r0 = gen_rtx_REG (Pmode, 0);
18556 /* Sanity check for the addressing mode we're going to use. */
18557 gcc_assert (first <= 32768);
18559 /* Step 1: round SIZE to the previous multiple of the interval. */
18561 rounded_size = size & -PROBE_INTERVAL;
18564 /* Step 2: compute initial and final value of the loop counter. */
18566 /* TEST_ADDR = SP + FIRST. */
18567 emit_insn (gen_rtx_SET (VOIDmode, r12,
18568 plus_constant (Pmode, stack_pointer_rtx,
18569 -first)));
18571 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
18572 if (rounded_size > 32768)
18574 emit_move_insn (r0, GEN_INT (-rounded_size));
18575 emit_insn (gen_rtx_SET (VOIDmode, r0,
18576 gen_rtx_PLUS (Pmode, r12, r0)));
18578 else
18579 emit_insn (gen_rtx_SET (VOIDmode, r0,
18580 plus_constant (Pmode, r12, -rounded_size)));
18583 /* Step 3: the loop
18585 while (TEST_ADDR != LAST_ADDR)
18587 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
18588 probe at TEST_ADDR
18591 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
18592 until it is equal to ROUNDED_SIZE. */
18594 if (TARGET_64BIT)
18595 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
18596 else
18597 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
18600 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
18601 that SIZE is equal to ROUNDED_SIZE. */
18603 if (size != rounded_size)
18604 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
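/* Worked example, purely illustrative: with PROBE_INTERVAL == 4096,
   FIRST == 16384 and SIZE == 10000, FIRST + SIZE <= 32768 so the
   straight-line path runs and probes the stack at offsets -20480,
   -24576 and finally -(16384 + 10000) == -26384 from the incoming
   stack pointer.  Only larger allocations use the r12/r0 loop.  */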
18608 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
18609 absolute addresses. */
18611 const char *
18612 output_probe_stack_range (rtx reg1, rtx reg2)
18614 static int labelno = 0;
18615 char loop_lab[32], end_lab[32];
18616 rtx xops[2];
18618 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
18619 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
18621 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
18623 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
18624 xops[0] = reg1;
18625 xops[1] = reg2;
18626 if (TARGET_64BIT)
18627 output_asm_insn ("{cmp|cmpd} 0,%0,%1", xops);
18628 else
18629 output_asm_insn ("{cmp|cmpw} 0,%0,%1", xops);
18631 fputs ("\tbeq 0,", asm_out_file);
18632 assemble_name_raw (asm_out_file, end_lab);
18633 fputc ('\n', asm_out_file);
18635 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL, i.e. step down the stack. */
18636 xops[1] = GEN_INT (-PROBE_INTERVAL);
18637 output_asm_insn ("{cal %0,%1(%0)|addi %0,%0,%1}", xops);
18639 /* Probe at TEST_ADDR and branch. */
18640 xops[1] = gen_rtx_REG (Pmode, 0);
18641 output_asm_insn ("{st|stw} %1,0(%0)", xops);
18642 fprintf (asm_out_file, "\tb ");
18643 assemble_name_raw (asm_out_file, loop_lab);
18644 fputc ('\n', asm_out_file);
18646 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
18648 return "";
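/* The sequence printed above, shown with 64-bit mnemonics and the
   default PROBE_INTERVAL of 4096 (illustrative only; actual label
   spelling depends on the target):

   .LPSRL0:
       cmpd  0,r12,r0        # TEST_ADDR == LAST_ADDR ?
       beq   0,.LPSRE0
       addi  r12,r12,-4096   # step TEST_ADDR down one interval
       stw   r0,0(r12)       # the probe itself
       b     .LPSRL0
   .LPSRE0:
 */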
18651 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
18652 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
18653 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
18654 deduce these equivalences by itself so it wasn't necessary to hold
18655 its hand so much. Don't be tempted to always supply d2_f_d_e with
18656 the actual cfa register, i.e. r31 when we are using a hard frame
18657 pointer. That fails when saving regs off r1, and sched moves the
18658 r31 setup past the reg saves. */
18660 static rtx
18661 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
18662 rtx reg2, rtx rreg)
18664 rtx real, temp;
18666 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
18668 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
18669 int i;
18671 gcc_checking_assert (val == 0);
18672 real = PATTERN (insn);
18673 if (GET_CODE (real) == PARALLEL)
18674 for (i = 0; i < XVECLEN (real, 0); i++)
18675 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18677 rtx set = XVECEXP (real, 0, i);
18679 RTX_FRAME_RELATED_P (set) = 1;
18681 RTX_FRAME_RELATED_P (insn) = 1;
18682 return insn;
18685 /* copy_rtx will not make unique copies of registers, so we need to
18686 ensure we don't have unwanted sharing here. */
18687 if (reg == reg2)
18688 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18690 if (reg == rreg)
18691 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
18693 real = copy_rtx (PATTERN (insn));
18695 if (reg2 != NULL_RTX)
18696 real = replace_rtx (real, reg2, rreg);
18698 if (REGNO (reg) == STACK_POINTER_REGNUM)
18699 gcc_checking_assert (val == 0);
18700 else
18701 real = replace_rtx (real, reg,
18702 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
18703 STACK_POINTER_REGNUM),
18704 GEN_INT (val)));
18706 /* We expect that 'real' is either a SET or a PARALLEL containing
18707 SETs (and possibly other stuff). In a PARALLEL, all the SETs
18708 are important so they all have to be marked RTX_FRAME_RELATED_P. */
18710 if (GET_CODE (real) == SET)
18712 rtx set = real;
18714 temp = simplify_rtx (SET_SRC (set));
18715 if (temp)
18716 SET_SRC (set) = temp;
18717 temp = simplify_rtx (SET_DEST (set));
18718 if (temp)
18719 SET_DEST (set) = temp;
18720 if (GET_CODE (SET_DEST (set)) == MEM)
18722 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18723 if (temp)
18724 XEXP (SET_DEST (set), 0) = temp;
18727 else
18729 int i;
18731 gcc_assert (GET_CODE (real) == PARALLEL);
18732 for (i = 0; i < XVECLEN (real, 0); i++)
18733 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
18735 rtx set = XVECEXP (real, 0, i);
18737 temp = simplify_rtx (SET_SRC (set));
18738 if (temp)
18739 SET_SRC (set) = temp;
18740 temp = simplify_rtx (SET_DEST (set));
18741 if (temp)
18742 SET_DEST (set) = temp;
18743 if (GET_CODE (SET_DEST (set)) == MEM)
18745 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
18746 if (temp)
18747 XEXP (SET_DEST (set), 0) = temp;
18749 RTX_FRAME_RELATED_P (set) = 1;
18753 RTX_FRAME_RELATED_P (insn) = 1;
18754 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
18756 return insn;
18759 /* Returns an insn that has a vrsave set operation with the
18760 appropriate CLOBBERs. */
18762 static rtx
18763 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
18765 int nclobs, i;
18766 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
18767 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
18769 clobs[0]
18770 = gen_rtx_SET (VOIDmode,
18771 vrsave,
18772 gen_rtx_UNSPEC_VOLATILE (SImode,
18773 gen_rtvec (2, reg, vrsave),
18774 UNSPECV_SET_VRSAVE));
18776 nclobs = 1;
18778 /* We need to clobber the registers in the mask so the scheduler
18779 does not move sets to VRSAVE before sets of AltiVec registers.
18781 However, if the function receives nonlocal gotos, reload will set
18782 all call saved registers live. We will end up with:
18784 (set (reg 999) (mem))
18785 (parallel [ (set (reg vrsave) (unspec blah))
18786 (clobber (reg 999))])
18788 The clobber will cause the store into reg 999 to be dead, and
18789 flow will attempt to delete an epilogue insn. In this case, we
18790 need an unspec use/set of the register. */
18792 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
18793 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
18795 if (!epiloguep || call_used_regs [i])
18796 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
18797 gen_rtx_REG (V4SImode, i));
18798 else
18800 rtx reg = gen_rtx_REG (V4SImode, i);
18802 clobs[nclobs++]
18803 = gen_rtx_SET (VOIDmode,
18804 reg,
18805 gen_rtx_UNSPEC (V4SImode,
18806 gen_rtvec (1, reg), 27));
18810 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
18812 for (i = 0; i < nclobs; ++i)
18813 XVECEXP (insn, 0, i) = clobs[i];
18815 return insn;
18818 static rtx
18819 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
18821 rtx addr, mem;
18823 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
18824 mem = gen_frame_mem (GET_MODE (reg), addr);
18825 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
18828 static rtx
18829 gen_frame_load (rtx reg, rtx frame_reg, int offset)
18831 return gen_frame_set (reg, frame_reg, offset, false);
18834 static rtx
18835 gen_frame_store (rtx reg, rtx frame_reg, int offset)
18837 return gen_frame_set (reg, frame_reg, offset, true);
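/* Example use, with hypothetical values: storing r31 eight bytes
   above the frame base would be written

       emit_insn (gen_frame_store (gen_rtx_REG (Pmode, 31),
                                   frame_reg_rtx, 8));

   and expands to (set (mem (plus frame_reg 8)) (reg 31)); the load
   helper simply swaps the SET operands.  */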
18840 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
18841 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
18843 static rtx
18844 emit_frame_save (rtx frame_reg, enum machine_mode mode,
18845 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
18847 rtx reg, insn;
18849 /* Reject cases that would need register indexed addressing. */
18850 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
18851 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
18852 || (TARGET_E500_DOUBLE && mode == DFmode)
18853 || (TARGET_SPE_ABI
18854 && SPE_VECTOR_MODE (mode)
18855 && !SPE_CONST_OFFSET_OK (offset))));
18857 reg = gen_rtx_REG (mode, regno);
18858 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
18859 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
18860 NULL_RTX, NULL_RTX);
18863 /* Emit an offset memory reference suitable for a frame store, while
18864 converting to a valid addressing mode. */
18866 static rtx
18867 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
18869 rtx int_rtx, offset_rtx;
18871 int_rtx = GEN_INT (offset);
18873 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
18874 || (TARGET_E500_DOUBLE && mode == DFmode))
18876 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
18877 emit_move_insn (offset_rtx, int_rtx);
18879 else
18880 offset_rtx = int_rtx;
18882 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
18885 #ifndef TARGET_FIX_AND_CONTINUE
18886 #define TARGET_FIX_AND_CONTINUE 0
18887 #endif
18889 /* The first saved register is really GPR 13 or 14, FPR 14 or VR 20; we need the smallest. */
18890 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
18891 #define LAST_SAVRES_REGISTER 31
18892 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
18894 enum {
18895 SAVRES_LR = 0x1,
18896 SAVRES_SAVE = 0x2,
18897 SAVRES_REG = 0x0c,
18898 SAVRES_GPR = 0,
18899 SAVRES_FPR = 4,
18900 SAVRES_VR = 8
18901 };
18903 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
18905 /* Temporary holding space for an out-of-line register save/restore
18906 routine name. */
18907 static char savres_routine_name[30];
18909 /* Return the name for an out-of-line register save/restore routine.
18910 SEL selects the register class (GPR/FPR/VR) and save vs. restore. */
18912 static char *
18913 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
18915 const char *prefix = "";
18916 const char *suffix = "";
18918 /* Different targets are supposed to define
18919 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
18920 routine name could be defined with:
18922 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
18924 This is a nice idea in theory, but in practice, things are
18925 complicated in several ways:
18927 - ELF targets have save/restore routines for GPRs.
18929 - SPE targets use different prefixes for 32/64-bit registers, and
18930 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
18932 - PPC64 ELF targets have routines for save/restore of GPRs that
18933 differ in what they do with the link register, so having a set
18934 prefix doesn't work. (We only use one of the save routines at
18935 the moment, though.)
18937 - PPC32 ELF targets have "exit" versions of the restore routines
18938 that restore the link register and can save some extra space.
18939 These require an extra suffix. (There are also "tail" versions
18940 of the restore routines and "GOT" versions of the save routines,
18941 but we don't generate those at present. Same problems apply,
18942 though.)
18944 We deal with all this by synthesizing our own prefix/suffix and
18945 using that for the simple sprintf call shown above. */
18946 if (TARGET_SPE)
18948 /* No floating point saves on the SPE. */
18949 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
18951 if ((sel & SAVRES_SAVE))
18952 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
18953 else
18954 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
18956 if ((sel & SAVRES_LR))
18957 suffix = "_x";
18959 else if (DEFAULT_ABI == ABI_V4)
18961 if (TARGET_64BIT)
18962 goto aix_names;
18964 if ((sel & SAVRES_REG) == SAVRES_GPR)
18965 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
18966 else if ((sel & SAVRES_REG) == SAVRES_FPR)
18967 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
18968 else if ((sel & SAVRES_REG) == SAVRES_VR)
18969 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
18970 else
18971 abort ();
18973 if ((sel & SAVRES_LR))
18974 suffix = "_x";
18976 else if (DEFAULT_ABI == ABI_AIX)
18978 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
18979 /* No out-of-line save/restore routines for GPRs on AIX. */
18980 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
18981 #endif
18983 aix_names:
18984 if ((sel & SAVRES_REG) == SAVRES_GPR)
18985 prefix = ((sel & SAVRES_SAVE)
18986 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
18987 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
18988 else if ((sel & SAVRES_REG) == SAVRES_FPR)
18990 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
18991 if ((sel & SAVRES_LR))
18992 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
18993 else
18994 #endif
18996 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
18997 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
19000 else if ((sel & SAVRES_REG) == SAVRES_VR)
19001 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
19002 else
19003 abort ();
19006 if (DEFAULT_ABI == ABI_DARWIN)
19008 /* The Darwin approach is (slightly) different, in order to be
19009 compatible with code generated by the system toolchain. There is a
19010 single symbol for the start of the save sequence, and the code here
19011 embeds an offset into that code on the basis of the first register
19012 to be saved. */
19013 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
19014 if ((sel & SAVRES_REG) == SAVRES_GPR)
19015 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
19016 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
19017 (regno - 13) * 4, prefix, regno);
19018 else if ((sel & SAVRES_REG) == SAVRES_FPR)
19019 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
19020 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
19021 else if ((sel & SAVRES_REG) == SAVRES_VR)
19022 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
19023 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
19024 else
19025 abort ();
19027 else
19028 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
19030 return savres_routine_name;
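/* Examples of the names synthesized above (illustrative): 32-bit ELF
   saving GPRs from r29 up gives "_savegpr_29", and the exit variant
   of the matching restore, which also reloads the link register, is
   "_restgpr_29_x"; AIX and PPC64 ELF instead use "_savegpr0_29" or
   "_savegpr1_29" depending on how LR is handled.  */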
19033 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
19034 SEL selects the register class and save vs. restore, as above. */
19036 static rtx
19037 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
19039 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
19040 ? info->first_gp_reg_save
19041 : (sel & SAVRES_REG) == SAVRES_FPR
19042 ? info->first_fp_reg_save - 32
19043 : (sel & SAVRES_REG) == SAVRES_VR
19044 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
19045 : -1);
19046 rtx sym;
19047 int select = sel;
19049 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
19050 versions of the gpr routines. */
19051 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
19052 && info->spe_64bit_regs_used)
19053 select ^= SAVRES_FPR ^ SAVRES_GPR;
19055 /* Don't generate bogus routine names. */
19056 gcc_assert (FIRST_SAVRES_REGISTER <= regno
19057 && regno <= LAST_SAVRES_REGISTER
19058 && select >= 0 && select <= 12);
19060 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
19062 if (sym == NULL)
19064 char *name;
19066 name = rs6000_savres_routine_name (info, regno, sel);
19068 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
19069 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
19070 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
19073 return sym;
19076 /* Emit a sequence of insns, including a stack tie if needed, for
19077 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
19078 reset the stack pointer, but move the base of the frame into
19079 reg UPDT_REGNO for use by out-of-line register restore routines. */
19081 static rtx
19082 rs6000_emit_stack_reset (rs6000_stack_t *info,
19083 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
19084 unsigned updt_regno)
19086 rtx updt_reg_rtx;
19088 /* This blockage is needed so that sched doesn't decide to move
19089 the sp change before the register restores. */
19090 if (DEFAULT_ABI == ABI_V4
19091 || (TARGET_SPE_ABI
19092 && info->spe_64bit_regs_used != 0
19093 && info->first_gp_reg_save != 32))
19094 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
19096 /* If we are restoring registers out-of-line, we will be using the
19097 "exit" variants of the restore routines, which will reset the
19098 stack for us. But we do need to point updt_reg into the
19099 right place for those routines. */
19100 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
19102 if (frame_off != 0)
19103 return emit_insn (gen_add3_insn (updt_reg_rtx,
19104 frame_reg_rtx, GEN_INT (frame_off)));
19105 else if (REGNO (frame_reg_rtx) != updt_regno)
19106 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
19108 return NULL_RTX;
19111 static inline unsigned
19112 ptr_regno_for_savres (int sel)
19114 if (DEFAULT_ABI == ABI_AIX)
19115 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
19116 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
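/* Summary of the choice above: ABI_AIX uses r1 for FPR routines or
   whenever LR is involved, and r12 otherwise; ABI_DARWIN uses r1 for
   FPR routines and r11 otherwise; everything else (ABI_V4) uses
   r11.  */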
19119 /* Construct a parallel rtx describing the effect of a call to an
19120 out-of-line register save/restore routine, and emit the insn
19121 or jump_insn as appropriate. */
19123 static rtx
19124 rs6000_emit_savres_rtx (rs6000_stack_t *info,
19125 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
19126 enum machine_mode reg_mode, int sel)
19128 int i;
19129 int offset, start_reg, end_reg, n_regs, use_reg;
19130 int reg_size = GET_MODE_SIZE (reg_mode);
19131 rtx sym;
19132 rtvec p;
19133 rtx par, insn;
19135 offset = 0;
19136 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19137 ? info->first_gp_reg_save
19138 : (sel & SAVRES_REG) == SAVRES_FPR
19139 ? info->first_fp_reg_save
19140 : (sel & SAVRES_REG) == SAVRES_VR
19141 ? info->first_altivec_reg_save
19142 : -1);
19143 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
19144 ? 32
19145 : (sel & SAVRES_REG) == SAVRES_FPR
19146 ? 64
19147 : (sel & SAVRES_REG) == SAVRES_VR
19148 ? LAST_ALTIVEC_REGNO + 1
19149 : -1);
19150 n_regs = end_reg - start_reg;
19151 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
19152 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
19153 + n_regs);
19155 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19156 RTVEC_ELT (p, offset++) = ret_rtx;
19158 RTVEC_ELT (p, offset++)
19159 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
19161 sym = rs6000_savres_routine_sym (info, sel);
19162 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
19164 use_reg = ptr_regno_for_savres (sel);
19165 if ((sel & SAVRES_REG) == SAVRES_VR)
19167 /* Vector regs are saved/restored using [reg+reg] addressing. */
19168 RTVEC_ELT (p, offset++)
19169 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19170 RTVEC_ELT (p, offset++)
19171 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
19173 else
19174 RTVEC_ELT (p, offset++)
19175 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
19177 for (i = 0; i < end_reg - start_reg; i++)
19178 RTVEC_ELT (p, i + offset)
19179 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
19180 frame_reg_rtx, save_area_offset + reg_size * i,
19181 (sel & SAVRES_SAVE) != 0);
19183 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19184 RTVEC_ELT (p, i + offset)
19185 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
19187 par = gen_rtx_PARALLEL (VOIDmode, p);
19189 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
19191 insn = emit_jump_insn (par);
19192 JUMP_LABEL (insn) = ret_rtx;
19194 else
19195 insn = emit_insn (par);
19196 return insn;
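/* Shape of the PARALLEL built above (illustrative), for an
   out-of-line GPR save of r29..r31 that also stores LR:

       (parallel [(clobber (reg LR))
                  (use (symbol_ref "_savegpr0_29"))
                  (use (reg 1))            ; from ptr_regno_for_savres
                  (set (mem ...) (reg 29))
                  (set (mem ...) (reg 30))
                  (set (mem ...) (reg 31))
                  (set (mem ...) (reg 0))])  ; the LR copy

   A restore with SAVRES_LR additionally begins with a RETURN and is
   emitted as a jump_insn.  */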
19199 /* Determine whether the gp REG is really used. */
19201 static bool
19202 rs6000_reg_live_or_pic_offset_p (int reg)
19204 /* If the function calls eh_return, claim as used all the registers that
19205 would otherwise be checked for liveness. This is required for the PIC offset
19206 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
19207 register allocation purposes in this case. */
19209 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
19210 && (!call_used_regs[reg]
19211 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19212 && !TARGET_SINGLE_PIC_BASE
19213 && TARGET_TOC && TARGET_MINIMAL_TOC)))
19214 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
19215 && !TARGET_SINGLE_PIC_BASE
19216 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
19217 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
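/* Example: under -fpic on SVR4 the PIC offset table register (r30)
   is reported as used here even though it is in call_used_regs, so
   the prologue saves and initializes it; likewise r2 on AIX with
   -mminimal-toc when the function calls eh_return.  */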
19220 /* Emit function prologue as insns. */
19222 void
19223 rs6000_emit_prologue (void)
19225 rs6000_stack_t *info = rs6000_stack_info ();
19226 enum machine_mode reg_mode = Pmode;
19227 int reg_size = TARGET_32BIT ? 4 : 8;
19228 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
19229 rtx frame_reg_rtx = sp_reg_rtx;
19230 unsigned int cr_save_regno;
19231 rtx cr_save_rtx = NULL_RTX;
19232 rtx insn;
19233 int strategy;
19234 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
19235 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
19236 && call_used_regs[STATIC_CHAIN_REGNUM]);
19237 /* Offset to top of frame for frame_reg and sp respectively. */
19238 HOST_WIDE_INT frame_off = 0;
19239 HOST_WIDE_INT sp_off = 0;
19241 #ifdef ENABLE_CHECKING
19242 /* Track and check usage of r0, r11, r12. */
19243 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
19244 #define START_USE(R) do \
19246 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19247 reg_inuse |= 1 << (R); \
19248 } while (0)
19249 #define END_USE(R) do \
19251 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
19252 reg_inuse &= ~(1 << (R)); \
19253 } while (0)
19254 #define NOT_INUSE(R) do \
19256 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
19257 } while (0)
19258 #else
19259 #define START_USE(R) do {} while (0)
19260 #define END_USE(R) do {} while (0)
19261 #define NOT_INUSE(R) do {} while (0)
19262 #endif
19264 if (flag_stack_usage_info)
19265 current_function_static_stack_size = info->total_size;
19267 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && info->total_size)
19268 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, info->total_size);
19270 if (TARGET_FIX_AND_CONTINUE)
19272 /* gdb on darwin arranges to forward a function from the old
19273 address by modifying the first 5 instructions of the function
19274 to branch to the overriding function. This is necessary to
19275 permit function pointers that point to the old function to
19276 actually forward to the new function. */
19277 emit_insn (gen_nop ());
19278 emit_insn (gen_nop ());
19279 emit_insn (gen_nop ());
19280 emit_insn (gen_nop ());
19281 emit_insn (gen_nop ());
19284 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
19286 reg_mode = V2SImode;
19287 reg_size = 8;
19290 /* Handle world saves specially here. */
19291 if (WORLD_SAVE_P (info))
19293 int i, j, sz;
19294 rtx treg;
19295 rtvec p;
19296 rtx reg0;
19298 /* save_world expects lr in r0. */
19299 reg0 = gen_rtx_REG (Pmode, 0);
19300 if (info->lr_save_p)
19302 insn = emit_move_insn (reg0,
19303 gen_rtx_REG (Pmode, LR_REGNO));
19304 RTX_FRAME_RELATED_P (insn) = 1;
19307 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
19308 assumptions about the offsets of various bits of the stack
19309 frame. */
19310 gcc_assert (info->gp_save_offset == -220
19311 && info->fp_save_offset == -144
19312 && info->lr_save_offset == 8
19313 && info->cr_save_offset == 4
19314 && info->push_p
19315 && info->lr_save_p
19316 && (!crtl->calls_eh_return
19317 || info->ehrd_offset == -432)
19318 && info->vrsave_save_offset == -224
19319 && info->altivec_save_offset == -416);
19321 treg = gen_rtx_REG (SImode, 11);
19322 emit_move_insn (treg, GEN_INT (-info->total_size));
19324 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
19325 in R11. It also clobbers R12, so beware! */
19327 /* Preserve CR2 for save_world prologues */
19328 sz = 5;
19329 sz += 32 - info->first_gp_reg_save;
19330 sz += 64 - info->first_fp_reg_save;
19331 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
19332 p = rtvec_alloc (sz);
19333 j = 0;
19334 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
19335 gen_rtx_REG (SImode,
19336 LR_REGNO));
19337 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
19338 gen_rtx_SYMBOL_REF (Pmode,
19339 "*save_world"));
19340 /* We do floats first so that the instruction pattern matches
19341 properly. */
19342 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19343 RTVEC_ELT (p, j++)
19344 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19345 ? DFmode : SFmode,
19346 info->first_fp_reg_save + i),
19347 frame_reg_rtx,
19348 info->fp_save_offset + frame_off + 8 * i);
19349 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
19350 RTVEC_ELT (p, j++)
19351 = gen_frame_store (gen_rtx_REG (V4SImode,
19352 info->first_altivec_reg_save + i),
19353 frame_reg_rtx,
19354 info->altivec_save_offset + frame_off + 16 * i);
19355 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19356 RTVEC_ELT (p, j++)
19357 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19358 frame_reg_rtx,
19359 info->gp_save_offset + frame_off + reg_size * i);
19361 /* CR register traditionally saved as CR2. */
19362 RTVEC_ELT (p, j++)
19363 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
19364 frame_reg_rtx, info->cr_save_offset + frame_off);
19365 /* Record the store of R0 (the saved LR). */
19366 if (info->lr_save_p)
19367 RTVEC_ELT (p, j++)
19368 = gen_frame_store (reg0,
19369 frame_reg_rtx, info->lr_save_offset + frame_off);
19370 /* Explain what happens to the stack pointer. */
19372 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
19373 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
19376 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19377 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19378 treg, GEN_INT (-info->total_size));
19379 sp_off = frame_off = info->total_size;
19382 strategy = info->savres_strategy;
19384 /* For V.4, update stack before we do any saving and set back pointer. */
19385 if (! WORLD_SAVE_P (info)
19386 && info->push_p
19387 && (DEFAULT_ABI == ABI_V4
19388 || crtl->calls_eh_return))
19390 bool need_r11 = (TARGET_SPE
19391 ? (!(strategy & SAVE_INLINE_GPRS)
19392 && info->spe_64bit_regs_used == 0)
19393 : (!(strategy & SAVE_INLINE_FPRS)
19394 || !(strategy & SAVE_INLINE_GPRS)
19395 || !(strategy & SAVE_INLINE_VRS)));
19396 int ptr_regno = -1;
19397 rtx ptr_reg = NULL_RTX;
19398 int ptr_off = 0;
19400 if (info->total_size < 32767)
19401 frame_off = info->total_size;
19402 else if (need_r11)
19403 ptr_regno = 11;
19404 else if (info->cr_save_p
19405 || info->lr_save_p
19406 || info->first_fp_reg_save < 64
19407 || info->first_gp_reg_save < 32
19408 || info->altivec_size != 0
19409 || info->vrsave_mask != 0
19410 || crtl->calls_eh_return)
19411 ptr_regno = 12;
19412 else
19414 /* The prologue won't be saving any regs so there is no need
19415 to set up a frame register to access any frame save area.
19416 We also won't be using frame_off anywhere below, but set
19417 the correct value anyway to protect against future
19418 changes to this function. */
19419 frame_off = info->total_size;
19421 if (ptr_regno != -1)
19423 /* Set up the frame offset to that needed by the first
19424 out-of-line save function. */
19425 START_USE (ptr_regno);
19426 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19427 frame_reg_rtx = ptr_reg;
19428 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
19429 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
19430 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
19431 ptr_off = info->gp_save_offset + info->gp_size;
19432 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
19433 ptr_off = info->altivec_save_offset + info->altivec_size;
19434 frame_off = -ptr_off;
19436 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19437 sp_off = info->total_size;
19438 if (frame_reg_rtx != sp_reg_rtx)
19439 rs6000_emit_stack_tie (frame_reg_rtx, false);
19442 /* If we use the link register, get it into r0. */
19443 if (!WORLD_SAVE_P (info) && info->lr_save_p)
19445 rtx addr, reg, mem;
19447 reg = gen_rtx_REG (Pmode, 0);
19448 START_USE (0);
19449 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
19450 RTX_FRAME_RELATED_P (insn) = 1;
19452 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
19453 | SAVE_NOINLINE_FPRS_SAVES_LR)))
19455 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19456 GEN_INT (info->lr_save_offset + frame_off));
19457 mem = gen_rtx_MEM (Pmode, addr);
19458 /* This should not be of rs6000_sr_alias_set, because of
19459 __builtin_return_address. */
19461 insn = emit_move_insn (mem, reg);
19462 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19463 NULL_RTX, NULL_RTX);
19464 END_USE (0);
19468 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
19469 r12 will be needed by out-of-line gpr restore. */
19470 cr_save_regno = (DEFAULT_ABI == ABI_AIX
19471 && !(strategy & (SAVE_INLINE_GPRS
19472 | SAVE_NOINLINE_GPRS_SAVES_LR))
19473 ? 11 : 12);
19474 if (!WORLD_SAVE_P (info)
19475 && info->cr_save_p
19476 && REGNO (frame_reg_rtx) != cr_save_regno
19477 && !(using_static_chain_p && cr_save_regno == 11))
19479 rtx set;
19481 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
19482 START_USE (cr_save_regno);
19483 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19484 RTX_FRAME_RELATED_P (insn) = 1;
19485 /* Now, there's no way that dwarf2out_frame_debug_expr is going
19486 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
19487 But that's OK. All we have to do is specify that _one_ condition
19488 code register is saved in this stack slot. The thrower's epilogue
19489 will then restore all the call-saved registers.
19490 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
19491 set = gen_rtx_SET (VOIDmode, cr_save_rtx,
19492 gen_rtx_REG (SImode, CR2_REGNO));
19493 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19496 /* Do any required saving of fpr's. If only one or two to save, do
19497 it ourselves. Otherwise, call function. */
19498 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
19500 int i;
19501 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
19502 if (save_reg_p (info->first_fp_reg_save + i))
19503 emit_frame_save (frame_reg_rtx,
19504 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
19505 ? DFmode : SFmode),
19506 info->first_fp_reg_save + i,
19507 info->fp_save_offset + frame_off + 8 * i,
19508 sp_off - frame_off);
19510 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
19512 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
19513 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
19514 unsigned ptr_regno = ptr_regno_for_savres (sel);
19515 rtx ptr_reg = frame_reg_rtx;
19517 if (REGNO (frame_reg_rtx) == ptr_regno)
19518 gcc_checking_assert (frame_off == 0);
19519 else
19521 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19522 NOT_INUSE (ptr_regno);
19523 emit_insn (gen_add3_insn (ptr_reg,
19524 frame_reg_rtx, GEN_INT (frame_off)));
19526 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19527 info->fp_save_offset,
19528 info->lr_save_offset,
19529 DFmode, sel);
19530 rs6000_frame_related (insn, ptr_reg, sp_off,
19531 NULL_RTX, NULL_RTX);
19532 if (lr)
19533 END_USE (0);
19536 /* Save GPRs. This is done as a PARALLEL if we are using
19537 the store-multiple instructions. */
19538 if (!WORLD_SAVE_P (info)
19539 && TARGET_SPE_ABI
19540 && info->spe_64bit_regs_used != 0
19541 && info->first_gp_reg_save != 32)
19543 int i;
19544 rtx spe_save_area_ptr;
19545 HOST_WIDE_INT save_off;
19546 int ool_adjust = 0;
19548 /* Determine whether we can address all of the registers that need
19549 to be saved with an offset from frame_reg_rtx that fits in
19550 the small const field for SPE memory instructions. */
19551 int spe_regs_addressable
19552 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
19553 + reg_size * (32 - info->first_gp_reg_save - 1))
19554 && (strategy & SAVE_INLINE_GPRS));
19556 if (spe_regs_addressable)
19558 spe_save_area_ptr = frame_reg_rtx;
19559 save_off = frame_off;
19561 else
19563 /* Make r11 point to the start of the SPE save area. We need
19564 to be careful here if r11 is holding the static chain. If
19565 it is, then temporarily save it in r0. */
19566 HOST_WIDE_INT offset;
19568 if (!(strategy & SAVE_INLINE_GPRS))
19569 ool_adjust = 8 * (info->first_gp_reg_save
19570 - (FIRST_SAVRES_REGISTER + 1));
19571 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
19572 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
19573 save_off = frame_off - offset;
19575 if (using_static_chain_p)
19577 rtx r0 = gen_rtx_REG (Pmode, 0);
19579 START_USE (0);
19580 gcc_assert (info->first_gp_reg_save > 11);
19582 emit_move_insn (r0, spe_save_area_ptr);
19584 else if (REGNO (frame_reg_rtx) != 11)
19585 START_USE (11);
19587 emit_insn (gen_addsi3 (spe_save_area_ptr,
19588 frame_reg_rtx, GEN_INT (offset)));
19589 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
19590 frame_off = -info->spe_gp_save_offset + ool_adjust;
19593 if ((strategy & SAVE_INLINE_GPRS))
19595 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19596 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19597 emit_frame_save (spe_save_area_ptr, reg_mode,
19598 info->first_gp_reg_save + i,
19599 (info->spe_gp_save_offset + save_off
19600 + reg_size * i),
19601 sp_off - save_off);
19603 else
19605 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
19606 info->spe_gp_save_offset + save_off,
19607 0, reg_mode,
19608 SAVRES_SAVE | SAVRES_GPR);
19610 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
19611 NULL_RTX, NULL_RTX);
19614 /* Move the static chain pointer back. */
19615 if (!spe_regs_addressable)
19617 if (using_static_chain_p)
19619 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
19620 END_USE (0);
19622 else if (REGNO (frame_reg_rtx) != 11)
19623 END_USE (11);
19626 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
19628 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
19629 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
19630 unsigned ptr_regno = ptr_regno_for_savres (sel);
19631 rtx ptr_reg = frame_reg_rtx;
19632 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
19633 int end_save = info->gp_save_offset + info->gp_size;
19634 int ptr_off;
19636 if (!ptr_set_up)
19637 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19639 /* Need to adjust r11 (r12) if we saved any FPRs. */
19640 if (end_save + frame_off != 0)
19642 rtx offset = GEN_INT (end_save + frame_off);
19644 if (ptr_set_up)
19645 frame_off = -end_save;
19646 else
19647 NOT_INUSE (ptr_regno);
19648 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
19650 else if (!ptr_set_up)
19652 NOT_INUSE (ptr_regno);
19653 emit_move_insn (ptr_reg, frame_reg_rtx);
19655 ptr_off = -end_save;
19656 insn = rs6000_emit_savres_rtx (info, ptr_reg,
19657 info->gp_save_offset + ptr_off,
19658 info->lr_save_offset + ptr_off,
19659 reg_mode, sel);
19660 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
19661 NULL_RTX, NULL_RTX);
19662 if (lr)
19663 END_USE (0);
19665 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
19667 rtvec p;
19668 int i;
19669 p = rtvec_alloc (32 - info->first_gp_reg_save);
19670 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19671 RTVEC_ELT (p, i)
19672 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
19673 frame_reg_rtx,
19674 info->gp_save_offset + frame_off + reg_size * i);
19675 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
19676 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19677 NULL_RTX, NULL_RTX);
19679 else if (!WORLD_SAVE_P (info))
19681 int i;
19682 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
19683 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
19684 emit_frame_save (frame_reg_rtx, reg_mode,
19685 info->first_gp_reg_save + i,
19686 info->gp_save_offset + frame_off + reg_size * i,
19687 sp_off - frame_off);
19690 if (crtl->calls_eh_return)
19692 unsigned int i;
19693 rtvec p;
19695 for (i = 0; ; ++i)
19697 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19698 if (regno == INVALID_REGNUM)
19699 break;
19702 p = rtvec_alloc (i);
19704 for (i = 0; ; ++i)
19706 unsigned int regno = EH_RETURN_DATA_REGNO (i);
19707 if (regno == INVALID_REGNUM)
19708 break;
19710 insn
19711 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
19712 sp_reg_rtx,
19713 info->ehrd_offset + sp_off + reg_size * (int) i);
19714 RTVEC_ELT (p, i) = insn;
19715 RTX_FRAME_RELATED_P (insn) = 1;
19718 insn = emit_insn (gen_blockage ());
19719 RTX_FRAME_RELATED_P (insn) = 1;
19720 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
19723 /* In AIX ABI we need to make sure r2 is really saved. */
19724 if (TARGET_AIX && crtl->calls_eh_return)
19726 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
19727 rtx save_insn, join_insn, note;
19728 long toc_restore_insn;
19730 tmp_reg = gen_rtx_REG (Pmode, 11);
19731 tmp_reg_si = gen_rtx_REG (SImode, 11);
19732 if (using_static_chain_p)
19734 START_USE (0);
19735 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
19737 else
19738 START_USE (11);
19739 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
19740 /* Peek at the instruction to which this function returns. If it's
19741 restoring r2, then we know we've already saved r2. We can't
19742 unconditionally save r2 because the value we have will already
19743 be updated if we arrived at this function via a plt call or
19744 toc adjusting stub. */
19745 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
19746 toc_restore_insn = TARGET_32BIT ? 0x80410014 : 0xE8410028;
19747 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
19748 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
19749 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
19750 validate_condition_mode (EQ, CCUNSmode);
19751 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
19752 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19753 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
19754 toc_save_done = gen_label_rtx ();
19755 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
19756 gen_rtx_EQ (VOIDmode, compare_result,
19757 const0_rtx),
19758 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
19759 pc_rtx);
19760 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
19761 JUMP_LABEL (jump) = toc_save_done;
19762 LABEL_NUSES (toc_save_done) += 1;
19764 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
19765 TOC_REGNUM, frame_off + 5 * reg_size,
19766 sp_off - frame_off);
19768 emit_label (toc_save_done);
19770 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
19771 have a CFG that has different saves along different paths.
19772 Move the note to a dummy blockage insn, which describes that
19773 R2 is unconditionally saved after the label. */
19774 /* ??? An alternate representation might be a special insn pattern
19775 containing both the branch and the store. That might give the
19776 code that minimizes the number of DW_CFA_advance opcodes more
19777 freedom in placing the annotations. */
19778 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
19779 if (note)
19780 remove_note (save_insn, note);
19781 else
19782 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
19783 copy_rtx (PATTERN (save_insn)), NULL_RTX);
19784 RTX_FRAME_RELATED_P (save_insn) = 0;
19786 join_insn = emit_insn (gen_blockage ());
19787 REG_NOTES (join_insn) = note;
19788 RTX_FRAME_RELATED_P (join_insn) = 1;
19790 if (using_static_chain_p)
19792 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
19793 END_USE (0);
19795 else
19796 END_USE (11);
19799 /* Save CR if we use any that must be preserved. */
19800 if (!WORLD_SAVE_P (info) && info->cr_save_p)
19802 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
19803 GEN_INT (info->cr_save_offset + frame_off));
19804 rtx mem = gen_frame_mem (SImode, addr);
19805 /* See the large comment above about why CR2_REGNO is used. */
19806 rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
19808 /* If we didn't copy cr before, do so now using r0. */
19809 if (cr_save_rtx == NULL_RTX)
19811 rtx set;
19813 START_USE (0);
19814 cr_save_rtx = gen_rtx_REG (SImode, 0);
19815 insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
19816 RTX_FRAME_RELATED_P (insn) = 1;
19817 set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
19818 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
19820 insn = emit_move_insn (mem, cr_save_rtx);
19821 END_USE (REGNO (cr_save_rtx));
19823 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19824 NULL_RTX, NULL_RTX);
19827 /* Update stack and set back pointer unless this is V.4,
19828 for which it was done previously. */
19829 if (!WORLD_SAVE_P (info) && info->push_p
19830 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
19832 rtx ptr_reg = NULL;
19833 int ptr_off = 0;
19835 /* If saving altivec regs we need to be able to address all save
19836 locations using a 16-bit offset. */
19837 if ((strategy & SAVE_INLINE_VRS) == 0
19838 || (info->altivec_size != 0
19839 && (info->altivec_save_offset + info->altivec_size - 16
19840 + info->total_size - frame_off) > 32767)
19841 || (info->vrsave_mask != 0
19842 && (info->vrsave_save_offset
19843 + info->total_size - frame_off) > 32767))
19845 int sel = SAVRES_SAVE | SAVRES_VR;
19846 unsigned ptr_regno = ptr_regno_for_savres (sel);
19848 if (REGNO (frame_reg_rtx) != ptr_regno)
19849 START_USE (ptr_regno);
19850 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
19851 frame_reg_rtx = ptr_reg;
19852 ptr_off = info->altivec_save_offset + info->altivec_size;
19853 frame_off = -ptr_off;
19855 else if (REGNO (frame_reg_rtx) == 1)
19856 frame_off = info->total_size;
19857 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
19858 sp_off = info->total_size;
19859 if (frame_reg_rtx != sp_reg_rtx)
19860 rs6000_emit_stack_tie (frame_reg_rtx, false);
19863 /* Set frame pointer, if needed. */
19864 if (frame_pointer_needed)
19866 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
19867 sp_reg_rtx);
19868 RTX_FRAME_RELATED_P (insn) = 1;
19871 /* Save AltiVec registers if needed. Save here because the red zone does
19872 not always include AltiVec registers. */
19873 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
19874 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
19876 int end_save = info->altivec_save_offset + info->altivec_size;
19877 int ptr_off;
19878 /* Oddly, the vector save/restore functions point r0 at the end
19879 of the save area, then use r11 or r12 to load offsets for
19880 [reg+reg] addressing. */
19881 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
19882 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
19883 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
19885 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
19886 NOT_INUSE (0);
19887 if (end_save + frame_off != 0)
19889 rtx offset = GEN_INT (end_save + frame_off);
19891 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
19893 else
19894 emit_move_insn (ptr_reg, frame_reg_rtx);
19896 ptr_off = -end_save;
19897 insn = rs6000_emit_savres_rtx (info, scratch_reg,
19898 info->altivec_save_offset + ptr_off,
19899 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
19900 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
19901 NULL_RTX, NULL_RTX);
19902 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
19904 /* The oddity mentioned above clobbered our frame reg. */
19905 emit_move_insn (frame_reg_rtx, ptr_reg);
19906 frame_off = ptr_off;
19909 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
19910 && info->altivec_size != 0)
19912 int i;
19914 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
19915 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
19917 rtx areg, savereg, mem;
19918 int offset;
19920 offset = (info->altivec_save_offset + frame_off
19921 + 16 * (i - info->first_altivec_reg_save));
19923 savereg = gen_rtx_REG (V4SImode, i);
19925 NOT_INUSE (0);
19926 areg = gen_rtx_REG (Pmode, 0);
19927 emit_move_insn (areg, GEN_INT (offset));
19929 /* AltiVec addressing mode is [reg+reg]. */
19930 mem = gen_frame_mem (V4SImode,
19931 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
19933 insn = emit_move_insn (mem, savereg);
19935 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
19936 areg, GEN_INT (offset));
19940 /* VRSAVE is a bit vector representing which AltiVec registers
19941 are used. The OS uses this to determine which vector
19942 registers to save on a context switch. We need to save
19943 VRSAVE on the stack frame, add whatever AltiVec registers we
19944 used in this function, and do the corresponding magic in the
19945 epilogue. */
19947 if (!WORLD_SAVE_P (info)
19948 && TARGET_ALTIVEC
19949 && TARGET_ALTIVEC_VRSAVE
19950 && info->vrsave_mask != 0)
19952 rtx reg, vrsave;
19953 int offset;
19954 int save_regno;
19956 /* Get VRSAVE onto a GPR. Note that ABI_V4 might be using r12
19957 as frame_reg_rtx and r11 as the static chain pointer for
19958 nested functions. */
19959 save_regno = 12;
19960 if (DEFAULT_ABI == ABI_AIX && !using_static_chain_p)
19961 save_regno = 11;
19962 else if (REGNO (frame_reg_rtx) == 12)
19964 save_regno = 11;
19965 if (using_static_chain_p)
19966 save_regno = 0;
19969 NOT_INUSE (save_regno);
19970 reg = gen_rtx_REG (SImode, save_regno);
19971 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
19972 if (TARGET_MACHO)
19973 emit_insn (gen_get_vrsave_internal (reg));
19974 else
19975 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
19977 /* Save VRSAVE. */
19978 offset = info->vrsave_save_offset + frame_off;
19979 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
19981 /* Include the registers in the mask. */
19982 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
19984 insn = emit_insn (generate_set_vrsave (reg, info, 0));
19987 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
19988 if (!TARGET_SINGLE_PIC_BASE
19989 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
19990 || (DEFAULT_ABI == ABI_V4
19991 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
19992 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
19994 /* If emit_load_toc_table will use the link register, we need to save
19995 it. We use R12 for this purpose because emit_load_toc_table
19996 can use register 0. This allows us to use a plain 'blr' to return
19997 from the procedure more often. */
19998 int save_LR_around_toc_setup = (TARGET_ELF
19999 && DEFAULT_ABI != ABI_AIX
20000 && flag_pic
20001 && ! info->lr_save_p
20002 && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
20003 if (save_LR_around_toc_setup)
20005 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20006 rtx tmp = gen_rtx_REG (Pmode, 12);
20008 insn = emit_move_insn (tmp, lr);
20009 RTX_FRAME_RELATED_P (insn) = 1;
20011 rs6000_emit_load_toc_table (TRUE);
20013 insn = emit_move_insn (lr, tmp);
20014 add_reg_note (insn, REG_CFA_RESTORE, lr);
20015 RTX_FRAME_RELATED_P (insn) = 1;
20017 else
20018 rs6000_emit_load_toc_table (TRUE);
20021 #if TARGET_MACHO
20022 if (!TARGET_SINGLE_PIC_BASE
20023 && DEFAULT_ABI == ABI_DARWIN
20024 && flag_pic && crtl->uses_pic_offset_table)
20026 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20027 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
20029 /* Save and restore LR locally around this call (in R0). */
20030 if (!info->lr_save_p)
20031 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
20033 emit_insn (gen_load_macho_picbase (src));
20035 emit_move_insn (gen_rtx_REG (Pmode,
20036 RS6000_PIC_OFFSET_TABLE_REGNUM),
20037 lr);
20039 if (!info->lr_save_p)
20040 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
20042 #endif
20044 /* If we need to, save the TOC register after doing the stack setup.
20045 Do not emit eh frame info for this save. The unwinder wants info,
20046 conceptually attached to instructions in this function, about
20047 register values in the caller of this function. This R2 may have
20048 already been changed from the value in the caller.
20049 We don't attempt to write accurate DWARF EH frame info for R2
20050 because code emitted by gcc for a (non-pointer) function call
20051 doesn't save and restore R2. Instead, R2 is managed out-of-line
20052 by a linker generated plt call stub when the function resides in
20053 a shared library. This behaviour is costly to describe in DWARF,
20054 both in terms of the size of DWARF info and the time taken in the
20055 unwinder to interpret it. R2 changes, apart from the
20056 calls_eh_return case earlier in this function, are handled by
20057 linux-unwind.h frob_update_context. */
20058 if (rs6000_save_toc_in_prologue_p ())
20060 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
20061 emit_insn (gen_frame_store (reg, sp_reg_rtx, 5 * reg_size));
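/* For orientation, an illustrative 64-bit AIX-style prologue for a
   small frame that only saves LR and r31 might assemble to

       mflr  r0
       std   r0,16(r1)       # LR in the caller's frame
       std   r31,-8(r1)      # callee-saved GPR
       stdu  r1,-128(r1)     # allocate frame, store back chain

   with CR, FPR, AltiVec and VRSAVE saves interleaved as the savres
   strategy bits dictate; under ABI_V4 the stack update is emitted
   first instead, as noted above.  */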
20065 /* Write function prologue. */
20067 static void
20068 rs6000_output_function_prologue (FILE *file,
20069 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
20071 rs6000_stack_t *info = rs6000_stack_info ();
20073 if (TARGET_DEBUG_STACK)
20074 debug_stack_info (info);
20076 /* Write .extern for any function we will call to save and restore
20077 fp values. */
20078 if (info->first_fp_reg_save < 64
20079 && !TARGET_MACHO
20080 && !TARGET_ELF)
20082 char *name;
20083 int regno = info->first_fp_reg_save - 32;
20085 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
20087 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
20088 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
20089 name = rs6000_savres_routine_name (info, regno, sel);
20090 fprintf (file, "\t.extern %s\n", name);
20092 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
20094 bool lr = (info->savres_strategy
20095 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20096 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20097 name = rs6000_savres_routine_name (info, regno, sel);
20098 fprintf (file, "\t.extern %s\n", name);
20102 /* Write .extern for AIX common mode routines, if needed. */
20103 if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
20105 fputs ("\t.extern __mulh\n", file);
20106 fputs ("\t.extern __mull\n", file);
20107 fputs ("\t.extern __divss\n", file);
20108 fputs ("\t.extern __divus\n", file);
20109 fputs ("\t.extern __quoss\n", file);
20110 fputs ("\t.extern __quous\n", file);
20111 common_mode_defined = 1;
20114 rs6000_pic_labelno++;
20117 /* Non-zero if vmx regs are restored before the frame pop, zero if
20118 we restore after the pop when possible. */
20119 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
20121 /* Restoring cr is a two step process: loading a reg from the frame
20122 save, then moving the reg to cr. For ABI_V4 we must let the
20123 unwinder know that the stack location is no longer valid at or
20124 before the stack deallocation, but we can't emit a cfa_restore for
20125 cr at the stack deallocation like we do for other registers.
20126 The trouble is that it is possible for the move to cr to be
20127 scheduled after the stack deallocation. So say exactly where cr
20128 is located on each of the two insns. */
20130 static rtx
20131 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
20133 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
20134 rtx reg = gen_rtx_REG (SImode, regno);
20135 rtx insn = emit_move_insn (reg, mem);
20137 if (!exit_func && DEFAULT_ABI == ABI_V4)
20139 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20140 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
20142 add_reg_note (insn, REG_CFA_REGISTER, set);
20143 RTX_FRAME_RELATED_P (insn) = 1;
20145 return reg;
20148 /* Reload CR from REG. */
20150 static void
20151 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
20153 int count = 0;
20154 int i;
20156 if (using_mfcr_multiple)
20158 for (i = 0; i < 8; i++)
20159 if (save_reg_p (CR0_REGNO + i))
20160 count++;
20161 gcc_assert (count);
20164 if (using_mfcr_multiple && count > 1)
20166 rtvec p;
20167 int ndx;
20169 p = rtvec_alloc (count);
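20170 /* One mtcrf per saved field; mask 1 << (7 - i) selects CR field i, CR0 being the most significant. */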
20171 ndx = 0;
20172 for (i = 0; i < 8; i++)
20173 if (save_reg_p (CR0_REGNO + i))
20175 rtvec r = rtvec_alloc (2);
20176 RTVEC_ELT (r, 0) = reg;
20177 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
20178 RTVEC_ELT (p, ndx) =
20179 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
20180 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
20181 ndx++;
20183 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20184 gcc_assert (ndx == count);
20186 else
20187 for (i = 0; i < 8; i++)
20188 if (save_reg_p (CR0_REGNO + i))
20189 emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode, CR0_REGNO + i),
20190 reg));
20192 if (!exit_func && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20194 rtx insn = get_last_insn ();
20195 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
20197 add_reg_note (insn, REG_CFA_RESTORE, cr);
20198 RTX_FRAME_RELATED_P (insn) = 1;
20202 /* Like cr, the move to lr instruction can be scheduled after the
20203 stack deallocation, but unlike cr, its stack frame save is still
20204 valid. So we only need to emit the cfa_restore on the correct
20205 instruction. */
20207 static void
20208 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
20210 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
20211 rtx reg = gen_rtx_REG (Pmode, regno);
20213 emit_move_insn (reg, mem);
20216 static void
20217 restore_saved_lr (int regno, bool exit_func)
20219 rtx reg = gen_rtx_REG (Pmode, regno);
20220 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
20221 rtx insn = emit_move_insn (lr, reg);
20223 if (!exit_func && flag_shrink_wrap)
20225 add_reg_note (insn, REG_CFA_RESTORE, lr);
20226 RTX_FRAME_RELATED_P (insn) = 1;
20230 static rtx
20231 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
20233 if (info->cr_save_p)
20234 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20235 gen_rtx_REG (SImode, CR2_REGNO),
20236 cfa_restores);
20237 if (info->lr_save_p)
20238 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20239 gen_rtx_REG (Pmode, LR_REGNO),
20240 cfa_restores);
20241 return cfa_restores;
20244 /* Return true if OFFSET from stack pointer can be clobbered by signals.
20245 V.4 doesn't have any stack cushion; the AIX ABIs leave 220 or 288 bytes
20246 below the stack pointer that are not clobbered by signals. */
20248 static inline bool
20249 offset_below_red_zone_p (HOST_WIDE_INT offset)
20251 return offset < (DEFAULT_ABI == ABI_V4
20252 ? 0
20253 : TARGET_32BIT ? -220 : -288);
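20254 /* E.g. on 64-bit AIX/ELF an offset of -300 lies below the 288-byte
20255    cushion and may be clobbered; -200 is protected until the pop. */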
20256 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
20258 static void
20259 emit_cfa_restores (rtx cfa_restores)
20261 rtx insn = get_last_insn ();
20262 rtx *loc = &REG_NOTES (insn);
20264 while (*loc)
20265 loc = &XEXP (*loc, 1);
20266 *loc = cfa_restores;
20267 RTX_FRAME_RELATED_P (insn) = 1;
20270 /* Emit function epilogue as insns. */
20272 void
20273 rs6000_emit_epilogue (int sibcall)
20275 rs6000_stack_t *info;
20276 int restoring_GPRs_inline;
20277 int restoring_FPRs_inline;
20278 int using_load_multiple;
20279 int using_mtcr_multiple;
20280 int use_backchain_to_restore_sp;
20281 int restore_lr;
20282 int strategy;
20283 HOST_WIDE_INT frame_off = 0;
20284 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
20285 rtx frame_reg_rtx = sp_reg_rtx;
20286 rtx cfa_restores = NULL_RTX;
20287 rtx insn;
20288 rtx cr_save_reg = NULL_RTX;
20289 enum machine_mode reg_mode = Pmode;
20290 int reg_size = TARGET_32BIT ? 4 : 8;
20291 int i;
20292 bool exit_func;
20293 unsigned ptr_regno;
20295 info = rs6000_stack_info ();
20297 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
20299 reg_mode = V2SImode;
20300 reg_size = 8;
20303 strategy = info->savres_strategy;
20304 using_load_multiple = strategy & SAVRES_MULTIPLE;
20305 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
20306 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
20307 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
20308 || rs6000_cpu == PROCESSOR_PPC603
20309 || rs6000_cpu == PROCESSOR_PPC750
20310 || optimize_size);
20311 /* Restore via the backchain when we have a large frame, since this
20312 is more efficient than an addis, addi pair. The second condition
20313 here will not trigger at the moment; we don't actually need a
20314 frame pointer for alloca, but the generic parts of the compiler
20315 give us one anyway. */
20316 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
20317 || (cfun->calls_alloca
20318 && !frame_pointer_needed));
20319 restore_lr = (info->lr_save_p
20320 && (restoring_FPRs_inline
20321 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
20322 && (restoring_GPRs_inline
20323 || info->first_fp_reg_save < 64));
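20324 /* I.e. restore LR inline only when no out-of-line restore routine will restore it for us. */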
20325 if (WORLD_SAVE_P (info))
20327 int i, j;
20328 char rname[30];
20329 const char *alloc_rname;
20330 rtvec p;
20332 /* eh_rest_world_r10 will return to the location saved in the LR
20333 stack slot (which is not likely to be our caller).
20334 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
20335 rest_world is similar, except any R10 parameter is ignored.
20336 The exception-handling stuff that was here in 2.95 is no
20337 longer necessary. */
20339 p = rtvec_alloc (9
20341 + 32 - info->first_gp_reg_save
20342 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
20343 + 63 + 1 - info->first_fp_reg_save);
20345 strcpy (rname, ((crtl->calls_eh_return) ?
20346 "*eh_rest_world_r10" : "*rest_world"));
20347 alloc_rname = ggc_strdup (rname);
20349 j = 0;
20350 RTVEC_ELT (p, j++) = ret_rtx;
20351 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
20352 gen_rtx_REG (Pmode,
20353 LR_REGNO));
20354 RTVEC_ELT (p, j++)
20355 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
20356 /* The instruction pattern requires a clobber here;
20357 it is shared with the restVEC helper. */
20358 RTVEC_ELT (p, j++)
20359 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
20362 /* CR register traditionally saved as CR2. */
20363 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
20364 RTVEC_ELT (p, j++)
20365 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
20366 if (flag_shrink_wrap)
20368 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
20369 gen_rtx_REG (Pmode, LR_REGNO),
20370 cfa_restores);
20371 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20375 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20377 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20378 RTVEC_ELT (p, j++)
20379 = gen_frame_load (reg,
20380 frame_reg_rtx, info->gp_save_offset + reg_size * i);
20381 if (flag_shrink_wrap)
20382 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20384 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
20386 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
20387 RTVEC_ELT (p, j++)
20388 = gen_frame_load (reg,
20389 frame_reg_rtx, info->altivec_save_offset + 16 * i);
20390 if (flag_shrink_wrap)
20391 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20393 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
20395 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20396 ? DFmode : SFmode),
20397 info->first_fp_reg_save + i);
20398 RTVEC_ELT (p, j++)
20399 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
20400 if (flag_shrink_wrap)
20401 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20403 RTVEC_ELT (p, j++)
20404 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
20405 RTVEC_ELT (p, j++)
20406 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
20407 RTVEC_ELT (p, j++)
20408 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
20409 RTVEC_ELT (p, j++)
20410 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
20411 RTVEC_ELT (p, j++)
20412 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
20413 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
20415 if (flag_shrink_wrap)
20417 REG_NOTES (insn) = cfa_restores;
20418 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20419 RTX_FRAME_RELATED_P (insn) = 1;
20421 return;
20424 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
20425 if (info->push_p)
20426 frame_off = info->total_size;
20428 /* Restore AltiVec registers if we must do so before adjusting the
20429 stack. */
20430 if (TARGET_ALTIVEC_ABI
20431 && info->altivec_size != 0
20432 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20433 || (DEFAULT_ABI != ABI_V4
20434 && offset_below_red_zone_p (info->altivec_save_offset))))
20436 int i;
20437 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20439 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
20440 if (use_backchain_to_restore_sp)
20442 int frame_regno = 11;
20444 if ((strategy & REST_INLINE_VRS) == 0)
20446 /* Of r11 and r12, select the one not clobbered by an
20447 out-of-line restore function for the frame register. */
20448 frame_regno = 11 + 12 - scratch_regno;
20450 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
20451 emit_move_insn (frame_reg_rtx,
20452 gen_rtx_MEM (Pmode, sp_reg_rtx));
20453 frame_off = 0;
20455 else if (frame_pointer_needed)
20456 frame_reg_rtx = hard_frame_pointer_rtx;
20458 if ((strategy & REST_INLINE_VRS) == 0)
20460 int end_save = info->altivec_save_offset + info->altivec_size;
20461 int ptr_off;
20462 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20463 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20465 if (end_save + frame_off != 0)
20467 rtx offset = GEN_INT (end_save + frame_off);
20469 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20471 else
20472 emit_move_insn (ptr_reg, frame_reg_rtx);
20474 ptr_off = -end_save;
20475 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20476 info->altivec_save_offset + ptr_off,
20477 0, V4SImode, SAVRES_VR);
20479 else
20481 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20482 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20484 rtx addr, areg, mem, reg;
20486 areg = gen_rtx_REG (Pmode, 0);
20487 emit_move_insn
20488 (areg, GEN_INT (info->altivec_save_offset
20489 + frame_off
20490 + 16 * (i - info->first_altivec_reg_save)));
20492 /* AltiVec addressing mode is [reg+reg]. */
20493 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20494 mem = gen_frame_mem (V4SImode, addr);
20496 reg = gen_rtx_REG (V4SImode, i);
20497 emit_move_insn (reg, mem);
20501 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20502 if (((strategy & REST_INLINE_VRS) == 0
20503 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20504 && (flag_shrink_wrap
20505 || (offset_below_red_zone_p
20506 (info->altivec_save_offset
20507 + 16 * (i - info->first_altivec_reg_save)))))
20509 rtx reg = gen_rtx_REG (V4SImode, i);
20510 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20514 /* Restore VRSAVE if we must do so before adjusting the stack. */
20515 if (TARGET_ALTIVEC
20516 && TARGET_ALTIVEC_VRSAVE
20517 && info->vrsave_mask != 0
20518 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20519 || (DEFAULT_ABI != ABI_V4
20520 && offset_below_red_zone_p (info->vrsave_save_offset))))
20522 rtx reg;
20524 if (frame_reg_rtx == sp_reg_rtx)
20526 if (use_backchain_to_restore_sp)
20528 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20529 emit_move_insn (frame_reg_rtx,
20530 gen_rtx_MEM (Pmode, sp_reg_rtx));
20531 frame_off = 0;
20533 else if (frame_pointer_needed)
20534 frame_reg_rtx = hard_frame_pointer_rtx;
20537 reg = gen_rtx_REG (SImode, 12);
20538 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20539 info->vrsave_save_offset + frame_off));
20541 emit_insn (generate_set_vrsave (reg, info, 1));
20544 insn = NULL_RTX;
20545 /* If we have a large stack frame, restore the old stack pointer
20546 using the backchain. */
20547 if (use_backchain_to_restore_sp)
20549 if (frame_reg_rtx == sp_reg_rtx)
20551 /* Under V.4, don't reset the stack pointer until after we're done
20552 loading the saved registers. */
20553 if (DEFAULT_ABI == ABI_V4)
20554 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20556 insn = emit_move_insn (frame_reg_rtx,
20557 gen_rtx_MEM (Pmode, sp_reg_rtx));
20558 frame_off = 0;
20560 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20561 && DEFAULT_ABI == ABI_V4)
20562 /* frame_reg_rtx has been set up by the altivec restore. */
20564 else
20566 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
20567 frame_reg_rtx = sp_reg_rtx;
20570 /* If we have a frame pointer, we can restore the old stack pointer
20571 from it. */
20572 else if (frame_pointer_needed)
20574 frame_reg_rtx = sp_reg_rtx;
20575 if (DEFAULT_ABI == ABI_V4)
20576 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20577 /* Prevent reordering memory accesses against stack pointer restore. */
20578 else if (cfun->calls_alloca
20579 || offset_below_red_zone_p (-info->total_size))
20580 rs6000_emit_stack_tie (frame_reg_rtx, true);
20582 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
20583 GEN_INT (info->total_size)));
20584 frame_off = 0;
20586 else if (info->push_p
20587 && DEFAULT_ABI != ABI_V4
20588 && !crtl->calls_eh_return)
20590 /* Prevent reordering memory accesses against stack pointer restore. */
20591 if (cfun->calls_alloca
20592 || offset_below_red_zone_p (-info->total_size))
20593 rs6000_emit_stack_tie (frame_reg_rtx, false);
20594 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
20595 GEN_INT (info->total_size)));
20596 frame_off = 0;
20598 if (insn && frame_reg_rtx == sp_reg_rtx)
20600 if (cfa_restores)
20602 REG_NOTES (insn) = cfa_restores;
20603 cfa_restores = NULL_RTX;
20605 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
20606 RTX_FRAME_RELATED_P (insn) = 1;
20609 /* Restore AltiVec registers if we have not done so already. */
20610 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20611 && TARGET_ALTIVEC_ABI
20612 && info->altivec_size != 0
20613 && (DEFAULT_ABI == ABI_V4
20614 || !offset_below_red_zone_p (info->altivec_save_offset)))
20616 int i;
20618 if ((strategy & REST_INLINE_VRS) == 0)
20620 int end_save = info->altivec_save_offset + info->altivec_size;
20621 int ptr_off;
20622 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
20623 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
20624 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
20626 if (end_save + frame_off != 0)
20628 rtx offset = GEN_INT (end_save + frame_off);
20630 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
20632 else
20633 emit_move_insn (ptr_reg, frame_reg_rtx);
20635 ptr_off = -end_save;
20636 insn = rs6000_emit_savres_rtx (info, scratch_reg,
20637 info->altivec_save_offset + ptr_off,
20638 0, V4SImode, SAVRES_VR);
20639 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
20641 /* Frame reg was clobbered by out-of-line save. Restore it
20642 from ptr_reg, and if we are calling an out-of-line gpr or
20643 fpr restore, set up the correct pointer and offset. */
20644 unsigned newptr_regno = 1;
20645 if (!restoring_GPRs_inline)
20647 bool lr = info->gp_save_offset + info->gp_size == 0;
20648 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20649 newptr_regno = ptr_regno_for_savres (sel);
20650 end_save = info->gp_save_offset + info->gp_size;
20652 else if (!restoring_FPRs_inline)
20654 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
20655 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20656 newptr_regno = ptr_regno_for_savres (sel);
20657 end_save = info->fp_save_offset + info->fp_size;
20660 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
20661 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
20663 if (end_save + ptr_off != 0)
20665 rtx offset = GEN_INT (end_save + ptr_off);
20667 frame_off = -end_save;
20668 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
20670 else
20672 frame_off = ptr_off;
20673 emit_move_insn (frame_reg_rtx, ptr_reg);
20677 else
20679 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20680 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
20682 rtx addr, areg, mem, reg;
20684 areg = gen_rtx_REG (Pmode, 0);
20685 emit_move_insn
20686 (areg, GEN_INT (info->altivec_save_offset
20687 + frame_off
20688 + 16 * (i - info->first_altivec_reg_save)));
20690 /* AltiVec addressing mode is [reg+reg]. */
20691 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
20692 mem = gen_frame_mem (V4SImode, addr);
20694 reg = gen_rtx_REG (V4SImode, i);
20695 emit_move_insn (reg, mem);
20699 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
20700 if (((strategy & REST_INLINE_VRS) == 0
20701 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
20702 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
20704 rtx reg = gen_rtx_REG (V4SImode, i);
20705 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20709 /* Restore VRSAVE if we have not done so already. */
20710 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
20711 && TARGET_ALTIVEC
20712 && TARGET_ALTIVEC_VRSAVE
20713 && info->vrsave_mask != 0
20714 && (DEFAULT_ABI == ABI_V4
20715 || !offset_below_red_zone_p (info->vrsave_save_offset)))
20717 rtx reg;
20719 reg = gen_rtx_REG (SImode, 12);
20720 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20721 info->vrsave_save_offset + frame_off));
20723 emit_insn (generate_set_vrsave (reg, info, 1));
20726 /* If we exit by an out-of-line restore function on ABI_V4 then that
20727 function will deallocate the stack, so we don't need to worry
20728 about the unwinder restoring cr from an invalid stack frame
20729 location. */
20730 exit_func = (!restoring_FPRs_inline
20731 || (!restoring_GPRs_inline
20732 && info->first_fp_reg_save == 64));
20734 /* Get the old lr if we saved it. If we are restoring registers
20735 out-of-line, then the out-of-line routines can do this for us. */
20736 if (restore_lr && restoring_GPRs_inline)
20737 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20739 /* Get the old cr if we saved it. */
20740 if (info->cr_save_p)
20742 unsigned cr_save_regno = 12;
20744 if (!restoring_GPRs_inline)
20746 /* Ensure we don't use the register used by the out-of-line
20747 gpr register restore below. */
20748 bool lr = info->gp_save_offset + info->gp_size == 0;
20749 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
20750 int gpr_ptr_regno = ptr_regno_for_savres (sel);
20752 if (gpr_ptr_regno == 12)
20753 cr_save_regno = 11;
20754 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
20756 else if (REGNO (frame_reg_rtx) == 12)
20757 cr_save_regno = 11;
20759 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
20760 info->cr_save_offset + frame_off,
20761 exit_func);
20764 /* Set LR here to try to overlap restores below. */
20765 if (restore_lr && restoring_GPRs_inline)
20766 restore_saved_lr (0, exit_func);
20768 /* Load exception handler data registers, if needed. */
20769 if (crtl->calls_eh_return)
20771 unsigned int i, regno;
20773 if (TARGET_AIX)
20775 rtx reg = gen_rtx_REG (reg_mode, 2);
20776 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20777 frame_off + 5 * reg_size));
20780 for (i = 0; ; ++i)
20782 rtx mem;
20784 regno = EH_RETURN_DATA_REGNO (i);
20785 if (regno == INVALID_REGNUM)
20786 break;
20788 /* Note: possible use of r0 here to address SPE regs. */
20789 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
20790 info->ehrd_offset + frame_off
20791 + reg_size * (int) i);
20793 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
20797 /* Restore GPRs. This is done as a PARALLEL if we are using
20798 the load-multiple instructions. */
20799 if (TARGET_SPE_ABI
20800 && info->spe_64bit_regs_used
20801 && info->first_gp_reg_save != 32)
20803 /* Determine whether we can address all of the registers that need
20804 to be restored with an offset from frame_reg_rtx that fits in
20805 the small const field for SPE memory instructions. */
20806 int spe_regs_addressable
20807 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
20808 + reg_size * (32 - info->first_gp_reg_save - 1))
20809 && restoring_GPRs_inline);
20811 if (!spe_regs_addressable)
20813 int ool_adjust = 0;
20814 rtx old_frame_reg_rtx = frame_reg_rtx;
20815 /* Make r11 point to the start of the SPE save area. We worried about
20816 not clobbering it when we were saving registers in the prologue.
20817 There's no need to worry here because the static chain is passed
20818 anew to every function. */
20820 if (!restoring_GPRs_inline)
20821 ool_adjust = 8 * (info->first_gp_reg_save
20822 - (FIRST_SAVRES_REGISTER + 1));
20823 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
20824 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
20825 GEN_INT (info->spe_gp_save_offset
20826 + frame_off
20827 - ool_adjust)));
20828 /* Keep the invariant that frame_reg_rtx + frame_off points
20829 at the top of the stack frame. */
20830 frame_off = -info->spe_gp_save_offset + ool_adjust;
20833 if (restoring_GPRs_inline)
20835 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
20837 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20838 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20840 rtx offset, addr, mem, reg;
20842 /* We're doing all this to ensure that the immediate offset
20843 fits into the immediate field of 'evldd'. */
20844 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
20846 offset = GEN_INT (spe_offset + reg_size * i);
20847 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
20848 mem = gen_rtx_MEM (V2SImode, addr);
20849 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
20851 emit_move_insn (reg, mem);
20854 else
20855 rs6000_emit_savres_rtx (info, frame_reg_rtx,
20856 info->spe_gp_save_offset + frame_off,
20857 info->lr_save_offset + frame_off,
20858 reg_mode,
20859 SAVRES_GPR | SAVRES_LR);
20861 else if (!restoring_GPRs_inline)
20863 /* We are jumping to an out-of-line function. */
20864 rtx ptr_reg;
20865 int end_save = info->gp_save_offset + info->gp_size;
20866 bool can_use_exit = end_save == 0;
20867 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
20868 int ptr_off;
20870 /* Emit stack reset code if we need it. */
20871 ptr_regno = ptr_regno_for_savres (sel);
20872 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
20873 if (can_use_exit)
20874 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
20875 else if (end_save + frame_off != 0)
20876 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
20877 GEN_INT (end_save + frame_off)));
20878 else if (REGNO (frame_reg_rtx) != ptr_regno)
20879 emit_move_insn (ptr_reg, frame_reg_rtx);
20880 if (REGNO (frame_reg_rtx) == ptr_regno)
20881 frame_off = -end_save;
20883 if (can_use_exit && info->cr_save_p)
20884 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
20886 ptr_off = -end_save;
20887 rs6000_emit_savres_rtx (info, ptr_reg,
20888 info->gp_save_offset + ptr_off,
20889 info->lr_save_offset + ptr_off,
20890 reg_mode, sel);
20892 else if (using_load_multiple)
20894 rtvec p;
20895 p = rtvec_alloc (32 - info->first_gp_reg_save);
20896 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20897 RTVEC_ELT (p, i)
20898 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20899 frame_reg_rtx,
20900 info->gp_save_offset + frame_off + reg_size * i);
20901 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
20903 else
20905 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
20906 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
20907 emit_insn (gen_frame_load
20908 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
20909 frame_reg_rtx,
20910 info->gp_save_offset + frame_off + reg_size * i));
20913 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
20915 /* If the frame pointer was used then we can't delay emitting
20916 a REG_CFA_DEF_CFA note. This must happen on the insn that
20917 restores the frame pointer, r31. We may have already emitted
20918 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
20919 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
20920 be harmless if emitted. */
20921 if (frame_pointer_needed)
20923 insn = get_last_insn ();
20924 add_reg_note (insn, REG_CFA_DEF_CFA,
20925 plus_constant (Pmode, frame_reg_rtx, frame_off));
20926 RTX_FRAME_RELATED_P (insn) = 1;
20929 /* Set up cfa_restores. We always need these when
20930 shrink-wrapping. If not shrink-wrapping then we only need
20931 the cfa_restore when the stack location is no longer valid.
20932 The cfa_restores must be emitted on or before the insn that
20933 invalidates the stack, and of course must not be emitted
20934 before the insn that actually does the restore. The latter
20935 is why it is a bad idea to emit the cfa_restores as a group
20936 on the last instruction here that actually does a restore:
20937 That insn may be reordered with respect to others doing
20938 restores. */
20939 if (flag_shrink_wrap
20940 && !restoring_GPRs_inline
20941 && info->first_fp_reg_save == 64)
20942 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
20944 for (i = info->first_gp_reg_save; i < 32; i++)
20945 if (!restoring_GPRs_inline
20946 || using_load_multiple
20947 || rs6000_reg_live_or_pic_offset_p (i))
20949 rtx reg = gen_rtx_REG (reg_mode, i);
20951 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20955 if (!restoring_GPRs_inline
20956 && info->first_fp_reg_save == 64)
20958 /* We are jumping to an out-of-line function. */
20959 if (cfa_restores)
20960 emit_cfa_restores (cfa_restores);
20961 return;
20964 if (restore_lr && !restoring_GPRs_inline)
20966 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
20967 restore_saved_lr (0, exit_func);
20970 /* Restore fpr's if we need to do it without calling a function. */
20971 if (restoring_FPRs_inline)
20972 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
20973 if (save_reg_p (info->first_fp_reg_save + i))
20975 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
20976 ? DFmode : SFmode),
20977 info->first_fp_reg_save + i);
20978 emit_insn (gen_frame_load (reg, frame_reg_rtx,
20979 info->fp_save_offset + frame_off + 8 * i));
20980 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
20981 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
20984 /* If we saved cr, restore it here. Just those that were used. */
20985 if (info->cr_save_p)
20986 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
20988 /* If this is V.4, unwind the stack pointer after all of the loads
20989 have been done, or set up r11 if we are restoring fp out of line. */
20990 ptr_regno = 1;
20991 if (!restoring_FPRs_inline)
20993 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
20994 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
20995 ptr_regno = ptr_regno_for_savres (sel);
20998 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
20999 if (REGNO (frame_reg_rtx) == ptr_regno)
21000 frame_off = 0;
21002 if (insn && restoring_FPRs_inline)
21004 if (cfa_restores)
21006 REG_NOTES (insn) = cfa_restores;
21007 cfa_restores = NULL_RTX;
21009 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
21010 RTX_FRAME_RELATED_P (insn) = 1;
21013 if (crtl->calls_eh_return)
21015 rtx sa = EH_RETURN_STACKADJ_RTX;
21016 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
21019 if (!sibcall)
21021 rtvec p;
21022 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
21023 if (! restoring_FPRs_inline)
21025 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
21026 RTVEC_ELT (p, 0) = ret_rtx;
21028 else
21030 if (cfa_restores)
21032 /* We can't hang the cfa_restores off a simple return,
21033 since the shrink-wrap code sometimes uses an existing
21034 return. This means there might be a path from
21035 pre-prologue code to this return, and dwarf2cfi code
21036 wants the eh_frame unwinder state to be the same on
21037 all paths to any point. So we need to emit the
21038 cfa_restores before the return. For -m64 we really
21039 don't need epilogue cfa_restores at all, except for
21040 this irritating dwarf2cfi-with-shrink-wrap
21041 requirement; the stack red zone means eh_frame info
21042 from the prologue telling the unwinder to restore
21043 from the stack is perfectly good right to the end of
21044 the function. */
21045 emit_insn (gen_blockage ());
21046 emit_cfa_restores (cfa_restores);
21047 cfa_restores = NULL_RTX;
21049 p = rtvec_alloc (2);
21050 RTVEC_ELT (p, 0) = simple_return_rtx;
21053 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
21054 ? gen_rtx_USE (VOIDmode,
21055 gen_rtx_REG (Pmode, LR_REGNO))
21056 : gen_rtx_CLOBBER (VOIDmode,
21057 gen_rtx_REG (Pmode, LR_REGNO)));
21059 /* If we have to restore more than two FP registers, branch to the
21060 restore function. It will return to our caller. */
21061 if (! restoring_FPRs_inline)
21063 int i;
21064 rtx sym;
21066 if (flag_shrink_wrap)
21067 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
21069 sym = rs6000_savres_routine_sym (info,
21070 SAVRES_FPR | (lr ? SAVRES_LR : 0));
21071 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
21072 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
21073 gen_rtx_REG (Pmode,
21074 DEFAULT_ABI == ABI_AIX
21075 ? 1 : 11));
21076 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
21078 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
21080 RTVEC_ELT (p, i + 4)
21081 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
21082 if (flag_shrink_wrap)
21083 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
21084 cfa_restores);
21088 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
21091 if (cfa_restores)
21093 if (sibcall)
21094 /* Ensure the cfa_restores are hung off an insn that won't
21095 be reordered above other restores. */
21096 emit_insn (gen_blockage ());
21098 emit_cfa_restores (cfa_restores);
21102 /* Write function epilogue. */
21104 static void
21105 rs6000_output_function_epilogue (FILE *file,
21106 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
21108 #if TARGET_MACHO
21109 macho_branch_islands ();
21110 /* Mach-O doesn't support labels at the end of objects, so if
21111 it looks like we might want one, insert a NOP. */
21113 rtx insn = get_last_insn ();
21114 rtx deleted_debug_label = NULL_RTX;
21115 while (insn
21116 && NOTE_P (insn)
21117 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
21119 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert a nop;
21120 instead set their CODE_LABEL_NUMBER to -1, since otherwise
21121 there would be code generation differences
21122 between -g and -g0. */
21123 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21124 deleted_debug_label = insn;
21125 insn = PREV_INSN (insn);
21127 if (insn
21128 && (LABEL_P (insn)
21129 || (NOTE_P (insn)
21130 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
21131 fputs ("\tnop\n", file);
21132 else if (deleted_debug_label)
21133 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
21134 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
21135 CODE_LABEL_NUMBER (insn) = -1;
21137 #endif
21139 /* Output a traceback table here. See /usr/include/sys/debug.h for info
21140 on its format.
21142 We don't output a traceback table if -finhibit-size-directive was
21143 used. The documentation for -finhibit-size-directive reads
21144 ``don't output a @code{.size} assembler directive, or anything
21145 else that would cause trouble if the function is split in the
21146 middle, and the two halves are placed at locations far apart in
21147 memory.'' The traceback table has this property, since it
21148 includes the offset from the start of the function to the
21149 traceback table itself.
21151 System V.4 PowerPC (and the embedded ABI derived from it) uses a
21152 different traceback table. */
21153 if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
21154 && rs6000_traceback != traceback_none && !cfun->is_thunk)
21156 const char *fname = NULL;
21157 const char *language_string = lang_hooks.name;
21158 int fixed_parms = 0, float_parms = 0, parm_info = 0;
21159 int i;
21160 int optional_tbtab;
21161 rs6000_stack_t *info = rs6000_stack_info ();
21163 if (rs6000_traceback == traceback_full)
21164 optional_tbtab = 1;
21165 else if (rs6000_traceback == traceback_part)
21166 optional_tbtab = 0;
21167 else
21168 optional_tbtab = !optimize_size && !TARGET_ELF;
21170 if (optional_tbtab)
21172 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
21173 while (*fname == '.') /* V.4 encodes . in the name */
21174 fname++;
21176 /* Need label immediately before tbtab, so we can compute
21177 its offset from the function start. */
21178 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21179 ASM_OUTPUT_LABEL (file, fname);
21182 /* The .tbtab pseudo-op can only be used for the first eight
21183 expressions, since it can't handle the possibly variable
21184 length fields that follow. However, if you omit the optional
21185 fields, the assembler outputs zeros for all optional fields
21186 anyway, giving each variable-length field its minimum length
21187 (as defined in sys/debug.h). Thus we cannot use the .tbtab
21188 pseudo-op at all. */
21190 /* An all-zero word flags the start of the tbtab, for debuggers
21191 that have to find it by searching forward from the entry
21192 point or from the current pc. */
21193 fputs ("\t.long 0\n", file);
21195 /* Tbtab format type. Use format type 0. */
21196 fputs ("\t.byte 0,", file);
21198 /* Language type. Unfortunately, there does not seem to be any
21199 official way to discover the language being compiled, so we
21200 use language_string.
21201 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
21202 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
21203 a number, so for now use 9. LTO and Go aren't assigned numbers
21204 either, so for now use 0. */
21205 if (! strcmp (language_string, "GNU C")
21206 || ! strcmp (language_string, "GNU GIMPLE")
21207 || ! strcmp (language_string, "GNU Go"))
21208 i = 0;
21209 else if (! strcmp (language_string, "GNU F77")
21210 || ! strcmp (language_string, "GNU Fortran"))
21211 i = 1;
21212 else if (! strcmp (language_string, "GNU Pascal"))
21213 i = 2;
21214 else if (! strcmp (language_string, "GNU Ada"))
21215 i = 3;
21216 else if (! strcmp (language_string, "GNU C++")
21217 || ! strcmp (language_string, "GNU Objective-C++"))
21218 i = 9;
21219 else if (! strcmp (language_string, "GNU Java"))
21220 i = 13;
21221 else if (! strcmp (language_string, "GNU Objective-C"))
21222 i = 14;
21223 else
21224 gcc_unreachable ();
21225 fprintf (file, "%d,", i);
21227 /* 8 single bit fields: global linkage (not set for C extern linkage,
21228 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
21229 from start of procedure stored in tbtab, internal function, function
21230 has controlled storage, function has no toc, function uses fp,
21231 function logs/aborts fp operations. */
21232 /* Assume that fp operations are used if any fp reg must be saved. */
21233 fprintf (file, "%d,",
21234 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
21236 /* 6 bitfields: function is interrupt handler, name present in
21237 proc table, function calls alloca, on condition directives
21238 (controls stack walks, 3 bits), saves condition reg, saves
21239 link reg. */
21240 /* The `function calls alloca' bit seems to be set whenever reg 31 is
21241 set up as a frame pointer, even when there is no alloca call. */
21242 fprintf (file, "%d,",
21243 ((optional_tbtab << 6)
21244 | ((optional_tbtab & frame_pointer_needed) << 5)
21245 | (info->cr_save_p << 1)
21246 | (info->lr_save_p)));
21248 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
21249 (6 bits). */
21250 fprintf (file, "%d,",
21251 (info->push_p << 7) | (64 - info->first_fp_reg_save));
21253 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
21254 fprintf (file, "%d,", (32 - first_reg_to_save ()));
21256 if (optional_tbtab)
21258 /* Compute the parameter info from the function decl argument
21259 list. */
21260 tree decl;
21261 int next_parm_info_bit = 31;
21263 for (decl = DECL_ARGUMENTS (current_function_decl);
21264 decl; decl = DECL_CHAIN (decl))
21266 rtx parameter = DECL_INCOMING_RTL (decl);
21267 enum machine_mode mode = GET_MODE (parameter);
21269 if (GET_CODE (parameter) == REG)
21271 if (SCALAR_FLOAT_MODE_P (mode))
21273 int bits;
21275 float_parms++;
21277 switch (mode)
21279 case SFmode:
21280 case SDmode:
21281 bits = 0x2;
21282 break;
21284 case DFmode:
21285 case DDmode:
21286 case TFmode:
21287 case TDmode:
21288 bits = 0x3;
21289 break;
21291 default:
21292 gcc_unreachable ();
21295 /* If only one bit will fit, don't or in this entry. */
21296 if (next_parm_info_bit > 0)
21297 parm_info |= (bits << (next_parm_info_bit - 1));
21298 next_parm_info_bit -= 2;
21300 else
21302 fixed_parms += ((GET_MODE_SIZE (mode)
21303 + (UNITS_PER_WORD - 1))
21304 / UNITS_PER_WORD);
21305 next_parm_info_bit -= 1;
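21306 /* E.g. a (double, int) parameter list ORs 0x3 into bits 31:30 for the
21307    double and leaves a zero bit for the int word: parm_info 0xc0000000. */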
21311 /* Number of fixed point parameters. */
21312 /* This is actually the number of words of fixed point parameters; thus
21313 an 8-byte struct counts as 2, and thus the maximum value is 8. */
21314 fprintf (file, "%d,", fixed_parms);
21316 /* 2 bitfields: number of floating point parameters (7 bits), parameters
21317 all on stack. */
21318 /* This is actually the number of fp registers that hold parameters;
21319 and thus the maximum value is 13. */
21320 /* Set parameters on stack bit if parameters are not in their original
21321 registers, regardless of whether they are on the stack? Xlc
21322 seems to set the bit when not optimizing. */
21323 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
21325 if (! optional_tbtab)
21326 return;
21328 /* Optional fields follow. Some are variable length. */
21330 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
21331 11 double float. */
21332 /* There is an entry for each parameter in a register, in the order that
21333 they occur in the parameter list. Any intervening arguments on the
21334 stack are ignored. If the list overflows a long (max possible length
21335 34 bits) then completely leave off all elements that don't fit. */
21336 /* Only emit this long if there was at least one parameter. */
21337 if (fixed_parms || float_parms)
21338 fprintf (file, "\t.long %d\n", parm_info);
21340 /* Offset from start of code to tb table. */
21341 fputs ("\t.long ", file);
21342 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
21343 RS6000_OUTPUT_BASENAME (file, fname);
21344 putc ('-', file);
21345 rs6000_output_function_entry (file, fname);
21346 putc ('\n', file);
21348 /* Interrupt handler mask. */
21349 /* Omit this long, since we never set the interrupt handler bit
21350 above. */
21352 /* Number of CTL (controlled storage) anchors. */
21353 /* Omit this long, since the has_ctl bit is never set above. */
21355 /* Displacement into stack of each CTL anchor. */
21356 /* Omit this list of longs, because there are no CTL anchors. */
21358 /* Length of function name. */
21359 if (*fname == '*')
21360 ++fname;
21361 fprintf (file, "\t.short %d\n", (int) strlen (fname));
21363 /* Function name. */
21364 assemble_string (fname, strlen (fname));
21366 /* Register for alloca automatic storage; this is always reg 31.
21367 Only emit this if the alloca bit was set above. */
21368 if (frame_pointer_needed)
21369 fputs ("\t.byte 31\n", file);
21371 fputs ("\t.align 2\n", file);
21375 /* A C compound statement that outputs the assembler code for a thunk
21376 function, used to implement C++ virtual function calls with
21377 multiple inheritance. The thunk acts as a wrapper around a virtual
21378 function, adjusting the implicit object parameter before handing
21379 control off to the real function.
21381 First, emit code to add the integer DELTA to the location that
21382 contains the incoming first argument. Assume that this argument
21383 contains a pointer, and is the one used to pass the `this' pointer
21384 in C++. This is the incoming argument *before* the function
21385 prologue, e.g. `%o0' on a sparc. The addition must preserve the
21386 values of all other incoming arguments.
21388 After the addition, emit code to jump to FUNCTION, which is a
21389 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
21390 not touch the return address. Hence returning from FUNCTION will
21391 return to whoever called the current `thunk'.
21393 The effect must be as if FUNCTION had been called directly with the
21394 adjusted first argument. This macro is responsible for emitting
21395 all of the code for a thunk function; output_function_prologue()
21396 and output_function_epilogue() are not invoked.
21398 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
21399 been extracted from it.) It might possibly be useful on some
21400 targets, but probably not.
21402 If you do not define this macro, the target-independent code in the
21403 C++ frontend will generate a less efficient heavyweight thunk that
21404 calls FUNCTION instead of jumping to it. The generic approach does
21405 not support varargs. */
21407 static void
21408 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
21409 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
21410 tree function)
21412 rtx this_rtx, insn, funexp;
21414 reload_completed = 1;
21415 epilogue_completed = 1;
21417 /* Mark the end of the (empty) prologue. */
21418 emit_note (NOTE_INSN_PROLOGUE_END);
21420 /* Find the "this" pointer. If the function returns a structure,
21421 the structure return pointer is in r3. */
21422 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
21423 this_rtx = gen_rtx_REG (Pmode, 4);
21424 else
21425 this_rtx = gen_rtx_REG (Pmode, 3);
21427 /* Apply the constant offset, if required. */
21428 if (delta)
21429 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
21431 /* Apply the offset from the vtable, if required. */
21432 if (vcall_offset)
21434 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
21435 rtx tmp = gen_rtx_REG (Pmode, 12);
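21436 /* Load the vtable pointer, then the slot; if VCALL_OFFSET won't fit in a signed 16-bit displacement, add it to TMP first. */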
21437 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
21438 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
21440 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
21441 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
21443 else
21445 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
21447 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
21449 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
21452 /* Generate a tail call to the target function. */
21453 if (!TREE_USED (function))
21455 assemble_external (function);
21456 TREE_USED (function) = 1;
21458 funexp = XEXP (DECL_RTL (function), 0);
21459 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
21461 #if TARGET_MACHO
21462 if (MACHOPIC_INDIRECT)
21463 funexp = machopic_indirect_call_target (funexp);
21464 #endif
21466 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
21467 generate sibcall RTL explicitly. */
21468 insn = emit_call_insn (
21469 gen_rtx_PARALLEL (VOIDmode,
21470 gen_rtvec (4,
21471 gen_rtx_CALL (VOIDmode,
21472 funexp, const0_rtx),
21473 gen_rtx_USE (VOIDmode, const0_rtx),
21474 gen_rtx_USE (VOIDmode,
21475 gen_rtx_REG (SImode,
21476 LR_REGNO)),
21477 simple_return_rtx)));
21478 SIBLING_CALL_P (insn) = 1;
21479 emit_barrier ();
21481 /* Run just enough of rest_of_compilation to get the insns emitted.
21482 There's not really enough bulk here to make other passes such as
21483 instruction scheduling worthwhile. Note that use_thunk calls
21484 assemble_start_function and assemble_end_function. */
21485 insn = get_insns ();
21486 insn_locators_alloc ();
21487 shorten_branches (insn);
21488 final_start_function (insn, file, 1);
21489 final (insn, file, 1);
21490 final_end_function ();
21492 reload_completed = 0;
21493 epilogue_completed = 0;
21496 /* A quick summary of the various types of 'constant-pool tables'
21497 under PowerPC:
21499 Target       Flags            Name             One table per
21500 AIX          (none)           AIX TOC          object file
21501 AIX          -mfull-toc       AIX TOC          object file
21502 AIX          -mminimal-toc    AIX minimal TOC  translation unit
21503 SVR4/EABI    (none)           SVR4 SDATA       object file
21504 SVR4/EABI    -fpic            SVR4 pic         object file
21505 SVR4/EABI    -fPIC            SVR4 PIC         translation unit
21506 SVR4/EABI    -mrelocatable    EABI TOC         function
21507 SVR4/EABI    -maix            AIX TOC          object file
21508 SVR4/EABI    -maix -mminimal-toc
21509                               AIX minimal TOC  translation unit
21511 Name              Reg.  Set by  entries  contains:
21512                         made by addrs?  fp?      sum?
21514 AIX TOC           2     crt0    as       Y       option   option
21515 AIX minimal TOC   30    prolog  gcc      Y       Y        option
21516 SVR4 SDATA        13    crt0    gcc      N       Y        N
21517 SVR4 pic          30    prolog  ld       Y       not yet  N
21518 SVR4 PIC          30    prolog  gcc      Y       option   option
21519 EABI TOC          30    prolog  gcc      Y       option   option
21521 */
21523 /* Hash functions for the hash table. */
21525 static unsigned
21526 rs6000_hash_constant (rtx k)
21528 enum rtx_code code = GET_CODE (k);
21529 enum machine_mode mode = GET_MODE (k);
21530 unsigned result = (code << 3) ^ mode;
21531 const char *format;
21532 int flen, fidx;
21534 format = GET_RTX_FORMAT (code);
21535 flen = strlen (format);
21536 fidx = 0;
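21537 /* The multipliers 613 and 1231 below are (seemingly arbitrary) odd constants used to mix the bits. */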
21538 switch (code)
21540 case LABEL_REF:
21541 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
21543 case CONST_DOUBLE:
21544 if (mode != VOIDmode)
21545 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
21546 flen = 2;
21547 break;
21549 case CODE_LABEL:
21550 fidx = 3;
21551 break;
21553 default:
21554 break;
21557 for (; fidx < flen; fidx++)
21558 switch (format[fidx])
21560 case 's':
21562 unsigned i, len;
21563 const char *str = XSTR (k, fidx);
21564 len = strlen (str);
21565 result = result * 613 + len;
21566 for (i = 0; i < len; i++)
21567 result = result * 613 + (unsigned) str[i];
21568 break;
21570 case 'u':
21571 case 'e':
21572 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
21573 break;
21574 case 'i':
21575 case 'n':
21576 result = result * 613 + (unsigned) XINT (k, fidx);
21577 break;
21578 case 'w':
21579 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
21580 result = result * 613 + (unsigned) XWINT (k, fidx);
21581 else
21583 size_t i;
21584 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
21585 result = result * 613 + (unsigned) (XWINT (k, fidx)
21586 >> CHAR_BIT * i);
21588 break;
21589 case '0':
21590 break;
21591 default:
21592 gcc_unreachable ();
21595 return result;
21598 static unsigned
21599 toc_hash_function (const void *hash_entry)
21601 const struct toc_hash_struct *thc =
21602 (const struct toc_hash_struct *) hash_entry;
21603 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
21606 /* Compare H1 and H2 for equivalence. */
21608 static int
21609 toc_hash_eq (const void *h1, const void *h2)
21611 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
21612 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
21614 if (((const struct toc_hash_struct *) h1)->key_mode
21615 != ((const struct toc_hash_struct *) h2)->key_mode)
21616 return 0;
21618 return rtx_equal_p (r1, r2);
21621 /* These are the names given by the C++ front-end to vtables, and
21622 vtable-like objects. Ideally, this logic should not be here;
21623 instead, there should be some programmatic way of inquiring as
21624 to whether or not an object is a vtable. */
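21625 /* _ZTV: vtable, _ZTT: VTT, _ZTI: typeinfo, _ZTC: construction vtable (Itanium C++ ABI); _vt. is the old g++ scheme. */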
21626 #define VTABLE_NAME_P(NAME) \
21627 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
21628 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
21629 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
21630 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
21631 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
21633 #ifdef NO_DOLLAR_IN_LABEL
21634 /* Return a GGC-allocated character string translating dollar signs in
21635 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
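21636 /* For example, "foo$bar" becomes "foo_bar"; a name beginning with '$' is returned unchanged. */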
21637 const char *
21638 rs6000_xcoff_strip_dollar (const char *name)
21640 char *strip, *p;
21641 const char *q;
21642 size_t len;
21644 q = (const char *) strchr (name, '$');
21646 if (q == 0 || q == name)
21647 return name;
21649 len = strlen (name);
21650 strip = XALLOCAVEC (char, len + 1);
21651 strcpy (strip, name);
21652 p = strip + (q - name);
21653 while (p)
21655 *p = '_';
21656 p = strchr (p + 1, '$');
21659 return ggc_alloc_string (strip, len);
21661 #endif
21663 void
21664 rs6000_output_symbol_ref (FILE *file, rtx x)
21666 /* Currently C++ toc references to vtables can be emitted before it
21667 is decided whether the vtable is public or private. If this is
21668 the case, then the linker will eventually complain that there is
21669 a reference to an unknown section. Thus, for vtables only,
21670 we emit the TOC reference to reference the symbol and not the
21671 section. */
21672 const char *name = XSTR (x, 0);
21674 if (VTABLE_NAME_P (name))
21676 RS6000_OUTPUT_BASENAME (file, name);
21678 else
21679 assemble_name (file, name);
21682 /* Output a TOC entry. We derive the entry name from what is being
21683 written. */
21685 void
21686 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
21688 char buf[256];
21689 const char *name = buf;
21690 rtx base = x;
21691 HOST_WIDE_INT offset = 0;
21693 gcc_assert (!TARGET_NO_TOC);
21695 /* When the linker won't eliminate them, don't output duplicate
21696 TOC entries (this happens on AIX if there is any kind of TOC,
21697 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
21698 CODE_LABELs. */
21699 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
21701 struct toc_hash_struct *h;
21702 void * * found;
21704 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
21705 time because GGC is not initialized at that point. */
21706 if (toc_hash_table == NULL)
21707 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
21708 toc_hash_eq, NULL);
21710 h = ggc_alloc_toc_hash_struct ();
21711 h->key = x;
21712 h->key_mode = mode;
21713 h->labelno = labelno;
21715 found = htab_find_slot (toc_hash_table, h, INSERT);
21716 if (*found == NULL)
21717 *found = h;
21718 else /* This is indeed a duplicate.
21719 Set this label equal to that label. */
21721 fputs ("\t.set ", file);
21722 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21723 fprintf (file, "%d,", labelno);
21724 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
21725 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
21726 found)->labelno));
21727 return;
21731 /* If we're going to put a double constant in the TOC, make sure it's
21732 aligned properly when strict alignment is on. */
21733 if (GET_CODE (x) == CONST_DOUBLE
21734 && STRICT_ALIGNMENT
21735 && GET_MODE_BITSIZE (mode) >= 64
21736 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
21737 ASM_OUTPUT_ALIGN (file, 3);
21740 (*targetm.asm_out.internal_label) (file, "LC", labelno);
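21741 /* The .tc names below encode the bits (FS_/FD_/FT_ floats, IS_/ID_ ints), so equal constants get equal entry names. */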
21742 /* Handle FP constants specially. Note that if we have a minimal
21743 TOC, things we put here aren't actually in the TOC, so we can allow
21744 FP constants. */
21745 if (GET_CODE (x) == CONST_DOUBLE &&
21746 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
21748 REAL_VALUE_TYPE rv;
21749 long k[4];
21751 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21752 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21753 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
21754 else
21755 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
21757 if (TARGET_64BIT)
21759 if (TARGET_MINIMAL_TOC)
21760 fputs (DOUBLE_INT_ASM_OP, file);
21761 else
21762 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21763 k[0] & 0xffffffff, k[1] & 0xffffffff,
21764 k[2] & 0xffffffff, k[3] & 0xffffffff);
21765 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
21766 k[0] & 0xffffffff, k[1] & 0xffffffff,
21767 k[2] & 0xffffffff, k[3] & 0xffffffff);
21768 return;
21770 else
21772 if (TARGET_MINIMAL_TOC)
21773 fputs ("\t.long ", file);
21774 else
21775 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
21776 k[0] & 0xffffffff, k[1] & 0xffffffff,
21777 k[2] & 0xffffffff, k[3] & 0xffffffff);
21778 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
21779 k[0] & 0xffffffff, k[1] & 0xffffffff,
21780 k[2] & 0xffffffff, k[3] & 0xffffffff);
21781 return;
21784 else if (GET_CODE (x) == CONST_DOUBLE &&
21785 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
21787 REAL_VALUE_TYPE rv;
21788 long k[2];
21790 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21792 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21793 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
21794 else
21795 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
21797 if (TARGET_64BIT)
21799 if (TARGET_MINIMAL_TOC)
21800 fputs (DOUBLE_INT_ASM_OP, file);
21801 else
21802 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
21803 k[0] & 0xffffffff, k[1] & 0xffffffff);
21804 fprintf (file, "0x%lx%08lx\n",
21805 k[0] & 0xffffffff, k[1] & 0xffffffff);
21806 return;
21808 else
21810 if (TARGET_MINIMAL_TOC)
21811 fputs ("\t.long ", file);
21812 else
21813 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
21814 k[0] & 0xffffffff, k[1] & 0xffffffff);
21815 fprintf (file, "0x%lx,0x%lx\n",
21816 k[0] & 0xffffffff, k[1] & 0xffffffff);
21817 return;
21820 else if (GET_CODE (x) == CONST_DOUBLE &&
21821 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
21823 REAL_VALUE_TYPE rv;
21824 long l;
21826 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
21827 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
21828 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
21829 else
21830 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
21832 if (TARGET_64BIT)
21834 if (TARGET_MINIMAL_TOC)
21835 fputs (DOUBLE_INT_ASM_OP, file);
21836 else
21837 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
21838 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
21839 return;
21841 else
21843 if (TARGET_MINIMAL_TOC)
21844 fputs ("\t.long ", file);
21845 else
21846 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
21847 fprintf (file, "0x%lx\n", l & 0xffffffff);
21848 return;
21851 else if (GET_MODE (x) == VOIDmode
21852 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
21854 unsigned HOST_WIDE_INT low;
21855 HOST_WIDE_INT high;
21857 if (GET_CODE (x) == CONST_DOUBLE)
21859 low = CONST_DOUBLE_LOW (x);
21860 high = CONST_DOUBLE_HIGH (x);
21862 else
21863 #if HOST_BITS_PER_WIDE_INT == 32
21865 low = INTVAL (x);
21866 high = (low & 0x80000000) ? ~0 : 0;
21868 #else
21870 low = INTVAL (x) & 0xffffffff;
21871 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
21873 #endif
21875 /* TOC entries are always Pmode-sized, but since this
21876 is a big-endian machine, if we're putting smaller
21877 integer constants in the TOC we have to pad them.
21878 (This is still a win over putting the constants in
21879 a separate constant pool, because then we'd have
21880 to have both a TOC entry _and_ the actual constant.)
21882 For a 32-bit target, CONST_INT values are loaded and shifted
21883 entirely within `low' and can be stored in one TOC entry. */
21885 /* It would be easy to make this work, but it doesn't now. */
21886 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
21888 if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
21890 #if HOST_BITS_PER_WIDE_INT == 32
21891 lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
21892 POINTER_SIZE, &low, &high, 0);
21893 #else
21894 low |= high << 32;
21895 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
21896 high = (HOST_WIDE_INT) low >> 32;
21897 low &= 0xffffffff;
21898 #endif
21901 if (TARGET_64BIT)
21903 if (TARGET_MINIMAL_TOC)
21904 fputs (DOUBLE_INT_ASM_OP, file);
21905 else
21906 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
21907 (long) high & 0xffffffff, (long) low & 0xffffffff);
21908 fprintf (file, "0x%lx%08lx\n",
21909 (long) high & 0xffffffff, (long) low & 0xffffffff);
21910 return;
21912 else
21914 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
21916 if (TARGET_MINIMAL_TOC)
21917 fputs ("\t.long ", file);
21918 else
21919 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
21920 (long) high & 0xffffffff, (long) low & 0xffffffff);
21921 fprintf (file, "0x%lx,0x%lx\n",
21922 (long) high & 0xffffffff, (long) low & 0xffffffff);
21924 else
21926 if (TARGET_MINIMAL_TOC)
21927 fputs ("\t.long ", file);
21928 else
21929 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
21930 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
21932 return;
21936 if (GET_CODE (x) == CONST)
21938 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
21939 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
21941 base = XEXP (XEXP (x, 0), 0);
21942 offset = INTVAL (XEXP (XEXP (x, 0), 1));
21945 switch (GET_CODE (base))
21947 case SYMBOL_REF:
21948 name = XSTR (base, 0);
21949 break;
21951 case LABEL_REF:
21952 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
21953 CODE_LABEL_NUMBER (XEXP (base, 0)));
21954 break;
21956 case CODE_LABEL:
21957 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
21958 break;
21960 default:
21961 gcc_unreachable ();
21964 if (TARGET_MINIMAL_TOC)
21965 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
21966 else
21968 fputs ("\t.tc ", file);
21969 RS6000_OUTPUT_BASENAME (file, name);
21971 if (offset < 0)
21972 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
21973 else if (offset)
21974 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
21976 fputs ("[TC],", file);
21979 /* Currently C++ TOC references to vtables can be emitted before it
21980 is decided whether the vtable is public or private. If this is
21981 the case, then the linker will eventually complain that there is
21982 a TOC reference to an unknown section. Thus, for vtables only,
21983 we emit the TOC reference to reference the symbol and not the
21984 section. */
21985 if (VTABLE_NAME_P (name))
21987 RS6000_OUTPUT_BASENAME (file, name);
21988 if (offset < 0)
21989 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
21990 else if (offset > 0)
21991 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
21993 else
21994 output_addr_const (file, x);
21995 putc ('\n', file);
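/* For illustration (assumed symbol, not using TARGET_MINIMAL_TOC):
   a CONST of SYMBOL_REF "foo" plus offset 8 would come out above as

	.tc foo.P8[TC],foo+8

   while a negative offset uses the ".N" suffix instead.  */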
21998 /* Output an assembler pseudo-op to write an ASCII string of N characters
21999 starting at P to FILE.
22001 On the RS/6000, we have to do this using the .byte operation and
22002 write out special characters outside the quoted string.
22003 Also, the assembler is broken; very long strings are truncated,
22004 so we must artificially break them up early. */
22006 void
22007 output_ascii (FILE *file, const char *p, int n)
22009 char c;
22010 int i, count_string;
22011 const char *for_string = "\t.byte \"";
22012 const char *for_decimal = "\t.byte ";
22013 const char *to_close = NULL;
22015 count_string = 0;
22016 for (i = 0; i < n; i++)
22018 c = *p++;
22019 if (c >= ' ' && c < 0177)
22021 if (for_string)
22022 fputs (for_string, file);
22023 putc (c, file);
22025 /* Write two quotes to get one. */
22026 if (c == '"')
22028 putc (c, file);
22029 ++count_string;
22032 for_string = NULL;
22033 for_decimal = "\"\n\t.byte ";
22034 to_close = "\"\n";
22035 ++count_string;
22037 if (count_string >= 512)
22039 fputs (to_close, file);
22041 for_string = "\t.byte \"";
22042 for_decimal = "\t.byte ";
22043 to_close = NULL;
22044 count_string = 0;
22047 else
22049 if (for_decimal)
22050 fputs (for_decimal, file);
22051 fprintf (file, "%d", c);
22053 for_string = "\n\t.byte \"";
22054 for_decimal = ", ";
22055 to_close = "\n";
22056 count_string = 0;
22060 /* Now close the string if we have written one. Then end the line. */
22061 if (to_close)
22062 fputs (to_close, file);
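/* For illustration (an assumed call, not from the original source):
   output_ascii (file, "Hi\n", 3) would emit

	.byte "Hi"
	.byte 10

   printable characters accumulate in a quoted string, while the
   newline (decimal 10) is written on a fresh .byte op.  */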
22065 /* Generate a unique section name for FILENAME for a section type
22066 represented by SECTION_DESC. The name is returned in newly allocated storage at *BUF.
22068 SECTION_DESC can be any string, as long as it is different for each
22069 possible section type.
22071 We name the section in the same manner as xlc. The name begins with an
22072 underscore followed by the filename (after stripping any leading directory
22073 names) with the last period replaced by the string SECTION_DESC. If
22074 FILENAME does not contain a period, SECTION_DESC is appended to the end of
22075 the name. */
22077 void
22078 rs6000_gen_section_name (char **buf, const char *filename,
22079 const char *section_desc)
22081 const char *q, *after_last_slash, *last_period = 0;
22082 char *p;
22083 int len;
22085 after_last_slash = filename;
22086 for (q = filename; *q; q++)
22088 if (*q == '/')
22089 after_last_slash = q + 1;
22090 else if (*q == '.')
22091 last_period = q;
22094 len = strlen (after_last_slash) + strlen (section_desc) + 2;
22095 *buf = (char *) xmalloc (len);
22097 p = *buf;
22098 *p++ = '_';
22100 for (q = after_last_slash; *q; q++)
22102 if (q == last_period)
22104 strcpy (p, section_desc);
22105 p += strlen (section_desc);
22106 break;
22109 else if (ISALNUM (*q))
22110 *p++ = *q;
22113 if (last_period == 0)
22114 strcpy (p, section_desc);
22115 else
22116 *p = '\0';
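/* For illustration (assumed inputs): with FILENAME "src/foo.c" and a
   SECTION_DESC such as ".bss", the code above yields "_foo.bss" --
   the directory prefix is dropped, '_' is prepended, and the last
   period plus suffix is replaced by SECTION_DESC.  Non-alphanumeric
   characters before the last period are silently discarded.  */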
22119 /* Emit profile function. */
22121 void
22122 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
22124 /* Non-standard profiling for kernels, which just saves LR then calls
22125 _mcount without worrying about arg saves. The idea is to change
22126 the function prologue as little as possible as it isn't easy to
22127 account for arg save/restore code added just for _mcount. */
22128 if (TARGET_PROFILE_KERNEL)
22129 return;
22131 if (DEFAULT_ABI == ABI_AIX)
22133 #ifndef NO_PROFILE_COUNTERS
22134 # define NO_PROFILE_COUNTERS 0
22135 #endif
22136 if (NO_PROFILE_COUNTERS)
22137 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22138 LCT_NORMAL, VOIDmode, 0);
22139 else
22141 char buf[30];
22142 const char *label_name;
22143 rtx fun;
22145 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22146 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
22147 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
22149 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
22150 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
22153 else if (DEFAULT_ABI == ABI_DARWIN)
22155 const char *mcount_name = RS6000_MCOUNT;
22156 int caller_addr_regno = LR_REGNO;
22158 /* Be conservative and always set this, at least for now. */
22159 crtl->uses_pic_offset_table = 1;
22161 #if TARGET_MACHO
22162 /* For PIC code, set up a stub and collect the caller's address
22163 from r0, which is where the prologue puts it. */
22164 if (MACHOPIC_INDIRECT
22165 && crtl->uses_pic_offset_table)
22166 caller_addr_regno = 0;
22167 #endif
22168 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
22169 LCT_NORMAL, VOIDmode, 1,
22170 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
22174 /* Write function profiler code. */
22176 void
22177 output_function_profiler (FILE *file, int labelno)
22179 char buf[100];
22181 switch (DEFAULT_ABI)
22183 default:
22184 gcc_unreachable ();
22186 case ABI_V4:
22187 if (!TARGET_32BIT)
22189 warning (0, "no profiling of 64-bit code for this ABI");
22190 return;
22192 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
22193 fprintf (file, "\tmflr %s\n", reg_names[0]);
22194 if (NO_PROFILE_COUNTERS)
22196 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
22197 reg_names[0], reg_names[1]);
22199 else if (TARGET_SECURE_PLT && flag_pic)
22201 if (TARGET_LINK_STACK)
22203 char name[32];
22204 get_ppc476_thunk_name (name);
22205 asm_fprintf (file, "\tbl %s\n", name);
22207 else
22208 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
22209 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
22210 reg_names[0], reg_names[1]);
22211 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22212 asm_fprintf (file, "\t{cau|addis} %s,%s,",
22213 reg_names[12], reg_names[12]);
22214 assemble_name (file, buf);
22215 asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
22216 assemble_name (file, buf);
22217 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
22219 else if (flag_pic == 1)
22221 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
22222 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
22223 reg_names[0], reg_names[1]);
22224 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
22225 asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
22226 assemble_name (file, buf);
22227 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
22229 else if (flag_pic > 1)
22231 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
22232 reg_names[0], reg_names[1]);
22233 /* Now, we need to get the address of the label. */
22234 if (TARGET_LINK_STACK)
22236 char name[32];
22237 get_ppc476_thunk_name (name);
22238 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
22239 assemble_name (file, buf);
22240 fputs ("-.\n1:", file);
22241 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22242 asm_fprintf (file, "\taddi %s,%s,4\n",
22243 reg_names[11], reg_names[11]);
22245 else
22247 fputs ("\tbcl 20,31,1f\n\t.long ", file);
22248 assemble_name (file, buf);
22249 fputs ("-.\n1:", file);
22250 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
22252 asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
22253 reg_names[0], reg_names[11]);
22254 asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
22255 reg_names[0], reg_names[0], reg_names[11]);
22257 else
22259 asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
22260 assemble_name (file, buf);
22261 fputs ("@ha\n", file);
22262 asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
22263 reg_names[0], reg_names[1]);
22264 asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
22265 assemble_name (file, buf);
22266 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
22269 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
22270 fprintf (file, "\tbl %s%s\n",
22271 RS6000_MCOUNT, flag_pic ? "@plt" : "");
22272 break;
22274 case ABI_AIX:
22275 case ABI_DARWIN:
22276 if (!TARGET_PROFILE_KERNEL)
22278 /* Don't do anything, done in output_profile_hook (). */
22280 else
22282 gcc_assert (!TARGET_32BIT);
22284 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
22285 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
22287 if (cfun->static_chain_decl != NULL)
22289 asm_fprintf (file, "\tstd %s,24(%s)\n",
22290 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22291 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22292 asm_fprintf (file, "\tld %s,24(%s)\n",
22293 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
22295 else
22296 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
22298 break;
22304 /* The following variable holds the last issued insn. */
22306 static rtx last_scheduled_insn;
22308 /* The following variable helps to balance issuing of load and
22309 store instructions. */
22311 static int load_store_pendulum;
22313 /* Power4 load update and store update instructions are cracked into a
22314 load or store and an integer insn which are executed in the same cycle.
22315 Branches have their own dispatch slot which does not count against the
22316 GCC issue rate, but it changes the program flow so there are no other
22317 instructions to issue in this cycle. */
22319 static int
22320 rs6000_variable_issue_1 (rtx insn, int more)
22322 last_scheduled_insn = insn;
22323 if (GET_CODE (PATTERN (insn)) == USE
22324 || GET_CODE (PATTERN (insn)) == CLOBBER)
22326 cached_can_issue_more = more;
22327 return cached_can_issue_more;
22330 if (insn_terminates_group_p (insn, current_group))
22332 cached_can_issue_more = 0;
22333 return cached_can_issue_more;
22336 /* If the insn has no reservation but we reach here, treat it as consuming no issue slot. */
22337 if (recog_memoized (insn) < 0)
22338 return more;
22340 if (rs6000_sched_groups)
22342 if (is_microcoded_insn (insn))
22343 cached_can_issue_more = 0;
22344 else if (is_cracked_insn (insn))
22345 cached_can_issue_more = more > 2 ? more - 2 : 0;
22346 else
22347 cached_can_issue_more = more - 1;
22349 return cached_can_issue_more;
22352 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
22353 return 0;
22355 cached_can_issue_more = more - 1;
22356 return cached_can_issue_more;
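/* For illustration: on a dispatch-group CPU (rs6000_sched_groups),
   starting from MORE == 4 issue slots, the code above leaves 2 slots
   after a cracked insn (it occupies two), 0 after a microcoded insn
   (it ends the group), and 3 after an ordinary insn.  */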
22359 static int
22360 rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
22362 int r = rs6000_variable_issue_1 (insn, more);
22363 if (verbose)
22364 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
22365 return r;
22368 /* Adjust the cost of a scheduling dependency. Return the new cost of
22369 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
22371 static int
22372 rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22374 enum attr_type attr_type;
22376 if (! recog_memoized (insn))
22377 return 0;
22379 switch (REG_NOTE_KIND (link))
22381 case REG_DEP_TRUE:
22383 /* Data dependency; DEP_INSN writes a register that INSN reads
22384 some cycles later. */
22386 /* Separate a load from a narrower, dependent store. */
22387 if (rs6000_sched_groups
22388 && GET_CODE (PATTERN (insn)) == SET
22389 && GET_CODE (PATTERN (dep_insn)) == SET
22390 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
22391 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
22392 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
22393 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
22394 return cost + 14;
22396 attr_type = get_attr_type (insn);
22398 switch (attr_type)
22400 case TYPE_JMPREG:
22401 /* Tell the first scheduling pass about the latency between
22402 a mtctr and bctr (and mtlr and br/blr). The first
22403 scheduling pass will not know about this latency since
22404 the mtctr instruction, which has the latency associated
22405 to it, will be generated by reload. */
22406 return TARGET_POWER ? 5 : 4;
22407 case TYPE_BRANCH:
22408 /* Leave some extra cycles between a compare and its
22409 dependent branch, to inhibit expensive mispredicts. */
22410 if ((rs6000_cpu_attr == CPU_PPC603
22411 || rs6000_cpu_attr == CPU_PPC604
22412 || rs6000_cpu_attr == CPU_PPC604E
22413 || rs6000_cpu_attr == CPU_PPC620
22414 || rs6000_cpu_attr == CPU_PPC630
22415 || rs6000_cpu_attr == CPU_PPC750
22416 || rs6000_cpu_attr == CPU_PPC7400
22417 || rs6000_cpu_attr == CPU_PPC7450
22418 || rs6000_cpu_attr == CPU_POWER4
22419 || rs6000_cpu_attr == CPU_POWER5
22420 || rs6000_cpu_attr == CPU_POWER7
22421 || rs6000_cpu_attr == CPU_CELL)
22422 && recog_memoized (dep_insn)
22423 && (INSN_CODE (dep_insn) >= 0))
22425 switch (get_attr_type (dep_insn))
22427 case TYPE_CMP:
22428 case TYPE_COMPARE:
22429 case TYPE_DELAYED_COMPARE:
22430 case TYPE_IMUL_COMPARE:
22431 case TYPE_LMUL_COMPARE:
22432 case TYPE_FPCOMPARE:
22433 case TYPE_CR_LOGICAL:
22434 case TYPE_DELAYED_CR:
22435 return cost + 2;
22436 default:
22437 break;
22439 break;
22441 case TYPE_STORE:
22442 case TYPE_STORE_U:
22443 case TYPE_STORE_UX:
22444 case TYPE_FPSTORE:
22445 case TYPE_FPSTORE_U:
22446 case TYPE_FPSTORE_UX:
22447 if ((rs6000_cpu == PROCESSOR_POWER6)
22448 && recog_memoized (dep_insn)
22449 && (INSN_CODE (dep_insn) >= 0))
22452 if (GET_CODE (PATTERN (insn)) != SET)
22453 /* If this happens, we have to extend this to schedule
22454 optimally. Return default for now. */
22455 return cost;
22457 /* Adjust the cost for the case where the value written
22458 by a fixed point operation is used as the address
22459 gen value on a store. */
22460 switch (get_attr_type (dep_insn))
22462 case TYPE_LOAD:
22463 case TYPE_LOAD_U:
22464 case TYPE_LOAD_UX:
22465 case TYPE_CNTLZ:
22467 if (! store_data_bypass_p (dep_insn, insn))
22468 return 4;
22469 break;
22471 case TYPE_LOAD_EXT:
22472 case TYPE_LOAD_EXT_U:
22473 case TYPE_LOAD_EXT_UX:
22474 case TYPE_VAR_SHIFT_ROTATE:
22475 case TYPE_VAR_DELAYED_COMPARE:
22477 if (! store_data_bypass_p (dep_insn, insn))
22478 return 6;
22479 break;
22481 case TYPE_INTEGER:
22482 case TYPE_COMPARE:
22483 case TYPE_FAST_COMPARE:
22484 case TYPE_EXTS:
22485 case TYPE_SHIFT:
22486 case TYPE_INSERT_WORD:
22487 case TYPE_INSERT_DWORD:
22488 case TYPE_FPLOAD_U:
22489 case TYPE_FPLOAD_UX:
22490 case TYPE_STORE_U:
22491 case TYPE_STORE_UX:
22492 case TYPE_FPSTORE_U:
22493 case TYPE_FPSTORE_UX:
22495 if (! store_data_bypass_p (dep_insn, insn))
22496 return 3;
22497 break;
22499 case TYPE_IMUL:
22500 case TYPE_IMUL2:
22501 case TYPE_IMUL3:
22502 case TYPE_LMUL:
22503 case TYPE_IMUL_COMPARE:
22504 case TYPE_LMUL_COMPARE:
22506 if (! store_data_bypass_p (dep_insn, insn))
22507 return 17;
22508 break;
22510 case TYPE_IDIV:
22512 if (! store_data_bypass_p (dep_insn, insn))
22513 return 45;
22514 break;
22516 case TYPE_LDIV:
22518 if (! store_data_bypass_p (dep_insn, insn))
22519 return 57;
22520 break;
22522 default:
22523 break;
22526 break;
22528 case TYPE_LOAD:
22529 case TYPE_LOAD_U:
22530 case TYPE_LOAD_UX:
22531 case TYPE_LOAD_EXT:
22532 case TYPE_LOAD_EXT_U:
22533 case TYPE_LOAD_EXT_UX:
22534 if ((rs6000_cpu == PROCESSOR_POWER6)
22535 && recog_memoized (dep_insn)
22536 && (INSN_CODE (dep_insn) >= 0))
22539 /* Adjust the cost for the case where the value written
22540 by a fixed point instruction is used within the address
22541 gen portion of a subsequent load(u)(x). */
22542 switch (get_attr_type (dep_insn))
22544 case TYPE_LOAD:
22545 case TYPE_LOAD_U:
22546 case TYPE_LOAD_UX:
22547 case TYPE_CNTLZ:
22549 if (set_to_load_agen (dep_insn, insn))
22550 return 4;
22551 break;
22553 case TYPE_LOAD_EXT:
22554 case TYPE_LOAD_EXT_U:
22555 case TYPE_LOAD_EXT_UX:
22556 case TYPE_VAR_SHIFT_ROTATE:
22557 case TYPE_VAR_DELAYED_COMPARE:
22559 if (set_to_load_agen (dep_insn, insn))
22560 return 6;
22561 break;
22563 case TYPE_INTEGER:
22564 case TYPE_COMPARE:
22565 case TYPE_FAST_COMPARE:
22566 case TYPE_EXTS:
22567 case TYPE_SHIFT:
22568 case TYPE_INSERT_WORD:
22569 case TYPE_INSERT_DWORD:
22570 case TYPE_FPLOAD_U:
22571 case TYPE_FPLOAD_UX:
22572 case TYPE_STORE_U:
22573 case TYPE_STORE_UX:
22574 case TYPE_FPSTORE_U:
22575 case TYPE_FPSTORE_UX:
22577 if (set_to_load_agen (dep_insn, insn))
22578 return 3;
22579 break;
22581 case TYPE_IMUL:
22582 case TYPE_IMUL2:
22583 case TYPE_IMUL3:
22584 case TYPE_LMUL:
22585 case TYPE_IMUL_COMPARE:
22586 case TYPE_LMUL_COMPARE:
22588 if (set_to_load_agen (dep_insn, insn))
22589 return 17;
22590 break;
22592 case TYPE_IDIV:
22594 if (set_to_load_agen (dep_insn, insn))
22595 return 45;
22596 break;
22598 case TYPE_LDIV:
22600 if (set_to_load_agen (dep_insn, insn))
22601 return 57;
22602 break;
22604 default:
22605 break;
22608 break;
22610 case TYPE_FPLOAD:
22611 if ((rs6000_cpu == PROCESSOR_POWER6)
22612 && recog_memoized (dep_insn)
22613 && (INSN_CODE (dep_insn) >= 0)
22614 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
22615 return 2;
22617 default:
22618 break;
22621 /* Fall out to return default cost. */
22623 break;
22625 case REG_DEP_OUTPUT:
22626 /* Output dependency; DEP_INSN writes a register that INSN writes some
22627 cycles later. */
22628 if ((rs6000_cpu == PROCESSOR_POWER6)
22629 && recog_memoized (dep_insn)
22630 && (INSN_CODE (dep_insn) >= 0))
22632 attr_type = get_attr_type (insn);
22634 switch (attr_type)
22636 case TYPE_FP:
22637 if (get_attr_type (dep_insn) == TYPE_FP)
22638 return 1;
22639 break;
22640 case TYPE_FPLOAD:
22641 if (get_attr_type (dep_insn) == TYPE_MFFGPR)
22642 return 2;
22643 break;
22644 default:
22645 break;
22648 case REG_DEP_ANTI:
22649 /* Anti dependency; DEP_INSN reads a register that INSN writes some
22650 cycles later. */
22651 return 0;
22653 default:
22654 gcc_unreachable ();
22657 return cost;
22660 /* Debug version of rs6000_adjust_cost. */
22662 static int
22663 rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
22665 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
22667 if (ret != cost)
22669 const char *dep;
22671 switch (REG_NOTE_KIND (link))
22673 default: dep = "unknown dependency"; break;
22674 case REG_DEP_TRUE: dep = "data dependency"; break;
22675 case REG_DEP_OUTPUT: dep = "output dependency"; break;
22676 case REG_DEP_ANTI: dep = "anti dependency"; break;
22679 fprintf (stderr,
22680 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
22681 "%s, insn:\n", ret, cost, dep);
22683 debug_rtx (insn);
22686 return ret;
22689 /* The function returns true if INSN is microcoded.
22690 Return false otherwise. */
22692 static bool
22693 is_microcoded_insn (rtx insn)
22695 if (!insn || !NONDEBUG_INSN_P (insn)
22696 || GET_CODE (PATTERN (insn)) == USE
22697 || GET_CODE (PATTERN (insn)) == CLOBBER)
22698 return false;
22700 if (rs6000_cpu_attr == CPU_CELL)
22701 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
22703 if (rs6000_sched_groups)
22705 enum attr_type type = get_attr_type (insn);
22706 if (type == TYPE_LOAD_EXT_U
22707 || type == TYPE_LOAD_EXT_UX
22708 || type == TYPE_LOAD_UX
22709 || type == TYPE_STORE_UX
22710 || type == TYPE_MFCR)
22711 return true;
22714 return false;
22717 /* The function returns true if INSN is cracked into 2 instructions
22718 by the processor (and therefore occupies 2 issue slots). */
22720 static bool
22721 is_cracked_insn (rtx insn)
22723 if (!insn || !NONDEBUG_INSN_P (insn)
22724 || GET_CODE (PATTERN (insn)) == USE
22725 || GET_CODE (PATTERN (insn)) == CLOBBER)
22726 return false;
22728 if (rs6000_sched_groups)
22730 enum attr_type type = get_attr_type (insn);
22731 if (type == TYPE_LOAD_U || type == TYPE_STORE_U
22732 || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
22733 || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
22734 || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
22735 || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
22736 || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
22737 || type == TYPE_IDIV || type == TYPE_LDIV
22738 || type == TYPE_INSERT_WORD)
22739 return true;
22742 return false;
22745 /* The function returns true if INSN can be issued only from
22746 the branch slot. */
22748 static bool
22749 is_branch_slot_insn (rtx insn)
22751 if (!insn || !NONDEBUG_INSN_P (insn)
22752 || GET_CODE (PATTERN (insn)) == USE
22753 || GET_CODE (PATTERN (insn)) == CLOBBER)
22754 return false;
22756 if (rs6000_sched_groups)
22758 enum attr_type type = get_attr_type (insn);
22759 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
22760 return true;
22761 return false;
22764 return false;
22767 /* The function returns true if OUT_INSN sets a value that is
22768 used in the address generation computation of IN_INSN. */
22769 static bool
22770 set_to_load_agen (rtx out_insn, rtx in_insn)
22772 rtx out_set, in_set;
22774 /* For performance reasons, only handle the simple case where
22775 both insns are a single_set. */
22776 out_set = single_set (out_insn);
22777 if (out_set)
22779 in_set = single_set (in_insn);
22780 if (in_set)
22781 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
22784 return false;
22787 /* Try to determine base/offset/size parts of the given MEM.
22788 Return true if successful, false if not all of the values could
22789 be determined.
22791 This function only looks for REG or REG+CONST address forms.
22792 REG+REG address form will return false. */
22794 static bool
22795 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
22796 HOST_WIDE_INT *size)
22798 rtx addr_rtx;
22799 if (MEM_SIZE_KNOWN_P (mem))
22800 *size = MEM_SIZE (mem);
22801 else
22802 return false;
22804 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
22805 addr_rtx = XEXP (XEXP (mem, 0), 1);
22806 else
22807 addr_rtx = XEXP (mem, 0);
22809 if (GET_CODE (addr_rtx) == REG)
22811 *base = addr_rtx;
22812 *offset = 0;
22814 else if (GET_CODE (addr_rtx) == PLUS
22815 && CONST_INT_P (XEXP (addr_rtx, 1)))
22817 *base = XEXP (addr_rtx, 0);
22818 *offset = INTVAL (XEXP (addr_rtx, 1));
22820 else
22821 return false;
22823 return true;
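/* For illustration (a hypothetical MEM): given
   (mem:SI (plus:DI (reg:DI 9) (const_int 16))) with a known size of 4,
   get_memref_parts returns base == (reg:DI 9), offset == 16, size == 4.
   A plain register-indirect MEM yields offset == 0; a REG+REG address
   or an unknown size makes it return false.  */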
22826 /* Return true if the target storage location of MEM1 is
22827 adjacent to the target storage location of MEM2. */
22830 static bool
22831 adjacent_mem_locations (rtx mem1, rtx mem2)
22833 rtx reg1, reg2;
22834 HOST_WIDE_INT off1, size1, off2, size2;
22836 if (get_memref_parts (mem1, &reg1, &off1, &size1)
22837 && get_memref_parts (mem2, &reg2, &off2, &size2))
22838 return ((REGNO (reg1) == REGNO (reg2))
22839 && ((off1 + size1 == off2)
22840 || (off2 + size2 == off1)));
22842 return false;
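/* For illustration: two 4-byte accesses at r9+16 and r9+20 are
   adjacent (16 + 4 == 20), in either order; r9+16 and r9+24 are not,
   nor are accesses with different base registers.  */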
22845 /* This function returns true if it can be determined that the two MEM
22846 locations overlap by at least 1 byte based on base reg/offset/size. */
22848 static bool
22849 mem_locations_overlap (rtx mem1, rtx mem2)
22851 rtx reg1, reg2;
22852 HOST_WIDE_INT off1, size1, off2, size2;
22854 if (get_memref_parts (mem1, &reg1, &off1, &size1)
22855 && get_memref_parts (mem2, &reg2, &off2, &size2))
22856 return ((REGNO (reg1) == REGNO (reg2))
22857 && (((off1 <= off2) && (off1 + size1 > off2))
22858 || ((off2 <= off1) && (off2 + size2 > off1))));
22860 return false;
22863 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
22864 Increase the priority to execute INSN earlier, reduce the
22865 priority to execute INSN later. */
22869 static int
22870 rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
22872 rtx load_mem, str_mem;
22873 /* On machines (like the 750) which have asymmetric integer units,
22874 where one integer unit can do multiply and divides and the other
22875 can't, reduce the priority of multiply/divide so it is scheduled
22876 before other integer operations. */
22878 #if 0
22879 if (! INSN_P (insn))
22880 return priority;
22882 if (GET_CODE (PATTERN (insn)) == USE)
22883 return priority;
22885 switch (rs6000_cpu_attr) {
22886 case CPU_PPC750:
22887 switch (get_attr_type (insn))
22889 default:
22890 break;
22892 case TYPE_IMUL:
22893 case TYPE_IDIV:
22894 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
22895 priority, priority);
22896 if (priority >= 0 && priority < 0x01000000)
22897 priority >>= 3;
22898 break;
22901 #endif
22903 if (insn_must_be_first_in_group (insn)
22904 && reload_completed
22905 && current_sched_info->sched_max_insns_priority
22906 && rs6000_sched_restricted_insns_priority)
22909 /* Prioritize insns that can be dispatched only in the first
22910 dispatch slot. */
22911 if (rs6000_sched_restricted_insns_priority == 1)
22912 /* Attach highest priority to insn. This means that in
22913 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
22914 precede 'priority' (critical path) considerations. */
22915 return current_sched_info->sched_max_insns_priority;
22916 else if (rs6000_sched_restricted_insns_priority == 2)
22917 /* Increase priority of insn by a minimal amount. This means that in
22918 haifa-sched.c:ready_sort(), only 'priority' (critical path)
22919 considerations precede dispatch-slot restriction considerations. */
22920 return (priority + 1);
22923 if (rs6000_cpu == PROCESSOR_POWER6
22924 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
22925 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
22926 /* Attach highest priority to insn if the scheduler has just issued two
22927 stores and this instruction is a load, or two loads and this instruction
22928 is a store. Power6 wants loads and stores scheduled alternately
22929 when possible */
22930 return current_sched_info->sched_max_insns_priority;
22932 return priority;
22935 /* Return true if the instruction is nonpipelined on the Cell. */
22936 static bool
22937 is_nonpipeline_insn (rtx insn)
22939 enum attr_type type;
22940 if (!insn || !NONDEBUG_INSN_P (insn)
22941 || GET_CODE (PATTERN (insn)) == USE
22942 || GET_CODE (PATTERN (insn)) == CLOBBER)
22943 return false;
22945 type = get_attr_type (insn);
22946 if (type == TYPE_IMUL
22947 || type == TYPE_IMUL2
22948 || type == TYPE_IMUL3
22949 || type == TYPE_LMUL
22950 || type == TYPE_IDIV
22951 || type == TYPE_LDIV
22952 || type == TYPE_SDIV
22953 || type == TYPE_DDIV
22954 || type == TYPE_SSQRT
22955 || type == TYPE_DSQRT
22956 || type == TYPE_MFCR
22957 || type == TYPE_MFCRF
22958 || type == TYPE_MFJMPR)
22960 return true;
22962 return false;
22966 /* Return how many instructions the machine can issue per cycle. */
22968 static int
22969 rs6000_issue_rate (void)
22971 /* Unless scheduling for register pressure, use issue rate of 1 for
22972 first scheduling pass to decrease degradation. */
22973 if (!reload_completed && !flag_sched_pressure)
22974 return 1;
22976 switch (rs6000_cpu_attr) {
22977 case CPU_RIOS1: /* ? */
22978 case CPU_RS64A:
22979 case CPU_PPC601: /* ? */
22980 case CPU_PPC7450:
22981 return 3;
22982 case CPU_PPC440:
22983 case CPU_PPC603:
22984 case CPU_PPC750:
22985 case CPU_PPC7400:
22986 case CPU_PPC8540:
22987 case CPU_PPC8548:
22988 case CPU_CELL:
22989 case CPU_PPCE300C2:
22990 case CPU_PPCE300C3:
22991 case CPU_PPCE500MC:
22992 case CPU_PPCE500MC64:
22993 case CPU_TITAN:
22994 return 2;
22995 case CPU_RIOS2:
22996 case CPU_PPC476:
22997 case CPU_PPC604:
22998 case CPU_PPC604E:
22999 case CPU_PPC620:
23000 case CPU_PPC630:
23001 return 4;
23002 case CPU_POWER4:
23003 case CPU_POWER5:
23004 case CPU_POWER6:
23005 case CPU_POWER7:
23006 return 5;
23007 default:
23008 return 1;
23012 /* Return how many instructions to look ahead for better insn
23013 scheduling. */
23015 static int
23016 rs6000_use_sched_lookahead (void)
23018 switch (rs6000_cpu_attr)
23020 case CPU_PPC8540:
23021 case CPU_PPC8548:
23022 return 4;
23024 case CPU_CELL:
23025 return (reload_completed ? 8 : 0);
23027 default:
23028 return 0;
23032 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen. */
23033 static int
23034 rs6000_use_sched_lookahead_guard (rtx insn)
23036 if (rs6000_cpu_attr != CPU_CELL)
23037 return 1;
23039 if (insn == NULL_RTX || !INSN_P (insn))
23040 abort ();
23042 if (!reload_completed
23043 || is_nonpipeline_insn (insn)
23044 || is_microcoded_insn (insn))
23045 return 0;
23047 return 1;
23050 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
23051 and return true. */
23053 static bool
23054 find_mem_ref (rtx pat, rtx *mem_ref)
23056 const char * fmt;
23057 int i, j;
23059 /* stack_tie does not produce any real memory traffic. */
23060 if (tie_operand (pat, VOIDmode))
23061 return false;
23063 if (GET_CODE (pat) == MEM)
23065 *mem_ref = pat;
23066 return true;
23069 /* Recursively process the pattern. */
23070 fmt = GET_RTX_FORMAT (GET_CODE (pat));
23072 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
23074 if (fmt[i] == 'e')
23076 if (find_mem_ref (XEXP (pat, i), mem_ref))
23077 return true;
23079 else if (fmt[i] == 'E')
23080 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
23082 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
23083 return true;
23087 return false;
23090 /* Determine if PAT is a PATTERN of a load insn. */
23092 static bool
23093 is_load_insn1 (rtx pat, rtx *load_mem)
23095 if (!pat)
23096 return false;
23098 if (GET_CODE (pat) == SET)
23099 return find_mem_ref (SET_SRC (pat), load_mem);
23101 if (GET_CODE (pat) == PARALLEL)
23103 int i;
23105 for (i = 0; i < XVECLEN (pat, 0); i++)
23106 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
23107 return true;
23110 return false;
23113 /* Determine if INSN loads from memory. */
23115 static bool
23116 is_load_insn (rtx insn, rtx *load_mem)
23118 if (!insn || !INSN_P (insn))
23119 return false;
23121 if (GET_CODE (insn) == CALL_INSN)
23122 return false;
23124 return is_load_insn1 (PATTERN (insn), load_mem);
23127 /* Determine if PAT is a PATTERN of a store insn. */
23129 static bool
23130 is_store_insn1 (rtx pat, rtx *str_mem)
23132 if (!pat)
23133 return false;
23135 if (GET_CODE (pat) == SET)
23136 return find_mem_ref (SET_DEST (pat), str_mem);
23138 if (GET_CODE (pat) == PARALLEL)
23140 int i;
23142 for (i = 0; i < XVECLEN (pat, 0); i++)
23143 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
23144 return true;
23147 return false;
23150 /* Determine if INSN stores to memory. */
23152 static bool
23153 is_store_insn (rtx insn, rtx *str_mem)
23155 if (!insn || !INSN_P (insn))
23156 return false;
23158 return is_store_insn1 (PATTERN (insn), str_mem);
23161 /* Returns whether the dependence between INSN and NEXT is considered
23162 costly by the given target. */
23164 static bool
23165 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
23167 rtx insn;
23168 rtx next;
23169 rtx load_mem, str_mem;
23171 /* If the flag is not enabled - no dependence is considered costly;
23172 allow all dependent insns in the same group.
23173 This is the most aggressive option. */
23174 if (rs6000_sched_costly_dep == no_dep_costly)
23175 return false;
23177 /* If the flag is set to 1 - a dependence is always considered costly;
23178 do not allow dependent instructions in the same group.
23179 This is the most conservative option. */
23180 if (rs6000_sched_costly_dep == all_deps_costly)
23181 return true;
23183 insn = DEP_PRO (dep);
23184 next = DEP_CON (dep);
23186 if (rs6000_sched_costly_dep == store_to_load_dep_costly
23187 && is_load_insn (next, &load_mem)
23188 && is_store_insn (insn, &str_mem))
23189 /* Prevent load after store in the same group. */
23190 return true;
23192 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
23193 && is_load_insn (next, &load_mem)
23194 && is_store_insn (insn, &str_mem)
23195 && DEP_TYPE (dep) == REG_DEP_TRUE
23196 && mem_locations_overlap(str_mem, load_mem))
23197 /* Prevent load after store in the same group if it is a true
23198 dependence. */
23199 return true;
23201 /* The flag is set to X; dependences with latency >= X are considered costly,
23202 and will not be scheduled in the same group. */
23203 if (rs6000_sched_costly_dep <= max_dep_latency
23204 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
23205 return true;
23207 return false;
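/* For illustration: with a numeric -msched-costly-dep value of 3
   (rs6000_sched_costly_dep == 3), a dependence of cost 4 at distance 0
   is costly (4 - 0 >= 3) and the insns are kept in separate groups,
   whereas a dependence of cost 2 is not.  */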
23210 /* Return the next insn after INSN that is found before TAIL is reached,
23211 skipping any "non-active" insns - insns that will not actually occupy
23212 an issue slot. Return NULL_RTX if such an insn is not found. */
23214 static rtx
23215 get_next_active_insn (rtx insn, rtx tail)
23217 if (insn == NULL_RTX || insn == tail)
23218 return NULL_RTX;
23220 while (1)
23222 insn = NEXT_INSN (insn);
23223 if (insn == NULL_RTX || insn == tail)
23224 return NULL_RTX;
23226 if (CALL_P (insn)
23227 || JUMP_P (insn)
23228 || (NONJUMP_INSN_P (insn)
23229 && GET_CODE (PATTERN (insn)) != USE
23230 && GET_CODE (PATTERN (insn)) != CLOBBER
23231 && INSN_CODE (insn) != CODE_FOR_stack_tie))
23232 break;
23234 return insn;
23237 /* We are about to begin issuing insns for this clock cycle. */
23239 static int
23240 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
23241 rtx *ready ATTRIBUTE_UNUSED,
23242 int *pn_ready ATTRIBUTE_UNUSED,
23243 int clock_var ATTRIBUTE_UNUSED)
23245 int n_ready = *pn_ready;
23247 if (sched_verbose)
23248 fprintf (dump, "// rs6000_sched_reorder :\n");
23250 /* Reorder the ready list, if the head of the ready list
23251 is a nonpipelined insn. */
23252 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
23254 if (is_nonpipeline_insn (ready[n_ready - 1])
23255 && (recog_memoized (ready[n_ready - 2]) > 0))
23256 /* Simply swap first two insns. */
23258 rtx tmp = ready[n_ready - 1];
23259 ready[n_ready - 1] = ready[n_ready - 2];
23260 ready[n_ready - 2] = tmp;
23264 if (rs6000_cpu == PROCESSOR_POWER6)
23265 load_store_pendulum = 0;
23267 return rs6000_issue_rate ();
23270 /* Like rs6000_sched_reorder, but called after issuing each insn. */
23272 static int
23273 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
23274 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
23276 if (sched_verbose)
23277 fprintf (dump, "// rs6000_sched_reorder2 :\n");
23279 /* For Power6, we need to handle some special cases to try and keep the
23280 store queue from overflowing and triggering expensive flushes.
23282 This code monitors how load and store instructions are being issued
23283 and skews the ready list one way or the other to increase the likelihood
23284 that a desired instruction is issued at the proper time.
23286 A couple of things are done. First, we maintain a "load_store_pendulum"
23287 to track the current state of load/store issue.
23289 - If the pendulum is at zero, then no loads or stores have been
23290 issued in the current cycle so we do nothing.
23292 - If the pendulum is 1, then a single load has been issued in this
23293 cycle and we attempt to locate another load in the ready list to
23294 issue with it.
23296 - If the pendulum is -2, then two stores have already been
23297 issued in this cycle, so we increase the priority of the first load
23298 in the ready list to increase its likelihood of being chosen first
23299 in the next cycle.
23301 - If the pendulum is -1, then a single store has been issued in this
23302 cycle and we attempt to locate another store in the ready list to
23303 issue with it, preferring a store to an adjacent memory location to
23304 facilitate store pairing in the store queue.
23306 - If the pendulum is 2, then two loads have already been
23307 issued in this cycle, so we increase the priority of the first store
23308 in the ready list to increase its likelihood of being chosen first
23309 in the next cycle.
23311 - If the pendulum < -2 or > 2, then do nothing.
23313 Note: This code covers the most common scenarios. There exist non
23314 load/store instructions which make use of the LSU and which
23315 would need to be accounted for to strictly model the behavior
23316 of the machine. Those instructions are currently unaccounted
23317 for to help minimize compile time overhead of this code. */
23319 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
23321 int pos;
23322 int i;
23323 rtx tmp, load_mem, str_mem;
23325 if (is_store_insn (last_scheduled_insn, &str_mem))
23326 /* Issuing a store, swing the load_store_pendulum to the left */
23327 load_store_pendulum--;
23328 else if (is_load_insn (last_scheduled_insn, &load_mem))
23329 /* Issuing a load, swing the load_store_pendulum to the right */
23330 load_store_pendulum++;
23331 else
23332 return cached_can_issue_more;
23334 /* If the pendulum is balanced, or there is only one instruction on
23335 the ready list, then all is well, so return. */
23336 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
23337 return cached_can_issue_more;
23339 if (load_store_pendulum == 1)
23341 /* A load has been issued in this cycle. Scan the ready list
23342 for another load to issue with it */
23343 pos = *pn_ready-1;
23345 while (pos >= 0)
23347 if (is_load_insn (ready[pos], &load_mem))
23349 /* Found a load. Move it to the head of the ready list,
23350 and adjust its priority so that it is more likely to
23351 stay there */
23352 tmp = ready[pos];
23353 for (i=pos; i<*pn_ready-1; i++)
23354 ready[i] = ready[i + 1];
23355 ready[*pn_ready-1] = tmp;
23357 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23358 INSN_PRIORITY (tmp)++;
23359 break;
23361 pos--;
23364 else if (load_store_pendulum == -2)
23366 /* Two stores have been issued in this cycle. Increase the
23367 priority of the first load in the ready list to favor it for
23368 issuing in the next cycle. */
23369 pos = *pn_ready-1;
23371 while (pos >= 0)
23373 if (is_load_insn (ready[pos], &load_mem)
23374 && !sel_sched_p ()
23375 && INSN_PRIORITY_KNOWN (ready[pos]))
23377 INSN_PRIORITY (ready[pos])++;
23379 /* Adjust the pendulum to account for the fact that a load
23380 was found and increased in priority. This is to prevent
23381 increasing the priority of multiple loads */
23382 load_store_pendulum--;
23384 break;
23386 pos--;
23389 else if (load_store_pendulum == -1)
23391 /* A store has been issued in this cycle. Scan the ready list for
23392 another store to issue with it, preferring a store to an adjacent
23393 memory location */
23394 int first_store_pos = -1;
23396 pos = *pn_ready-1;
23398 while (pos >= 0)
23400 if (is_store_insn (ready[pos], &str_mem))
23402 rtx str_mem2;
23403 /* Maintain the index of the first store found on the
23404 list */
23405 if (first_store_pos == -1)
23406 first_store_pos = pos;
23408 if (is_store_insn (last_scheduled_insn, &str_mem2)
23409 && adjacent_mem_locations (str_mem, str_mem2))
23411 /* Found an adjacent store. Move it to the head of the
23412 ready list, and adjust its priority so that it is
23413 more likely to stay there */
23414 tmp = ready[pos];
23415 for (i=pos; i<*pn_ready-1; i++)
23416 ready[i] = ready[i + 1];
23417 ready[*pn_ready-1] = tmp;
23419 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23420 INSN_PRIORITY (tmp)++;
23422 first_store_pos = -1;
23424 break;
23427 pos--;
23430 if (first_store_pos >= 0)
23432 /* An adjacent store wasn't found, but a non-adjacent store was,
23433 so move the non-adjacent store to the front of the ready
23434 list, and adjust its priority so that it is more likely to
23435 stay there. */
23436 tmp = ready[first_store_pos];
23437 for (i=first_store_pos; i<*pn_ready-1; i++)
23438 ready[i] = ready[i + 1];
23439 ready[*pn_ready-1] = tmp;
23440 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
23441 INSN_PRIORITY (tmp)++;
23444 else if (load_store_pendulum == 2)
23446 /* Two loads have been issued in this cycle. Increase the priority
23447 of the first store in the ready list to favor it for issuing in
23448 the next cycle. */
23449 pos = *pn_ready-1;
23451 while (pos >= 0)
23453 if (is_store_insn (ready[pos], &str_mem)
23454 && !sel_sched_p ()
23455 && INSN_PRIORITY_KNOWN (ready[pos]))
23457 INSN_PRIORITY (ready[pos])++;
23459 /* Adjust the pendulum to account for the fact that a store
23460 was found and increased in priority. This is to prevent
23461 increasing the priority of multiple stores */
23462 load_store_pendulum++;
23464 break;
23466 pos--;
23471 return cached_can_issue_more;
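/* For illustration: from a balanced pendulum of 0, issuing a load
   moves it to +1 and the code above then pulls another load to the
   head of the ready list; a second load moves it to +2, at which
   point the first store on the list gets a priority boost so a store
   is likely to issue next.  Stores mirror this at -1/-2.  */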
23474 /* Return whether the presence of INSN causes a dispatch group termination
23475 of group WHICH_GROUP.
23477 If WHICH_GROUP == current_group, this function will return true if INSN
23478 causes the termination of the current group (i.e., the dispatch group to
23479 which INSN belongs). This means that INSN will be the last insn in the
23480 group it belongs to.
23482 If WHICH_GROUP == previous_group, this function will return true if INSN
23483 causes the termination of the previous group (i.e., the dispatch group that
23484 precedes the group to which INSN belongs). This means that INSN will be
23485 the first insn in the group it belongs to. */
23487 static bool
23488 insn_terminates_group_p (rtx insn, enum group_termination which_group)
23490 bool first, last;
23492 if (! insn)
23493 return false;
23495 first = insn_must_be_first_in_group (insn);
23496 last = insn_must_be_last_in_group (insn);
23498 if (first && last)
23499 return true;
23501 if (which_group == current_group)
23502 return last;
23503 else if (which_group == previous_group)
23504 return first;
23506 return false;
23510 static bool
23511 insn_must_be_first_in_group (rtx insn)
23513 enum attr_type type;
23515 if (!insn
23516 || GET_CODE (insn) == NOTE
23517 || DEBUG_INSN_P (insn)
23518 || GET_CODE (PATTERN (insn)) == USE
23519 || GET_CODE (PATTERN (insn)) == CLOBBER)
23520 return false;
23522 switch (rs6000_cpu)
23524 case PROCESSOR_POWER5:
23525 if (is_cracked_insn (insn))
23526 return true;
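/* Fall through: the POWER4 checks below apply to POWER5 as well.  */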
23527 case PROCESSOR_POWER4:
23528 if (is_microcoded_insn (insn))
23529 return true;
23531 if (!rs6000_sched_groups)
23532 return false;
23534 type = get_attr_type (insn);
23536 switch (type)
23538 case TYPE_MFCR:
23539 case TYPE_MFCRF:
23540 case TYPE_MTCR:
23541 case TYPE_DELAYED_CR:
23542 case TYPE_CR_LOGICAL:
23543 case TYPE_MTJMPR:
23544 case TYPE_MFJMPR:
23545 case TYPE_IDIV:
23546 case TYPE_LDIV:
23547 case TYPE_LOAD_L:
23548 case TYPE_STORE_C:
23549 case TYPE_ISYNC:
23550 case TYPE_SYNC:
23551 return true;
23552 default:
23553 break;
23555 break;
23556 case PROCESSOR_POWER6:
23557 type = get_attr_type (insn);
23559 switch (type)
23561 case TYPE_INSERT_DWORD:
23562 case TYPE_EXTS:
23563 case TYPE_CNTLZ:
23564 case TYPE_SHIFT:
23565 case TYPE_VAR_SHIFT_ROTATE:
23566 case TYPE_TRAP:
23567 case TYPE_IMUL:
23568 case TYPE_IMUL2:
23569 case TYPE_IMUL3:
23570 case TYPE_LMUL:
23571 case TYPE_IDIV:
23572 case TYPE_INSERT_WORD:
23573 case TYPE_DELAYED_COMPARE:
23574 case TYPE_IMUL_COMPARE:
23575 case TYPE_LMUL_COMPARE:
23576 case TYPE_FPCOMPARE:
23577 case TYPE_MFCR:
23578 case TYPE_MTCR:
23579 case TYPE_MFJMPR:
23580 case TYPE_MTJMPR:
23581 case TYPE_ISYNC:
23582 case TYPE_SYNC:
23583 case TYPE_LOAD_L:
23584 case TYPE_STORE_C:
23585 case TYPE_LOAD_U:
23586 case TYPE_LOAD_UX:
23587 case TYPE_LOAD_EXT_UX:
23588 case TYPE_STORE_U:
23589 case TYPE_STORE_UX:
23590 case TYPE_FPLOAD_U:
23591 case TYPE_FPLOAD_UX:
23592 case TYPE_FPSTORE_U:
23593 case TYPE_FPSTORE_UX:
23594 return true;
23595 default:
23596 break;
23598 break;
23599 case PROCESSOR_POWER7:
23600 type = get_attr_type (insn);
23602 switch (type)
23604 case TYPE_CR_LOGICAL:
23605 case TYPE_MFCR:
23606 case TYPE_MFCRF:
23607 case TYPE_MTCR:
23608 case TYPE_IDIV:
23609 case TYPE_LDIV:
23610 case TYPE_COMPARE:
23611 case TYPE_DELAYED_COMPARE:
23612 case TYPE_VAR_DELAYED_COMPARE:
23613 case TYPE_ISYNC:
23614 case TYPE_LOAD_L:
23615 case TYPE_STORE_C:
23616 case TYPE_LOAD_U:
23617 case TYPE_LOAD_UX:
23618 case TYPE_LOAD_EXT:
23619 case TYPE_LOAD_EXT_U:
23620 case TYPE_LOAD_EXT_UX:
23621 case TYPE_STORE_U:
23622 case TYPE_STORE_UX:
23623 case TYPE_FPLOAD_U:
23624 case TYPE_FPLOAD_UX:
23625 case TYPE_FPSTORE_U:
23626 case TYPE_FPSTORE_UX:
23627 case TYPE_MFJMPR:
23628 case TYPE_MTJMPR:
23629 return true;
23630 default:
23631 break;
23633 break;
23634 default:
23635 break;
23638 return false;
23641 static bool
23642 insn_must_be_last_in_group (rtx insn)
23644 enum attr_type type;
23646 if (!insn
23647 || GET_CODE (insn) == NOTE
23648 || DEBUG_INSN_P (insn)
23649 || GET_CODE (PATTERN (insn)) == USE
23650 || GET_CODE (PATTERN (insn)) == CLOBBER)
23651 return false;
23653 switch (rs6000_cpu) {
23654 case PROCESSOR_POWER4:
23655 case PROCESSOR_POWER5:
23656 if (is_microcoded_insn (insn))
23657 return true;
23659 if (is_branch_slot_insn (insn))
23660 return true;
23662 break;
23663 case PROCESSOR_POWER6:
23664 type = get_attr_type (insn);
23666 switch (type)
23668 case TYPE_EXTS:
23669 case TYPE_CNTLZ:
23670 case TYPE_SHIFT:
23671 case TYPE_VAR_SHIFT_ROTATE:
23672 case TYPE_TRAP:
23673 case TYPE_IMUL:
23674 case TYPE_IMUL2:
23675 case TYPE_IMUL3:
23676 case TYPE_LMUL:
23677 case TYPE_IDIV:
23678 case TYPE_DELAYED_COMPARE:
23679 case TYPE_IMUL_COMPARE:
23680 case TYPE_LMUL_COMPARE:
23681 case TYPE_FPCOMPARE:
23682 case TYPE_MFCR:
23683 case TYPE_MTCR:
23684 case TYPE_MFJMPR:
23685 case TYPE_MTJMPR:
23686 case TYPE_ISYNC:
23687 case TYPE_SYNC:
23688 case TYPE_LOAD_L:
23689 case TYPE_STORE_C:
23690 return true;
23691 default:
23692 break;
23694 break;
23695 case PROCESSOR_POWER7:
23696 type = get_attr_type (insn);
23698 switch (type)
23700 case TYPE_ISYNC:
23701 case TYPE_SYNC:
23702 case TYPE_LOAD_L:
23703 case TYPE_STORE_C:
23704 case TYPE_LOAD_EXT_U:
23705 case TYPE_LOAD_EXT_UX:
23706 case TYPE_STORE_UX:
23707 return true;
23708 default:
23709 break;
23711 break;
23712 default:
23713 break;
23716 return false;
23719 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
23720 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
23722 static bool
23723 is_costly_group (rtx *group_insns, rtx next_insn)
23725 int i;
23726 int issue_rate = rs6000_issue_rate ();
23728 for (i = 0; i < issue_rate; i++)
23730 sd_iterator_def sd_it;
23731 dep_t dep;
23732 rtx insn = group_insns[i];
23734 if (!insn)
23735 continue;
23737 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
23739 rtx next = DEP_CON (dep);
23741 if (next == next_insn
23742 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
23743 return true;
23747 return false;
23750 /* Utility function used by redefine_groups.
23751 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
23752 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
23753 to keep it "far" (in a separate group) from GROUP_INSNS, following
23754 one of the following schemes, depending on the value of the flag
23755 -minsert-sched-nops = X:
23756 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
23757 in order to force NEXT_INSN into a separate group.
23758 (2) X < sched_finish_regroup_exact: insert exactly X nops.
23759 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
23760 insertion (has a group just ended, how many vacant issue slots remain in the
23761 last group, and how many dispatch groups were encountered so far). */
23763 static int
23764 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
23765 rtx next_insn, bool *group_end, int can_issue_more,
23766 int *group_count)
23768 rtx nop;
23769 bool force;
23770 int issue_rate = rs6000_issue_rate ();
23771 bool end = *group_end;
23772 int i;
23774 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
23775 return can_issue_more;
23777 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
23778 return can_issue_more;
23780 force = is_costly_group (group_insns, next_insn);
23781 if (!force)
23782 return can_issue_more;
23784 if (sched_verbose > 6)
23785 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
23786 *group_count ,can_issue_more);
23788 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
23790 if (*group_end)
23791 can_issue_more = 0;
23793 /* Since only a branch can be issued in the last issue_slot, it is
23794 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
23795 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
23796 in this case the last nop will start a new group and the branch
23797 will be forced to the new group. */
23798 if (can_issue_more && !is_branch_slot_insn (next_insn))
23799 can_issue_more--;
23801 /* Power6 and Power7 have a special group-ending nop. */
23802 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
23804 nop = gen_group_ending_nop ();
23805 emit_insn_before (nop, next_insn);
23806 can_issue_more = 0;
23808 else
23809 while (can_issue_more > 0)
23811 nop = gen_nop ();
23812 emit_insn_before (nop, next_insn);
23813 can_issue_more--;
23816 *group_end = true;
23817 return 0;
23820 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
23822 int n_nops = rs6000_sched_insert_nops;
23824 /* Nops can't be issued from the branch slot, so the effective
23825 issue_rate for nops is 'issue_rate - 1'. */
23826 if (can_issue_more == 0)
23827 can_issue_more = issue_rate;
23828 can_issue_more--;
23829 if (can_issue_more == 0)
23831 can_issue_more = issue_rate - 1;
23832 (*group_count)++;
23833 end = true;
23834 for (i = 0; i < issue_rate; i++)
23836 group_insns[i] = 0;
23840 while (n_nops > 0)
23842 nop = gen_nop ();
23843 emit_insn_before (nop, next_insn);
23844 if (can_issue_more == issue_rate - 1) /* new group begins */
23845 end = false;
23846 can_issue_more--;
23847 if (can_issue_more == 0)
23849 can_issue_more = issue_rate - 1;
23850 (*group_count)++;
23851 end = true;
23852 for (i = 0; i < issue_rate; i++)
23854 group_insns[i] = 0;
23857 n_nops--;
23860 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
23861 can_issue_more++;
23863 /* Is next_insn going to start a new group? */
23864 *group_end
23865 = (end
23866 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
23867 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
23868 || (can_issue_more < issue_rate &&
23869 insn_terminates_group_p (next_insn, previous_group)));
23870 if (*group_end && end)
23871 (*group_count)--;
23873 if (sched_verbose > 6)
23874 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
23875 *group_count, can_issue_more);
23876 return can_issue_more;
23879 return can_issue_more;
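/* For illustration: in the sched_finish_regroup_exact scheme on a
   4-issue CPU, if a costly NEXT_INSN arrives with 2 vacant slots, one
   nop is emitted when NEXT_INSN is not a branch (the branch slot need
   not be filled) and two when it is, so NEXT_INSN starts a new
   dispatch group either way.  */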
23882 /* This function tries to synch the dispatch groups that the compiler "sees"
23883 with the dispatch groups that the processor dispatcher is expected to
23884 form in practice. It tries to achieve this synchronization by forcing the
23885 estimated processor grouping on the compiler (as opposed to the function
23886 'pad_groups' which tries to force the scheduler's grouping on the processor).
23888 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
23889 examines the (estimated) dispatch groups that will be formed by the processor
23890 dispatcher. It marks these group boundaries to reflect the estimated
23891 processor grouping, overriding the grouping that the scheduler had marked.
23892 Depending on the value of the flag '-minsert-sched-nops' this function can
23893 force certain insns into separate groups or force a certain distance between
23894 them by inserting nops, for example, if there exists a "costly dependence"
23895 between the insns.
23897 The function estimates the group boundaries that the processor will form as
23898 follows: It keeps track of how many vacant issue slots are available after
23899 each insn. A subsequent insn will start a new group if one of the following
23900 4 cases applies:
23901 - no more vacant issue slots remain in the current dispatch group.
23902 - only the last issue slot, which is the branch slot, is vacant, but the next
23903 insn is not a branch.
23904 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
23905 which means that a cracked insn (which occupies two issue slots) can't be
23906 issued in this group.
23907 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
23908 start a new group. */
23910 static int
23911 redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
23913 rtx insn, next_insn;
23914 int issue_rate;
23915 int can_issue_more;
23916 int slot, i;
23917 bool group_end;
23918 int group_count = 0;
23919 rtx *group_insns;
23921 /* Initialize. */
23922 issue_rate = rs6000_issue_rate ();
23923 group_insns = XALLOCAVEC (rtx, issue_rate);
23924 for (i = 0; i < issue_rate; i++)
23926 group_insns[i] = 0;
23928 can_issue_more = issue_rate;
23929 slot = 0;
23930 insn = get_next_active_insn (prev_head_insn, tail);
23931 group_end = false;
23933 while (insn != NULL_RTX)
23935 slot = (issue_rate - can_issue_more);
23936 group_insns[slot] = insn;
23937 can_issue_more =
23938 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
23939 if (insn_terminates_group_p (insn, current_group))
23940 can_issue_more = 0;
23942 next_insn = get_next_active_insn (insn, tail);
23943 if (next_insn == NULL_RTX)
23944 return group_count + 1;
23946 /* Is next_insn going to start a new group? */
23947 group_end
23948 = (can_issue_more == 0
23949 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
23950 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
23951 || (can_issue_more < issue_rate &&
23952 insn_terminates_group_p (next_insn, previous_group)));
23954 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
23955 next_insn, &group_end, can_issue_more,
23956 &group_count);
23958 if (group_end)
23960 group_count++;
23961 can_issue_more = 0;
23962 for (i = 0; i < issue_rate; i++)
23964 group_insns[i] = 0;
23968 if (GET_MODE (next_insn) == TImode && can_issue_more)
23969 PUT_MODE (next_insn, VOIDmode);
23970 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
23971 PUT_MODE (next_insn, TImode);
23973 insn = next_insn;
23974 if (can_issue_more == 0)
23975 can_issue_more = issue_rate;
23976 } /* while */
23978 return group_count;
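/* For illustration: after redefine_groups, GET_MODE (insn) == TImode
   marks the first insn of each estimated dispatch group and VOIDmode
   marks the rest; e.g. a microcoded insn ends its group immediately,
   so the insn that follows it is put in TImode.  */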
23981 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
23982 dispatch group boundaries that the scheduler had marked. Pad with nops
23983 any dispatch groups which have vacant issue slots, in order to force the
23984 scheduler's grouping on the processor dispatcher. The function
23985 returns the number of dispatch groups found. */
23987 static int
23988 pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
23990 rtx insn, next_insn;
23991 rtx nop;
23992 int issue_rate;
23993 int can_issue_more;
23994 int group_end;
23995 int group_count = 0;
23997 /* Initialize issue_rate. */
23998 issue_rate = rs6000_issue_rate ();
23999 can_issue_more = issue_rate;
24001 insn = get_next_active_insn (prev_head_insn, tail);
24002 next_insn = get_next_active_insn (insn, tail);
24004 while (insn != NULL_RTX)
24006 can_issue_more =
24007 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
24009 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
24011 if (next_insn == NULL_RTX)
24012 break;
24014 if (group_end)
24016 /* If the scheduler had marked group termination at this location
24017 (between insn and next_insn), and neither insn nor next_insn will
24018 force group termination, pad the group with nops to force group
24019 termination. */
24020 if (can_issue_more
24021 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
24022 && !insn_terminates_group_p (insn, current_group)
24023 && !insn_terminates_group_p (next_insn, previous_group))
24025 if (!is_branch_slot_insn (next_insn))
24026 can_issue_more--;
24028 while (can_issue_more)
24030 nop = gen_nop ();
24031 emit_insn_before (nop, next_insn);
24032 can_issue_more--;
24036 can_issue_more = issue_rate;
24037 group_count++;
24040 insn = next_insn;
24041 next_insn = get_next_active_insn (insn, tail);
24044 return group_count;
24047 /* We're beginning a new block. Initialize data structures as necessary. */
24049 static void
24050 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
24051 int sched_verbose ATTRIBUTE_UNUSED,
24052 int max_ready ATTRIBUTE_UNUSED)
24054 last_scheduled_insn = NULL_RTX;
24055 load_store_pendulum = 0;
24058 /* The following function is called at the end of scheduling BB.
24059 After reload, it inserts nops to enforce the insn group bundling. */
24061 static void
24062 rs6000_sched_finish (FILE *dump, int sched_verbose)
24064 int n_groups;
24066 if (sched_verbose)
24067 fprintf (dump, "=== Finishing schedule.\n");
24069 if (reload_completed && rs6000_sched_groups)
24071 /* Do not run sched_finish hook when selective scheduling enabled. */
24072 if (sel_sched_p ())
24073 return;
24075 if (rs6000_sched_insert_nops == sched_finish_none)
24076 return;
24078 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
24079 n_groups = pad_groups (dump, sched_verbose,
24080 current_sched_info->prev_head,
24081 current_sched_info->next_tail);
24082 else
24083 n_groups = redefine_groups (dump, sched_verbose,
24084 current_sched_info->prev_head,
24085 current_sched_info->next_tail);
24087 if (sched_verbose >= 6)
24089 fprintf (dump, "ngroups = %d\n", n_groups);
24090 print_rtl (dump, current_sched_info->prev_head);
24091 fprintf (dump, "Done finish_sched\n");
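/* Per-block scheduling state saved and restored by the selective
   scheduling hooks below; the fields mirror the file-scope variables of
   the same names.  */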
24096 struct _rs6000_sched_context
24098 short cached_can_issue_more;
24099 rtx last_scheduled_insn;
24100 int load_store_pendulum;
24103 typedef struct _rs6000_sched_context rs6000_sched_context_def;
24104 typedef rs6000_sched_context_def *rs6000_sched_context_t;
24106 /* Allocate storage for a new scheduling context. */
24107 static void *
24108 rs6000_alloc_sched_context (void)
24110 return xmalloc (sizeof (rs6000_sched_context_def));
24113 /* If CLEAN_P is true, initialize _SC with clean data;
24114 otherwise initialize it from the global scheduling context. */
24115 static void
24116 rs6000_init_sched_context (void *_sc, bool clean_p)
24118 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24120 if (clean_p)
24122 sc->cached_can_issue_more = 0;
24123 sc->last_scheduled_insn = NULL_RTX;
24124 sc->load_store_pendulum = 0;
24126 else
24128 sc->cached_can_issue_more = cached_can_issue_more;
24129 sc->last_scheduled_insn = last_scheduled_insn;
24130 sc->load_store_pendulum = load_store_pendulum;
24134 /* Sets the global scheduling context to the one pointed to by _SC. */
24135 static void
24136 rs6000_set_sched_context (void *_sc)
24138 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
24140 gcc_assert (sc != NULL);
24142 cached_can_issue_more = sc->cached_can_issue_more;
24143 last_scheduled_insn = sc->last_scheduled_insn;
24144 load_store_pendulum = sc->load_store_pendulum;
24147 /* Free _SC. */
24148 static void
24149 rs6000_free_sched_context (void *_sc)
24151 gcc_assert (_sc != NULL);
24153 free (_sc);
24157 /* Length in units of the trampoline for entering a nested function. */
24160 rs6000_trampoline_size (void)
24162 int ret = 0;
24164 switch (DEFAULT_ABI)
24166 default:
24167 gcc_unreachable ();
24169 case ABI_AIX:
24170 ret = (TARGET_32BIT) ? 12 : 24;
24171 break;
24173 case ABI_DARWIN:
24174 case ABI_V4:
24175 ret = (TARGET_32BIT) ? 40 : 48;
24176 break;
24179 return ret;
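/* The sizes above follow from rs6000_trampoline_init below: under AIX the
   trampoline is just a three-word function descriptor (3 * 4 bytes for
   32-bit, 3 * 8 bytes for 64-bit), while under V.4/eabi/Darwin it must be
   large enough to hold the code that __trampoline_setup writes into it.  */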
24182 /* Emit RTL insns to initialize the variable parts of a trampoline.
24183 FNADDR is an RTX for the address of the function's pure code.
24184 CXT is an RTX for the static chain value for the function. */
24186 static void
24187 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
24189 int regsize = (TARGET_32BIT) ? 4 : 8;
24190 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
24191 rtx ctx_reg = force_reg (Pmode, cxt);
24192 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
24194 switch (DEFAULT_ABI)
24196 default:
24197 gcc_unreachable ();
24199 /* Under AIX, just build the 3-word function descriptor. */
24200 case ABI_AIX:
24202 rtx fnmem, fn_reg, toc_reg;
24204 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
24205 error ("You cannot take the address of a nested function if you use "
24206 "the -mno-pointers-to-nested-functions option.");
24208 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
24209 fn_reg = gen_reg_rtx (Pmode);
24210 toc_reg = gen_reg_rtx (Pmode);
24212 /* Macro to shorten the code expansions below. */
24213 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
24215 m_tramp = replace_equiv_address (m_tramp, addr);
24217 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
24218 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
24219 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
24220 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
24221 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
24223 # undef MEM_PLUS
24225 break;
24227 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
24228 case ABI_DARWIN:
24229 case ABI_V4:
24230 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
24231 LCT_NORMAL, VOIDmode, 4,
24232 addr, Pmode,
24233 GEN_INT (rs6000_trampoline_size ()), SImode,
24234 fnaddr, Pmode,
24235 ctx_reg, Pmode);
24236 break;
24241 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
24242 identifier as an argument, so the front end shouldn't look it up. */
24244 static bool
24245 rs6000_attribute_takes_identifier_p (const_tree attr_id)
24247 return is_attribute_p ("altivec", attr_id);
24250 /* Handle the "altivec" attribute. The attribute may have
24251 arguments as follows:
24253 __attribute__((altivec(vector__)))
24254 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
24255 __attribute__((altivec(bool__))) (always followed by 'unsigned')
24257 and may appear more than once (e.g., 'vector bool char') in a
24258 given declaration. */
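/* Illustrative example (not part of the original source): the
   context-sensitive keywords provided by altivec.h and the front ends
   expand to this attribute, so

       vector unsigned int v;

   is roughly equivalent to

       __attribute__((altivec(vector__))) unsigned int v;

   which the handler below turns into a V4SI-mode vector type.  */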
24260 static tree
24261 rs6000_handle_altivec_attribute (tree *node,
24262 tree name ATTRIBUTE_UNUSED,
24263 tree args,
24264 int flags ATTRIBUTE_UNUSED,
24265 bool *no_add_attrs)
24267 tree type = *node, result = NULL_TREE;
24268 enum machine_mode mode;
24269 int unsigned_p;
24270 char altivec_type
24271 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
24272 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
24273 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
24274 : '?');
24276 while (POINTER_TYPE_P (type)
24277 || TREE_CODE (type) == FUNCTION_TYPE
24278 || TREE_CODE (type) == METHOD_TYPE
24279 || TREE_CODE (type) == ARRAY_TYPE)
24280 type = TREE_TYPE (type);
24282 mode = TYPE_MODE (type);
24284 /* Check for invalid AltiVec type qualifiers. */
24285 if (type == long_double_type_node)
24286 error ("use of %<long double%> in AltiVec types is invalid");
24287 else if (type == boolean_type_node)
24288 error ("use of boolean types in AltiVec types is invalid");
24289 else if (TREE_CODE (type) == COMPLEX_TYPE)
24290 error ("use of %<complex%> in AltiVec types is invalid");
24291 else if (DECIMAL_FLOAT_MODE_P (mode))
24292 error ("use of decimal floating point types in AltiVec types is invalid");
24293 else if (!TARGET_VSX)
24295 if (type == long_unsigned_type_node || type == long_integer_type_node)
24297 if (TARGET_64BIT)
24298 error ("use of %<long%> in AltiVec types is invalid for "
24299 "64-bit code without -mvsx");
24300 else if (rs6000_warn_altivec_long)
24301 warning (0, "use of %<long%> in AltiVec types is deprecated; "
24302 "use %<int%>");
24304 else if (type == long_long_unsigned_type_node
24305 || type == long_long_integer_type_node)
24306 error ("use of %<long long%> in AltiVec types is invalid without "
24307 "-mvsx");
24308 else if (type == double_type_node)
24309 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
24312 switch (altivec_type)
24314 case 'v':
24315 unsigned_p = TYPE_UNSIGNED (type);
24316 switch (mode)
24318 case DImode:
24319 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
24320 break;
24321 case SImode:
24322 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
24323 break;
24324 case HImode:
24325 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
24326 break;
24327 case QImode:
24328 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
24329 break;
24330 case SFmode: result = V4SF_type_node; break;
24331 case DFmode: result = V2DF_type_node; break;
24332 /* If the user says 'vector int bool', we may be handed the 'bool'
24333 attribute _before_ the 'vector' attribute, and so select the
24334 proper type in the 'b' case below. */
24335 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
24336 case V2DImode: case V2DFmode:
24337 result = type;
24338 default: break;
24340 break;
24341 case 'b':
24342 switch (mode)
24344 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
24345 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
24346 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
24347 case QImode: case V16QImode: result = bool_V16QI_type_node;
24348 default: break;
24350 break;
24351 case 'p':
24352 switch (mode)
24354 case V8HImode: result = pixel_V8HI_type_node;
24355 default: break;
24357 default: break;
24360 /* Propagate qualifiers attached to the element type
24361 onto the vector type. */
24362 if (result && result != type && TYPE_QUALS (type))
24363 result = build_qualified_type (result, TYPE_QUALS (type));
24365 *no_add_attrs = true; /* No need to hang on to the attribute. */
24367 if (result)
24368 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
24370 return NULL_TREE;
24373 /* AltiVec defines four built-in scalar types that serve as vector
24374 elements; we must teach the compiler how to mangle them. */
24376 static const char *
24377 rs6000_mangle_type (const_tree type)
24379 type = TYPE_MAIN_VARIANT (type);
24381 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
24382 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
24383 return NULL;
24385 if (type == bool_char_type_node) return "U6__boolc";
24386 if (type == bool_short_type_node) return "U6__bools";
24387 if (type == pixel_type_node) return "u7__pixel";
24388 if (type == bool_int_type_node) return "U6__booli";
24389 if (type == bool_long_type_node) return "U6__booll";
24391 /* Mangle IBM extended float long double as `g' (__float128) on
24392 powerpc*-linux where long-double-64 previously was the default. */
24393 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
24394 && TARGET_ELF
24395 && TARGET_LONG_DOUBLE_128
24396 && !TARGET_IEEEQUAD)
24397 return "g";
24399 /* For all other types, use normal C++ mangling. */
24400 return NULL;
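/* For instance, the element type of 'vector bool int' (bool_int_type_node)
   is mangled as "U6__booli" and '__pixel' as "u7__pixel"; everything else
   falls through to the default C++ mangling.  */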
24403 /* Handle a "longcall" or "shortcall" attribute; arguments as in
24404 struct attribute_spec.handler. */
24406 static tree
24407 rs6000_handle_longcall_attribute (tree *node, tree name,
24408 tree args ATTRIBUTE_UNUSED,
24409 int flags ATTRIBUTE_UNUSED,
24410 bool *no_add_attrs)
24412 if (TREE_CODE (*node) != FUNCTION_TYPE
24413 && TREE_CODE (*node) != FIELD_DECL
24414 && TREE_CODE (*node) != TYPE_DECL)
24416 warning (OPT_Wattributes, "%qE attribute only applies to functions",
24417 name);
24418 *no_add_attrs = true;
24421 return NULL_TREE;
24424 /* Set longcall attributes on all functions declared when
24425 rs6000_default_long_calls is true. */
24426 static void
24427 rs6000_set_default_type_attributes (tree type)
24429 if (rs6000_default_long_calls
24430 && (TREE_CODE (type) == FUNCTION_TYPE
24431 || TREE_CODE (type) == METHOD_TYPE))
24432 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
24433 NULL_TREE,
24434 TYPE_ATTRIBUTES (type));
24436 #if TARGET_MACHO
24437 darwin_set_default_type_attributes (type);
24438 #endif
24441 /* Return a reference suitable for calling a function with the
24442 longcall attribute. */
24445 rs6000_longcall_ref (rtx call_ref)
24447 const char *call_name;
24448 tree node;
24450 if (GET_CODE (call_ref) != SYMBOL_REF)
24451 return call_ref;
24453 /* System V adds '.' to the internal name, so skip any leading dots. */
24454 call_name = XSTR (call_ref, 0);
24455 if (*call_name == '.')
24457 while (*call_name == '.')
24458 call_name++;
24460 node = get_identifier (call_name);
24461 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
24464 return force_reg (Pmode, call_ref);
24467 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
24468 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
24469 #endif
24471 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24472 struct attribute_spec.handler. */
24473 static tree
24474 rs6000_handle_struct_attribute (tree *node, tree name,
24475 tree args ATTRIBUTE_UNUSED,
24476 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24478 tree *type = NULL;
24479 if (DECL_P (*node))
24481 if (TREE_CODE (*node) == TYPE_DECL)
24482 type = &TREE_TYPE (*node);
24484 else
24485 type = node;
24487 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24488 || TREE_CODE (*type) == UNION_TYPE)))
24490 warning (OPT_Wattributes, "%qE attribute ignored", name);
24491 *no_add_attrs = true;
24494 else if ((is_attribute_p ("ms_struct", name)
24495 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24496 || ((is_attribute_p ("gcc_struct", name)
24497 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24499 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
24500 name);
24501 *no_add_attrs = true;
24504 return NULL_TREE;
24507 static bool
24508 rs6000_ms_bitfield_layout_p (const_tree record_type)
24510 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
24511 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24512 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
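/* Illustrative example (not part of the original source): under the MS
   layout a bit-field starts a new allocation unit of its declared type
   when it follows a field of a different type, so for

       struct { char c; short s : 8; } x;

   sizeof (x) is 4 with ms_struct (s starts a new short at offset 2) but
   2 with the default GCC layout (s packs into the byte at offset 1).  */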
24515 #ifdef USING_ELFOS_H
24517 /* A get_unnamed_section callback, used for switching to toc_section. */
24519 static void
24520 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
24522 if (DEFAULT_ABI == ABI_AIX
24523 && TARGET_MINIMAL_TOC
24524 && !TARGET_RELOCATABLE)
24526 if (!toc_initialized)
24528 toc_initialized = 1;
24529 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24530 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
24531 fprintf (asm_out_file, "\t.tc ");
24532 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
24533 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24534 fprintf (asm_out_file, "\n");
24536 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24537 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24538 fprintf (asm_out_file, " = .+32768\n");
24540 else
24541 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24543 else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
24544 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
24545 else
24547 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
24548 if (!toc_initialized)
24550 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
24551 fprintf (asm_out_file, " = .+32768\n");
24552 toc_initialized = 1;
24557 /* Implement TARGET_ASM_INIT_SECTIONS. */
24559 static void
24560 rs6000_elf_asm_init_sections (void)
24562 toc_section
24563 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
24565 sdata2_section
24566 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
24567 SDATA2_SECTION_ASM_OP);
24570 /* Implement TARGET_SELECT_RTX_SECTION. */
24572 static section *
24573 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
24574 unsigned HOST_WIDE_INT align)
24576 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
24577 return toc_section;
24578 else
24579 return default_elf_select_rtx_section (mode, x, align);
24582 /* For a SYMBOL_REF, set generic flags and then perform some
24583 target-specific processing.
24585 When the AIX ABI is requested on a non-AIX system, replace the
24586 function name with the real name (with a leading .) rather than the
24587 function descriptor name. This saves a lot of overriding code
24588 that would otherwise be needed to handle the prefixes. */
24590 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
24591 static void
24592 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
24594 default_encode_section_info (decl, rtl, first);
24596 if (first
24597 && TREE_CODE (decl) == FUNCTION_DECL
24598 && !TARGET_AIX
24599 && DEFAULT_ABI == ABI_AIX)
24601 rtx sym_ref = XEXP (rtl, 0);
24602 size_t len = strlen (XSTR (sym_ref, 0));
24603 char *str = XALLOCAVEC (char, len + 2);
24604 str[0] = '.';
24605 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
24606 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
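/* Return true if SECTION names TEMPL itself or a subsection of it, i.e.
   SECTION equals TEMPL or starts with TEMPL followed by a '.'.  */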
24610 static inline bool
24611 compare_section_name (const char *section, const char *templ)
24613 int len;
24615 len = strlen (templ);
24616 return (strncmp (section, templ, len) == 0
24617 && (section[len] == 0 || section[len] == '.'));
24620 bool
24621 rs6000_elf_in_small_data_p (const_tree decl)
24623 if (rs6000_sdata == SDATA_NONE)
24624 return false;
24626 /* We want to merge strings, so we never consider them small data. */
24627 if (TREE_CODE (decl) == STRING_CST)
24628 return false;
24630 /* Functions are never in the small data area. */
24631 if (TREE_CODE (decl) == FUNCTION_DECL)
24632 return false;
24634 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
24636 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
24637 if (compare_section_name (section, ".sdata")
24638 || compare_section_name (section, ".sdata2")
24639 || compare_section_name (section, ".gnu.linkonce.s")
24640 || compare_section_name (section, ".sbss")
24641 || compare_section_name (section, ".sbss2")
24642 || compare_section_name (section, ".gnu.linkonce.sb")
24643 || strcmp (section, ".PPC.EMB.sdata0") == 0
24644 || strcmp (section, ".PPC.EMB.sbss0") == 0)
24645 return true;
24647 else
24649 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
24651 if (size > 0
24652 && size <= g_switch_value
24653 /* If it's not public, and we're not going to reference it there,
24654 there's no need to put it in the small data section. */
24655 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
24656 return true;
24659 return false;
24662 #endif /* USING_ELFOS_H */
24664 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
24666 static bool
24667 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
24669 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
24672 /* Return a REG that occurs in ADDR with coefficient 1.
24673 ADDR can be effectively incremented by incrementing REG.
24675 r0 is special and we must not select it as an address
24676 register by this routine since our caller will try to
24677 increment the returned register via an "la" instruction. */
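/* For example (illustrative): given (plus (reg 9) (const_int 8)) this
   returns (reg 9); given (plus (plus (reg 9) (reg 10)) (const_int 8)) it
   also returns (reg 9), the first suitable non-r0 register found.  */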
24680 find_addr_reg (rtx addr)
24682 while (GET_CODE (addr) == PLUS)
24684 if (GET_CODE (XEXP (addr, 0)) == REG
24685 && REGNO (XEXP (addr, 0)) != 0)
24686 addr = XEXP (addr, 0);
24687 else if (GET_CODE (XEXP (addr, 1)) == REG
24688 && REGNO (XEXP (addr, 1)) != 0)
24689 addr = XEXP (addr, 1);
24690 else if (CONSTANT_P (XEXP (addr, 0)))
24691 addr = XEXP (addr, 1);
24692 else if (CONSTANT_P (XEXP (addr, 1)))
24693 addr = XEXP (addr, 0);
24694 else
24695 gcc_unreachable ();
24697 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
24698 return addr;
24701 void
24702 rs6000_fatal_bad_address (rtx op)
24704 fatal_insn ("bad address", op);
24707 #if TARGET_MACHO
24709 typedef struct branch_island_d {
24710 tree function_name;
24711 tree label_name;
24712 int line_number;
24713 } branch_island;
24715 DEF_VEC_O(branch_island);
24716 DEF_VEC_ALLOC_O(branch_island,gc);
24718 static VEC(branch_island,gc) *branch_islands;
24720 /* Remember to generate a branch island for far calls to the given
24721 function. */
24723 static void
24724 add_compiler_branch_island (tree label_name, tree function_name,
24725 int line_number)
24727 branch_island *bi = VEC_safe_push (branch_island, gc, branch_islands, NULL);
24729 bi->function_name = function_name;
24730 bi->label_name = label_name;
24731 bi->line_number = line_number;
24734 /* Generate far-jump branch islands for everything recorded in
24735 branch_islands. Invoked immediately after the last instruction of
24736 the epilogue has been emitted; the branch islands must be appended
24737 to, and contiguous with, the function body. Mach-O stubs are
24738 generated in machopic_output_stub(). */
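/* Illustrative sketch (not part of the original source) of the non-PIC
   island emitted below for a function _foo reachable through label L42:

       L42:
	       lis r12,hi16(_foo)
	       ori r12,r12,lo16(_foo)
	       mtctr r12
	       bctr

   The PIC variants instead materialize the address of _foo relative to a
   local label whose runtime address is recovered with mflr.  */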
24740 static void
24741 macho_branch_islands (void)
24743 char tmp_buf[512];
24745 while (!VEC_empty (branch_island, branch_islands))
24747 branch_island *bi = VEC_last (branch_island, branch_islands);
24748 const char *label = IDENTIFIER_POINTER (bi->label_name);
24749 const char *name = IDENTIFIER_POINTER (bi->function_name);
24750 char name_buf[512];
24751 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
24752 if (name[0] == '*' || name[0] == '&')
24753 strcpy (name_buf, name+1);
24754 else
24756 name_buf[0] = '_';
24757 strcpy (name_buf+1, name);
24759 strcpy (tmp_buf, "\n");
24760 strcat (tmp_buf, label);
24761 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24762 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24763 dbxout_stabd (N_SLINE, bi->line_number);
24764 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24765 if (flag_pic)
24767 if (TARGET_LINK_STACK)
24769 char name[32];
24770 get_ppc476_thunk_name (name);
24771 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
24772 strcat (tmp_buf, name);
24773 strcat (tmp_buf, "\n");
24774 strcat (tmp_buf, label);
24775 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24777 else
24779 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
24780 strcat (tmp_buf, label);
24781 strcat (tmp_buf, "_pic\n");
24782 strcat (tmp_buf, label);
24783 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
24786 strcat (tmp_buf, "\taddis r11,r11,ha16(");
24787 strcat (tmp_buf, name_buf);
24788 strcat (tmp_buf, " - ");
24789 strcat (tmp_buf, label);
24790 strcat (tmp_buf, "_pic)\n");
24792 strcat (tmp_buf, "\tmtlr r0\n");
24794 strcat (tmp_buf, "\taddi r12,r11,lo16(");
24795 strcat (tmp_buf, name_buf);
24796 strcat (tmp_buf, " - ");
24797 strcat (tmp_buf, label);
24798 strcat (tmp_buf, "_pic)\n");
24800 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
24802 else
24804 strcat (tmp_buf, ":\nlis r12,hi16(");
24805 strcat (tmp_buf, name_buf);
24806 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
24807 strcat (tmp_buf, name_buf);
24808 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
24810 output_asm_insn (tmp_buf, 0);
24811 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
24812 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
24813 dbxout_stabd (N_SLINE, bi->line_number);
24814 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
24815 VEC_pop (branch_island, branch_islands);
24819 /* NO_PREVIOUS_DEF checks whether the given function name already
24820 appears in the branch island list. */
24822 static int
24823 no_previous_def (tree function_name)
24825 branch_island *bi;
24826 unsigned ix;
24828 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
24829 if (function_name == bi->function_name)
24830 return 0;
24831 return 1;
24834 /* GET_PREV_LABEL gets the label name from the previous definition of
24835 the function. */
24837 static tree
24838 get_prev_label (tree function_name)
24840 branch_island *bi;
24841 unsigned ix;
24843 FOR_EACH_VEC_ELT (branch_island, branch_islands, ix, bi)
24844 if (function_name == bi->function_name)
24845 return bi->label_name;
24846 return NULL_TREE;
24849 /* INSN is either a function call or a millicode call. It may have an
24850 unconditional jump in its delay slot.
24852 CALL_DEST is the routine we are calling. */
24854 char *
24855 output_call (rtx insn, rtx *operands, int dest_operand_number,
24856 int cookie_operand_number)
24858 static char buf[256];
24859 if (darwin_emit_branch_islands
24860 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
24861 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
24863 tree labelname;
24864 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
24866 if (no_previous_def (funname))
24868 rtx label_rtx = gen_label_rtx ();
24869 char *label_buf, temp_buf[256];
24870 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
24871 CODE_LABEL_NUMBER (label_rtx));
24872 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
24873 labelname = get_identifier (label_buf);
24874 add_compiler_branch_island (labelname, funname, insn_line (insn));
24876 else
24877 labelname = get_prev_label (funname);
24879 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
24880 instruction will reach 'foo', otherwise link as 'bl L42'".
24881 "L42" should be a 'branch island', that will do a far jump to
24882 'foo'. Branch islands are generated in
24883 macho_branch_islands(). */
24884 sprintf (buf, "jbsr %%z%d,%.246s",
24885 dest_operand_number, IDENTIFIER_POINTER (labelname));
24887 else
24888 sprintf (buf, "bl %%z%d", dest_operand_number);
24889 return buf;
24892 /* Generate PIC and indirect symbol stubs. */
24894 void
24895 machopic_output_stub (FILE *file, const char *symb, const char *stub)
24897 unsigned int length;
24898 char *symbol_name, *lazy_ptr_name;
24899 char *local_label_0;
24900 static int label = 0;
24902 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
24903 symb = (*targetm.strip_name_encoding) (symb);
24906 length = strlen (symb);
24907 symbol_name = XALLOCAVEC (char, length + 32);
24908 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
24910 lazy_ptr_name = XALLOCAVEC (char, length + 32);
24911 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
24913 if (flag_pic == 2)
24914 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
24915 else
24916 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
24918 if (flag_pic == 2)
24920 fprintf (file, "\t.align 5\n");
24922 fprintf (file, "%s:\n", stub);
24923 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24925 label++;
24926 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
24927 sprintf (local_label_0, "\"L%011d$spb\"", label);
24929 fprintf (file, "\tmflr r0\n");
24930 if (TARGET_LINK_STACK)
24932 char name[32];
24933 get_ppc476_thunk_name (name);
24934 fprintf (file, "\tbl %s\n", name);
24935 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
24937 else
24939 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
24940 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
24942 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
24943 lazy_ptr_name, local_label_0);
24944 fprintf (file, "\tmtlr r0\n");
24945 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
24946 (TARGET_64BIT ? "ldu" : "lwzu"),
24947 lazy_ptr_name, local_label_0);
24948 fprintf (file, "\tmtctr r12\n");
24949 fprintf (file, "\tbctr\n");
24951 else
24953 fprintf (file, "\t.align 4\n");
24955 fprintf (file, "%s:\n", stub);
24956 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24958 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
24959 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
24960 (TARGET_64BIT ? "ldu" : "lwzu"),
24961 lazy_ptr_name);
24962 fprintf (file, "\tmtctr r12\n");
24963 fprintf (file, "\tbctr\n");
24966 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
24967 fprintf (file, "%s:\n", lazy_ptr_name);
24968 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24969 fprintf (file, "%sdyld_stub_binding_helper\n",
24970 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
24973 /* Legitimize PIC addresses. If the address is already
24974 position-independent, we return ORIG. Newly generated
24975 position-independent addresses go into a reg. This is REG if
24976 nonzero; otherwise we allocate registers as necessary. */
24978 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
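/* SMALL_INT tests whether X fits in a signed 16-bit immediate: adding
   0x8000 maps the legal range [-0x8000, 0x7fff] onto [0, 0xffff], so a
   single unsigned comparison suffices.  */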
24981 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
24982 rtx reg)
24984 rtx base, offset;
24986 if (reg == NULL && ! reload_in_progress && ! reload_completed)
24987 reg = gen_reg_rtx (Pmode);
24989 if (GET_CODE (orig) == CONST)
24991 rtx reg_temp;
24993 if (GET_CODE (XEXP (orig, 0)) == PLUS
24994 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
24995 return orig;
24997 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
24999 /* Use a different reg for the intermediate value, as
25000 it will be marked UNCHANGING. */
25001 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
25002 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
25003 Pmode, reg_temp);
25004 offset =
25005 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
25006 Pmode, reg);
25008 if (GET_CODE (offset) == CONST_INT)
25010 if (SMALL_INT (offset))
25011 return plus_constant (Pmode, base, INTVAL (offset));
25012 else if (! reload_in_progress && ! reload_completed)
25013 offset = force_reg (Pmode, offset);
25014 else
25016 rtx mem = force_const_mem (Pmode, orig);
25017 return machopic_legitimize_pic_address (mem, Pmode, reg);
25020 return gen_rtx_PLUS (Pmode, base, offset);
25023 /* Fall back on generic machopic code. */
25024 return machopic_legitimize_pic_address (orig, mode, reg);
25027 /* Output a .machine directive for the Darwin assembler, and call
25028 the generic start_file routine. */
25030 static void
25031 rs6000_darwin_file_start (void)
25033 static const struct
25035 const char *arg;
25036 const char *name;
25037 int if_set;
25038 } mapping[] = {
25039 { "ppc64", "ppc64", MASK_64BIT },
25040 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
25041 { "power4", "ppc970", 0 },
25042 { "G5", "ppc970", 0 },
25043 { "7450", "ppc7450", 0 },
25044 { "7400", "ppc7400", MASK_ALTIVEC },
25045 { "G4", "ppc7400", 0 },
25046 { "750", "ppc750", 0 },
25047 { "740", "ppc750", 0 },
25048 { "G3", "ppc750", 0 },
25049 { "604e", "ppc604e", 0 },
25050 { "604", "ppc604", 0 },
25051 { "603e", "ppc603", 0 },
25052 { "603", "ppc603", 0 },
25053 { "601", "ppc601", 0 },
25054 { NULL, "ppc", 0 } };
25055 const char *cpu_id = "";
25056 size_t i;
25058 rs6000_file_start ();
25059 darwin_file_start ();
25061 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
25063 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
25064 cpu_id = rs6000_default_cpu;
25066 if (global_options_set.x_rs6000_cpu_index)
25067 cpu_id = processor_target_table[rs6000_cpu_index].name;
25069 /* Look through the mapping array. Pick the first name that either
25070 matches the argument, has a bit set in IF_SET that is also set
25071 in the target flags, or has a NULL name. */
25073 i = 0;
25074 while (mapping[i].arg != NULL
25075 && strcmp (mapping[i].arg, cpu_id) != 0
25076 && (mapping[i].if_set & target_flags) == 0)
25077 i++;
25079 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
25082 #endif /* TARGET_MACHO */
25084 #if TARGET_ELF
25085 static int
25086 rs6000_elf_reloc_rw_mask (void)
25088 if (flag_pic)
25089 return 3;
25090 else if (DEFAULT_ABI == ABI_AIX)
25091 return 2;
25092 else
25093 return 0;
25096 /* Record an element in the table of global constructors. SYMBOL is
25097 a SYMBOL_REF of the function to be called; PRIORITY is a number
25098 between 0 and MAX_INIT_PRIORITY.
25100 This differs from default_named_section_asm_out_constructor in
25101 that we have special handling for -mrelocatable. */
25103 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
25104 static void
25105 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
25107 const char *section = ".ctors";
25108 char buf[16];
25110 if (priority != DEFAULT_INIT_PRIORITY)
25112 sprintf (buf, ".ctors.%.5u",
25113 /* Invert the numbering so the linker puts us in the proper
25114 order; constructors are run from right to left, and the
25115 linker sorts in increasing order. */
25116 MAX_INIT_PRIORITY - priority);
25117 section = buf;
25120 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25121 assemble_align (POINTER_SIZE);
25123 if (TARGET_RELOCATABLE)
25125 fputs ("\t.long (", asm_out_file);
25126 output_addr_const (asm_out_file, symbol);
25127 fputs (")@fixup\n", asm_out_file);
25129 else
25130 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25133 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
25134 static void
25135 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
25137 const char *section = ".dtors";
25138 char buf[16];
25140 if (priority != DEFAULT_INIT_PRIORITY)
25142 sprintf (buf, ".dtors.%.5u",
25143 /* Invert the numbering so the linker puts us in the proper
25144 order; constructors are run from right to left, and the
25145 linker sorts in increasing order. */
25146 MAX_INIT_PRIORITY - priority);
25147 section = buf;
25150 switch_to_section (get_section (section, SECTION_WRITE, NULL));
25151 assemble_align (POINTER_SIZE);
25153 if (TARGET_RELOCATABLE)
25155 fputs ("\t.long (", asm_out_file);
25156 output_addr_const (asm_out_file, symbol);
25157 fputs (")@fixup\n", asm_out_file);
25159 else
25160 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
25163 void
25164 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
25166 if (TARGET_64BIT)
25168 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
25169 ASM_OUTPUT_LABEL (file, name);
25170 fputs (DOUBLE_INT_ASM_OP, file);
25171 rs6000_output_function_entry (file, name);
25172 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
25173 if (DOT_SYMBOLS)
25175 fputs ("\t.size\t", file);
25176 assemble_name (file, name);
25177 fputs (",24\n\t.type\t.", file);
25178 assemble_name (file, name);
25179 fputs (",@function\n", file);
25180 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
25182 fputs ("\t.globl\t.", file);
25183 assemble_name (file, name);
25184 putc ('\n', file);
25187 else
25188 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25189 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25190 rs6000_output_function_entry (file, name);
25191 fputs (":\n", file);
25192 return;
25195 if (TARGET_RELOCATABLE
25196 && !TARGET_SECURE_PLT
25197 && (get_pool_size () != 0 || crtl->profile)
25198 && uses_TOC ())
25200 char buf[256];
25202 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
25204 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
25205 fprintf (file, "\t.long ");
25206 assemble_name (file, buf);
25207 putc ('-', file);
25208 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25209 assemble_name (file, buf);
25210 putc ('\n', file);
25213 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
25214 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
25216 if (DEFAULT_ABI == ABI_AIX)
25218 const char *desc_name, *orig_name;
25220 orig_name = (*targetm.strip_name_encoding) (name);
25221 desc_name = orig_name;
25222 while (*desc_name == '.')
25223 desc_name++;
25225 if (TREE_PUBLIC (decl))
25226 fprintf (file, "\t.globl %s\n", desc_name);
25228 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
25229 fprintf (file, "%s:\n", desc_name);
25230 fprintf (file, "\t.long %s\n", orig_name);
25231 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
25232 if (DEFAULT_ABI == ABI_AIX)
25233 fputs ("\t.long 0\n", file);
25234 fprintf (file, "\t.previous\n");
25236 ASM_OUTPUT_LABEL (file, name);
25239 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
25240 static void
25241 rs6000_elf_file_end (void)
25243 #ifdef HAVE_AS_GNU_ATTRIBUTE
25244 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
25246 if (rs6000_passes_float)
25247 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
25248 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
25249 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
25250 : 2));
25251 if (rs6000_passes_vector)
25252 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
25253 (TARGET_ALTIVEC_ABI ? 2
25254 : TARGET_SPE_ABI ? 3
25255 : 1));
25256 if (rs6000_returns_struct)
25257 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
25258 aix_struct_return ? 2 : 1);
25260 #endif
25261 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25262 if (TARGET_32BIT)
25263 file_end_indicate_exec_stack ();
25264 #endif
25266 #endif
25268 #if TARGET_XCOFF
25269 static void
25270 rs6000_xcoff_asm_output_anchor (rtx symbol)
25272 char buffer[100];
25274 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
25275 SYMBOL_REF_BLOCK_OFFSET (symbol));
25276 ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
25279 static void
25280 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
25282 fputs (GLOBAL_ASM_OP, stream);
25283 RS6000_OUTPUT_BASENAME (stream, name);
25284 putc ('\n', stream);
25287 /* A get_unnamed_decl callback, used for read-only sections. PTR
25288 points to the section string variable. */
25290 static void
25291 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
25293 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
25294 *(const char *const *) directive,
25295 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25298 /* Likewise for read-write sections. */
25300 static void
25301 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
25303 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
25304 *(const char *const *) directive,
25305 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
25308 /* A get_unnamed_section callback, used for switching to toc_section. */
25310 static void
25311 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
25313 if (TARGET_MINIMAL_TOC)
25315 /* toc_section is always selected at least once from
25316 rs6000_xcoff_file_start, so this is guaranteed to
25317 always be defined once and only once in each file. */
25318 if (!toc_initialized)
25320 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
25321 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
25322 toc_initialized = 1;
25324 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
25325 (TARGET_32BIT ? "" : ",3"));
25327 else
25328 fputs ("\t.toc\n", asm_out_file);
25331 /* Implement TARGET_ASM_INIT_SECTIONS. */
25333 static void
25334 rs6000_xcoff_asm_init_sections (void)
25336 read_only_data_section
25337 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25338 &xcoff_read_only_section_name);
25340 private_data_section
25341 = get_unnamed_section (SECTION_WRITE,
25342 rs6000_xcoff_output_readwrite_section_asm_op,
25343 &xcoff_private_data_section_name);
25345 read_only_private_data_section
25346 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
25347 &xcoff_private_data_section_name);
25349 toc_section
25350 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
25352 readonly_data_section = read_only_data_section;
25353 exception_section = data_section;
25356 static int
25357 rs6000_xcoff_reloc_rw_mask (void)
25359 return 3;
25362 static void
25363 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
25364 tree decl ATTRIBUTE_UNUSED)
25366 int smclass;
25367 static const char * const suffix[3] = { "PR", "RO", "RW" };
25369 if (flags & SECTION_CODE)
25370 smclass = 0;
25371 else if (flags & SECTION_WRITE)
25372 smclass = 2;
25373 else
25374 smclass = 1;
25376 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
25377 (flags & SECTION_CODE) ? "." : "",
25378 name, suffix[smclass], flags & SECTION_ENTSIZE);
25381 static section *
25382 rs6000_xcoff_select_section (tree decl, int reloc,
25383 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25385 if (decl_readonly_section (decl, reloc))
25387 if (TREE_PUBLIC (decl))
25388 return read_only_data_section;
25389 else
25390 return read_only_private_data_section;
25392 else
25394 if (TREE_PUBLIC (decl))
25395 return data_section;
25396 else
25397 return private_data_section;
25401 static void
25402 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
25404 const char *name;
25406 /* Use select_section for private and uninitialized data. */
25407 if (!TREE_PUBLIC (decl)
25408 || DECL_COMMON (decl)
25409 || DECL_INITIAL (decl) == NULL_TREE
25410 || DECL_INITIAL (decl) == error_mark_node
25411 || (flag_zero_initialized_in_bss
25412 && initializer_zerop (DECL_INITIAL (decl))))
25413 return;
25415 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
25416 name = (*targetm.strip_name_encoding) (name);
25417 DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
25420 /* Select section for constant in constant pool.
25422 On RS/6000, all constants are in the private read-only data area.
25423 However, if this is being placed in the TOC it must be output as a
25424 toc entry. */
25426 static section *
25427 rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
25428 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
25430 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
25431 return toc_section;
25432 else
25433 return read_only_private_data_section;
25436 /* Remove any trailing [DS] or the like from the symbol name. */
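/* For example, "foo[DS]" and "*bar[RW]" both lose their mapping-class
   suffix and become "foo" and "bar"; note the code assumes a bracketed
   suffix, when present, is exactly four characters long.  */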
25438 static const char *
25439 rs6000_xcoff_strip_name_encoding (const char *name)
25441 size_t len;
25442 if (*name == '*')
25443 name++;
25444 len = strlen (name);
25445 if (name[len - 1] == ']')
25446 return ggc_alloc_string (name, len - 4);
25447 else
25448 return name;
25451 /* Section attributes. AIX is always PIC. */
25453 static unsigned int
25454 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
25456 unsigned int align;
25457 unsigned int flags = default_section_type_flags (decl, name, reloc);
25459 /* Align to at least UNIT size. */
25460 if (flags & SECTION_CODE || !decl)
25461 align = MIN_UNITS_PER_WORD;
25462 else
25463 /* Increase alignment of large objects if not already stricter. */
25464 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
25465 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
25466 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
25468 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
25471 /* Output at beginning of assembler file.
25473 Initialize the section names for the RS/6000 at this point.
25475 Specify filename, including full path, to assembler.
25477 We want to go into the TOC section so at least one .toc will be emitted.
25478 Also, in order to output proper .bs/.es pairs, we need at least one static
25479 [RW] section emitted.
25481 Finally, declare mcount when profiling to make the assembler happy. */
25483 static void
25484 rs6000_xcoff_file_start (void)
25486 rs6000_gen_section_name (&xcoff_bss_section_name,
25487 main_input_filename, ".bss_");
25488 rs6000_gen_section_name (&xcoff_private_data_section_name,
25489 main_input_filename, ".rw_");
25490 rs6000_gen_section_name (&xcoff_read_only_section_name,
25491 main_input_filename, ".ro_");
25493 fputs ("\t.file\t", asm_out_file);
25494 output_quoted_string (asm_out_file, main_input_filename);
25495 fputc ('\n', asm_out_file);
25496 if (write_symbols != NO_DEBUG)
25497 switch_to_section (private_data_section);
25498 switch_to_section (text_section);
25499 if (profile_flag)
25500 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
25501 rs6000_file_start ();
25504 /* Output at end of assembler file.
25505 On the RS/6000, referencing data should automatically pull in text. */
25507 static void
25508 rs6000_xcoff_file_end (void)
25510 switch_to_section (text_section);
25511 fputs ("_section_.text:\n", asm_out_file);
25512 switch_to_section (data_section);
25513 fputs (TARGET_32BIT
25514 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
25515 asm_out_file);
25517 #endif /* TARGET_XCOFF */
25519 /* Compute a (partial) cost for rtx X. Return true if the complete
25520 cost has been computed, and false if subexpressions should be
25521 scanned. In either case, *TOTAL contains the cost result. */
25523 static bool
25524 rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
25525 int *total, bool speed)
25527 enum machine_mode mode = GET_MODE (x);
25529 switch (code)
25531 /* On the RS/6000, if it is valid in the insn, it is free. */
25532 case CONST_INT:
25533 if (((outer_code == SET
25534 || outer_code == PLUS
25535 || outer_code == MINUS)
25536 && (satisfies_constraint_I (x)
25537 || satisfies_constraint_L (x)))
25538 || (outer_code == AND
25539 && (satisfies_constraint_K (x)
25540 || (mode == SImode
25541 ? satisfies_constraint_L (x)
25542 : satisfies_constraint_J (x))
25543 || mask_operand (x, mode)
25544 || (mode == DImode
25545 && mask64_operand (x, DImode))))
25546 || ((outer_code == IOR || outer_code == XOR)
25547 && (satisfies_constraint_K (x)
25548 || (mode == SImode
25549 ? satisfies_constraint_L (x)
25550 : satisfies_constraint_J (x))))
25551 || outer_code == ASHIFT
25552 || outer_code == ASHIFTRT
25553 || outer_code == LSHIFTRT
25554 || outer_code == ROTATE
25555 || outer_code == ROTATERT
25556 || outer_code == ZERO_EXTRACT
25557 || (outer_code == MULT
25558 && satisfies_constraint_I (x))
25559 || ((outer_code == DIV || outer_code == UDIV
25560 || outer_code == MOD || outer_code == UMOD)
25561 && exact_log2 (INTVAL (x)) >= 0)
25562 || (outer_code == COMPARE
25563 && (satisfies_constraint_I (x)
25564 || satisfies_constraint_K (x)))
25565 || ((outer_code == EQ || outer_code == NE)
25566 && (satisfies_constraint_I (x)
25567 || satisfies_constraint_K (x)
25568 || (mode == SImode
25569 ? satisfies_constraint_L (x)
25570 : satisfies_constraint_J (x))))
25571 || (outer_code == GTU
25572 && satisfies_constraint_I (x))
25573 || (outer_code == LTU
25574 && satisfies_constraint_P (x)))
25576 *total = 0;
25577 return true;
25579 else if ((outer_code == PLUS
25580 && reg_or_add_cint_operand (x, VOIDmode))
25581 || (outer_code == MINUS
25582 && reg_or_sub_cint_operand (x, VOIDmode))
25583 || ((outer_code == SET
25584 || outer_code == IOR
25585 || outer_code == XOR)
25586 && (INTVAL (x)
25587 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
25589 *total = COSTS_N_INSNS (1);
25590 return true;
25592 /* FALLTHRU */
25594 case CONST_DOUBLE:
25595 if (mode == DImode && code == CONST_DOUBLE)
25597 if ((outer_code == IOR || outer_code == XOR)
25598 && CONST_DOUBLE_HIGH (x) == 0
25599 && (CONST_DOUBLE_LOW (x)
25600 & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
25602 *total = 0;
25603 return true;
25605 else if ((outer_code == AND && and64_2_operand (x, DImode))
25606 || ((outer_code == SET
25607 || outer_code == IOR
25608 || outer_code == XOR)
25609 && CONST_DOUBLE_HIGH (x) == 0))
25611 *total = COSTS_N_INSNS (1);
25612 return true;
25615 /* FALLTHRU */
25617 case CONST:
25618 case HIGH:
25619 case SYMBOL_REF:
25620 case MEM:
25621 /* When optimizing for size, MEM should be slightly more expensive
25622 than generating an address, e.g., (plus (reg) (const)).
25623 L1 cache latency is about two instructions. */
25624 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
25625 return true;
25627 case LABEL_REF:
25628 *total = 0;
25629 return true;
25631 case PLUS:
25632 case MINUS:
25633 if (FLOAT_MODE_P (mode))
25634 *total = rs6000_cost->fp;
25635 else
25636 *total = COSTS_N_INSNS (1);
25637 return false;
25639 case MULT:
25640 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25641 && satisfies_constraint_I (XEXP (x, 1)))
25643 if (INTVAL (XEXP (x, 1)) >= -256
25644 && INTVAL (XEXP (x, 1)) <= 255)
25645 *total = rs6000_cost->mulsi_const9;
25646 else
25647 *total = rs6000_cost->mulsi_const;
25649 else if (mode == SFmode)
25650 *total = rs6000_cost->fp;
25651 else if (FLOAT_MODE_P (mode))
25652 *total = rs6000_cost->dmul;
25653 else if (mode == DImode)
25654 *total = rs6000_cost->muldi;
25655 else
25656 *total = rs6000_cost->mulsi;
25657 return false;
25659 case FMA:
25660 if (mode == SFmode)
25661 *total = rs6000_cost->fp;
25662 else
25663 *total = rs6000_cost->dmul;
25664 break;
25666 case DIV:
25667 case MOD:
25668 if (FLOAT_MODE_P (mode))
25670 *total = mode == DFmode ? rs6000_cost->ddiv
25671 : rs6000_cost->sdiv;
25672 return false;
25674 /* FALLTHRU */
25676 case UDIV:
25677 case UMOD:
25678 if (GET_CODE (XEXP (x, 1)) == CONST_INT
25679 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
25681 if (code == DIV || code == MOD)
25682 /* Shift, addze */
25683 *total = COSTS_N_INSNS (2);
25684 else
25685 /* Shift */
25686 *total = COSTS_N_INSNS (1);
25688 else
25690 if (GET_MODE (XEXP (x, 1)) == DImode)
25691 *total = rs6000_cost->divdi;
25692 else
25693 *total = rs6000_cost->divsi;
25695 /* Add in shift and subtract for MOD. */
25696 if (code == MOD || code == UMOD)
25697 *total += COSTS_N_INSNS (2);
25698 return false;
25700 case CTZ:
25701 case FFS:
25702 *total = COSTS_N_INSNS (4);
25703 return false;
25705 case POPCOUNT:
25706 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
25707 return false;
25709 case PARITY:
25710 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
25711 return false;
25713 case NOT:
25714 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
25716 *total = 0;
25717 return false;
25719 /* FALLTHRU */
25721 case AND:
25722 case CLZ:
25723 case IOR:
25724 case XOR:
25725 case ZERO_EXTRACT:
25726 *total = COSTS_N_INSNS (1);
25727 return false;
25729 case ASHIFT:
25730 case ASHIFTRT:
25731 case LSHIFTRT:
25732 case ROTATE:
25733 case ROTATERT:
25734 /* Handle mul_highpart. */
25735 if (outer_code == TRUNCATE
25736 && GET_CODE (XEXP (x, 0)) == MULT)
25738 if (mode == DImode)
25739 *total = rs6000_cost->muldi;
25740 else
25741 *total = rs6000_cost->mulsi;
25742 return true;
25744 else if (outer_code == AND)
25745 *total = 0;
25746 else
25747 *total = COSTS_N_INSNS (1);
25748 return false;
25750 case SIGN_EXTEND:
25751 case ZERO_EXTEND:
25752 if (GET_CODE (XEXP (x, 0)) == MEM)
25753 *total = 0;
25754 else
25755 *total = COSTS_N_INSNS (1);
25756 return false;
25758 case COMPARE:
25759 case NEG:
25760 case ABS:
25761 if (!FLOAT_MODE_P (mode))
25763 *total = COSTS_N_INSNS (1);
25764 return false;
25766 /* FALLTHRU */
25768 case FLOAT:
25769 case UNSIGNED_FLOAT:
25770 case FIX:
25771 case UNSIGNED_FIX:
25772 case FLOAT_TRUNCATE:
25773 *total = rs6000_cost->fp;
25774 return false;
25776 case FLOAT_EXTEND:
25777 if (mode == DFmode)
25778 *total = 0;
25779 else
25780 *total = rs6000_cost->fp;
25781 return false;
25783 case UNSPEC:
25784 switch (XINT (x, 1))
25786 case UNSPEC_FRSP:
25787 *total = rs6000_cost->fp;
25788 return true;
25790 default:
25791 break;
25793 break;
25795 case CALL:
25796 case IF_THEN_ELSE:
25797 if (!speed)
25799 *total = COSTS_N_INSNS (1);
25800 return true;
25802 else if (FLOAT_MODE_P (mode)
25803 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
25805 *total = rs6000_cost->fp;
25806 return false;
25808 break;
25810 case EQ:
25811 case GTU:
25812 case LTU:
25813 /* Carry bit requires mode == Pmode.
25814 NEG or PLUS already counted so only add one. */
25815 if (mode == Pmode
25816 && (outer_code == NEG || outer_code == PLUS))
25818 *total = COSTS_N_INSNS (1);
25819 return true;
25821 if (outer_code == SET)
25823 if (XEXP (x, 1) == const0_rtx)
25825 if (TARGET_ISEL && !TARGET_MFCRF)
25826 *total = COSTS_N_INSNS (8);
25827 else
25828 *total = COSTS_N_INSNS (2);
25829 return true;
25831 else if (mode == Pmode)
25833 *total = COSTS_N_INSNS (3);
25834 return false;
25837 /* FALLTHRU */
25839 case GT:
25840 case LT:
25841 case UNORDERED:
25842 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
25844 if (TARGET_ISEL && !TARGET_MFCRF)
25845 *total = COSTS_N_INSNS (8);
25846 else
25847 *total = COSTS_N_INSNS (2);
25848 return true;
25850 /* CC COMPARE. */
25851 if (outer_code == COMPARE)
25853 *total = 0;
25854 return true;
25856 break;
25858 default:
25859 break;
25862 return false;
25865 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
25867 static bool
25868 rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
25869 bool speed)
25871 bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);
25873 fprintf (stderr,
25874 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
25875 "opno = %d, total = %d, speed = %s, x:\n",
25876 ret ? "complete" : "scan inner",
25877 GET_RTX_NAME (code),
25878 GET_RTX_NAME (outer_code),
25879 opno,
25880 *total,
25881 speed ? "true" : "false");
25883 debug_rtx (x);
25885 return ret;
25888 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
25890 static int
25891 rs6000_debug_address_cost (rtx x, bool speed)
25893 int ret = TARGET_ADDRESS_COST (x, speed);
25895 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
25896 ret, speed ? "true" : "false");
25897 debug_rtx (x);
25899 return ret;
25903 /* A C expression returning the cost of moving data from a register of
25904 class FROM to one of class TO. */
25906 static int
25907 rs6000_register_move_cost (enum machine_mode mode,
25908 reg_class_t from, reg_class_t to)
25910 int ret;
25912 if (TARGET_DEBUG_COST)
25913 dbg_cost_ctrl++;
25915 /* Moves from/to GENERAL_REGS. */
25916 if (reg_classes_intersect_p (to, GENERAL_REGS)
25917 || reg_classes_intersect_p (from, GENERAL_REGS))
25919 reg_class_t rclass = from;
25921 if (! reg_classes_intersect_p (to, GENERAL_REGS))
25922 rclass = to;
25924 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
25925 ret = (rs6000_memory_move_cost (mode, rclass, false)
25926 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
25928 /* It's more expensive to move CR_REGS than CR0_REGS because of the
25929 shift. */
25930 else if (rclass == CR_REGS)
25931 ret = 4;
25933 /* For those processors that have slow LR/CTR moves, make them more
25934 expensive than memory in order to bias spills to memory. */
25935 else if ((rs6000_cpu == PROCESSOR_POWER6
25936 || rs6000_cpu == PROCESSOR_POWER7)
25937 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
25938 ret = 6 * hard_regno_nregs[0][mode];
25940 else
25941 /* A move will cost one instruction per GPR moved. */
25942 ret = 2 * hard_regno_nregs[0][mode];
25945 /* If we have VSX, we can easily move between FPR or Altivec registers. */
25946 else if (VECTOR_UNIT_VSX_P (mode)
25947 && reg_classes_intersect_p (to, VSX_REGS)
25948 && reg_classes_intersect_p (from, VSX_REGS))
25949 ret = 2 * hard_regno_nregs[32][mode];
25951 /* Moving between two similar registers is just one instruction. */
25952 else if (reg_classes_intersect_p (to, from))
25953 ret = (mode == TFmode || mode == TDmode) ? 4 : 2;
25955 /* Everything else has to go through GENERAL_REGS. */
25956 else
25957 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
25958 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
25960 if (TARGET_DEBUG_COST)
25962 if (dbg_cost_ctrl == 1)
25963 fprintf (stderr,
25964 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
25965 ret, GET_MODE_NAME (mode), reg_class_names[from],
25966 reg_class_names[to]);
25967 dbg_cost_ctrl--;
25970 return ret;
25973 /* A C expression returning the cost of moving data of MODE from a
25974 register to or from memory. */
25976 static int
25977 rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
25978 bool in ATTRIBUTE_UNUSED)
25980 int ret;
25982 if (TARGET_DEBUG_COST)
25983 dbg_cost_ctrl++;
25985 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
25986 ret = 4 * hard_regno_nregs[0][mode];
25987 else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
25988 ret = 4 * hard_regno_nregs[32][mode];
25989 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
25990 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
25991 else
25992 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
25994 if (TARGET_DEBUG_COST)
25996 if (dbg_cost_ctrl == 1)
25997 fprintf (stderr,
25998 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
25999 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
26000 dbg_cost_ctrl--;
26003 return ret;
26006 /* Returns the decl of a target-specific builtin that implements the
26007 reciprocal of the function, or NULL_TREE if not available. */
26009 static tree
26010 rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
26011 bool sqrt ATTRIBUTE_UNUSED)
26013 if (optimize_insn_for_size_p ())
26014 return NULL_TREE;
26016 if (md_fn)
26017 switch (fn)
26019 case VSX_BUILTIN_XVSQRTDP:
26020 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
26021 return NULL_TREE;
26023 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
26025 case VSX_BUILTIN_XVSQRTSP:
26026 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
26027 return NULL_TREE;
26029 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
26031 default:
26032 return NULL_TREE;
26035 else
26036 switch (fn)
26038 case BUILT_IN_SQRT:
26039 if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
26040 return NULL_TREE;
26042 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];
26044 case BUILT_IN_SQRTF:
26045 if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
26046 return NULL_TREE;
26048 return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
26050 default:
26051 return NULL_TREE;
26055 /* Load up a constant. If the mode is a vector mode, splat the value across
26056 all of the vector elements. */
26058 static rtx
26059 rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
26061 rtx reg;
26063 if (mode == SFmode || mode == DFmode)
26065 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
26066 reg = force_reg (mode, d);
26068 else if (mode == V4SFmode)
26070 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
26071 rtvec v = gen_rtvec (4, d, d, d, d);
26072 reg = gen_reg_rtx (mode);
26073 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26075 else if (mode == V2DFmode)
26077 rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
26078 rtvec v = gen_rtvec (2, d, d);
26079 reg = gen_reg_rtx (mode);
26080 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
26082 else
26083 gcc_unreachable ();
26085 return reg;
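/* For instance (illustrative), rs6000_load_constant_and_splat (V4SFmode,
   dconst1) returns a V4SF register holding { 1.0, 1.0, 1.0, 1.0 }; the
   software divide expanders below use exactly this to materialize 1.0.  */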
26088 /* Generate an FMA instruction. */
26090 static void
26091 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
26093 enum machine_mode mode = GET_MODE (target);
26094 rtx dst;
26096 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26097 gcc_assert (dst != NULL);
26099 if (dst != target)
26100 emit_move_insn (target, dst);
26103 /* Generate an FMSUB instruction: dst = fma(m1, m2, -a). */
26105 static void
26106 rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
26108 enum machine_mode mode = GET_MODE (target);
26109 rtx dst;
26111 /* Altivec does not support fms directly;
26112 generate in terms of fma in that case. */
26113 if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
26114 dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
26115 else
26117 a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
26118 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
26120 gcc_assert (dst != NULL);
26122 if (dst != target)
26123 emit_move_insn (target, dst);
26126 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
26128 static void
26129 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
26131 enum machine_mode mode = GET_MODE (dst);
26132 rtx r;
26134 /* This is a tad more complicated, since the fnma_optab is for
26135 a different expression: fma(-m1, m2, a), which is the same
26136 thing except for the sign of zero results.
26138 Fortunately, we know that if FMA is supported, then FNMSUB is
26139 also supported in the ISA. Just expand it directly. */
26141 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
26143 r = gen_rtx_NEG (mode, a);
26144 r = gen_rtx_FMA (mode, m1, m2, r);
26145 r = gen_rtx_NEG (mode, r);
26146 emit_insn (gen_rtx_SET (VOIDmode, dst, r));
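/* A concrete illustration of the signed-zero difference noted above
   (documentation only): with m1 = 1.0, m2 = 1.0, a = 1.0,
   -fma(m1, m2, -a) negates the exact +0.0 result and yields -0.0,
   while fma(-m1, m2, a) computes -1.0 + 1.0 = +0.0.  The two forms
   agree everywhere else.  */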
26149 /* Newton-Raphson approximation of floating point divide using just two passes
26150 (for single precision floating point, or for newer machines whose estimate
26151 instructions have higher accuracy). Supports both scalar and vector divide.
26152 Assumes no trapping math and finite arguments. */
26154 static void
26155 rs6000_emit_swdiv_high_precision (rtx dst, rtx n, rtx d)
26157 enum machine_mode mode = GET_MODE (dst);
26158 rtx x0, e0, e1, y1, u0, v0;
26159 enum insn_code code = optab_handler (smul_optab, mode);
26160 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26161 rtx one = rs6000_load_constant_and_splat (mode, dconst1);
26163 gcc_assert (code != CODE_FOR_nothing);
26165 /* x0 = 1./d estimate */
26166 x0 = gen_reg_rtx (mode);
26167 emit_insn (gen_rtx_SET (VOIDmode, x0,
26168 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26169 UNSPEC_FRES)));
26171 e0 = gen_reg_rtx (mode);
26172 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - (d * x0) */
26174 e1 = gen_reg_rtx (mode);
26175 rs6000_emit_madd (e1, e0, e0, e0); /* e1 = (e0 * e0) + e0 */
26177 y1 = gen_reg_rtx (mode);
26178 rs6000_emit_madd (y1, e1, x0, x0); /* y1 = (e1 * x0) + x0 */
26180 u0 = gen_reg_rtx (mode);
26181 emit_insn (gen_mul (u0, n, y1)); /* u0 = n * y1 */
26183 v0 = gen_reg_rtx (mode);
26184 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - (d * u0) */
26186 rs6000_emit_madd (dst, v0, y1, u0); /* dst = (v0 * y1) + u0 */
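/* A scalar model of the two-pass sequence above (a documentation sketch
   under the same no-trapping, finite-math assumptions; the function name
   is illustrative and this block is not part of the port):  */
#if 0
static double
swdiv_high_precision_model (double n, double d)
{
  double x0 = 1.0 / d;          /* exact here; stands in for UNSPEC_FRES */
  double e0 = 1.0 - d * x0;     /* error of the estimate */
  double e1 = e0 * e0 + e0;     /* fold in the second-order error term */
  double y1 = e1 * x0 + x0;     /* refined reciprocal */
  double u0 = n * y1;           /* quotient approximation */
  double v0 = n - d * u0;       /* residual */
  return v0 * y1 + u0;          /* corrected quotient */
}
#endif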
26189 /* Newton-Raphson approximation of floating point divide for machines with a
26190 low-precision reciprocal estimate. Assumes no trapping math and finite arguments. */
26192 static void
26193 rs6000_emit_swdiv_low_precision (rtx dst, rtx n, rtx d)
26195 enum machine_mode mode = GET_MODE (dst);
26196 rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
26197 enum insn_code code = optab_handler (smul_optab, mode);
26198 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26200 gcc_assert (code != CODE_FOR_nothing);
26202 one = rs6000_load_constant_and_splat (mode, dconst1);
26204 /* x0 = 1./d estimate */
26205 x0 = gen_reg_rtx (mode);
26206 emit_insn (gen_rtx_SET (VOIDmode, x0,
26207 gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
26208 UNSPEC_FRES)));
26210 e0 = gen_reg_rtx (mode);
26211 rs6000_emit_nmsub (e0, d, x0, one); /* e0 = 1. - d * x0 */
26213 y1 = gen_reg_rtx (mode);
26214 rs6000_emit_madd (y1, e0, x0, x0); /* y1 = x0 + e0 * x0 */
26216 e1 = gen_reg_rtx (mode);
26217 emit_insn (gen_mul (e1, e0, e0)); /* e1 = e0 * e0 */
26219 y2 = gen_reg_rtx (mode);
26220 rs6000_emit_madd (y2, e1, y1, y1); /* y2 = y1 + e1 * y1 */
26222 e2 = gen_reg_rtx (mode);
26223 emit_insn (gen_mul (e2, e1, e1)); /* e2 = e1 * e1 */
26225 y3 = gen_reg_rtx (mode);
26226 rs6000_emit_madd (y3, e2, y2, y2); /* y3 = y2 + e2 * y2 */
26228 u0 = gen_reg_rtx (mode);
26229 emit_insn (gen_mul (u0, n, y3)); /* u0 = n * y3 */
26231 v0 = gen_reg_rtx (mode);
26232 rs6000_emit_nmsub (v0, d, u0, n); /* v0 = n - d * u0 */
26234 rs6000_emit_madd (dst, v0, y3, u0); /* dst = u0 + v0 * y3 */
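/* Each Newton-Raphson pass roughly doubles the number of correct bits in
   the reciprocal, which is why this variant needs the three refinements
   y1, y2 and y3 where the high-precision variant above needs only one.  */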
26237 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
26238 add a reg_note saying that this was a division. Support both scalar and
26239 vector divide. Assumes no trapping math and finite arguments. */
26241 void
26242 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
26244 enum machine_mode mode = GET_MODE (dst);
26246 if (RS6000_RECIP_HIGH_PRECISION_P (mode))
26247 rs6000_emit_swdiv_high_precision (dst, n, d);
26248 else
26249 rs6000_emit_swdiv_low_precision (dst, n, d);
26251 if (note_p)
26252 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
26255 /* Newton-Raphson approximation of single/double-precision floating point
26256 rsqrt. Assumes no trapping math and finite arguments. */
26258 void
26259 rs6000_emit_swrsqrt (rtx dst, rtx src)
26261 enum machine_mode mode = GET_MODE (src);
26262 rtx x0 = gen_reg_rtx (mode);
26263 rtx y = gen_reg_rtx (mode);
26264 int passes = (TARGET_RECIP_PRECISION) ? 2 : 3;
26265 REAL_VALUE_TYPE dconst3_2;
26266 int i;
26267 rtx halfthree;
26268 enum insn_code code = optab_handler (smul_optab, mode);
26269 gen_2arg_fn_t gen_mul = (gen_2arg_fn_t) GEN_FCN (code);
26271 gcc_assert (code != CODE_FOR_nothing);
26273 /* Load up the constant 1.5 either as a scalar, or as a vector. */
26274 real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
26275 SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
26277 halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
26279 /* x0 = rsqrt estimate */
26280 emit_insn (gen_rtx_SET (VOIDmode, x0,
26281 gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
26282 UNSPEC_RSQRT)));
26284 /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
26285 rs6000_emit_msub (y, src, halfthree, src);
26287 for (i = 0; i < passes; i++)
26289 rtx x1 = gen_reg_rtx (mode);
26290 rtx u = gen_reg_rtx (mode);
26291 rtx v = gen_reg_rtx (mode);
26293 /* x1 = x0 * (1.5 - y * (x0 * x0)) */
26294 emit_insn (gen_mul (u, x0, x0));
26295 rs6000_emit_nmsub (v, y, u, halfthree);
26296 emit_insn (gen_mul (x1, x0, v));
26297 x0 = x1;
26300 emit_move_insn (dst, x0);
26301 return;
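/* A scalar model of the refinement loop above (documentation sketch, not
   part of the port; the exact initial value stands in for the UNSPEC_RSQRT
   estimate):  */
#if 0
static double
swrsqrt_model (double src, int passes)
{
  double x = 1.0 / __builtin_sqrt (src); /* rsqrt estimate placeholder */
  double y = 1.5 * src - src;            /* y = 0.5 * src, as above */
  int i;

  for (i = 0; i < passes; i++)
    x = x * (1.5 - y * (x * x));         /* Newton-Raphson step */
  return x;
}
#endif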
26304 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
26305 (Power7) targets. DST is the target, and SRC is the argument operand. */
26307 void
26308 rs6000_emit_popcount (rtx dst, rtx src)
26310 enum machine_mode mode = GET_MODE (dst);
26311 rtx tmp1, tmp2;
26313 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
26314 if (TARGET_POPCNTD)
26316 if (mode == SImode)
26317 emit_insn (gen_popcntdsi2 (dst, src));
26318 else
26319 emit_insn (gen_popcntddi2 (dst, src));
26320 return;
26323 tmp1 = gen_reg_rtx (mode);
26325 if (mode == SImode)
26327 emit_insn (gen_popcntbsi2 (tmp1, src));
26328 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
26329 NULL_RTX, 0);
26330 tmp2 = force_reg (SImode, tmp2);
26331 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
26333 else
26335 emit_insn (gen_popcntbdi2 (tmp1, src));
26336 tmp2 = expand_mult (DImode, tmp1,
26337 GEN_INT ((HOST_WIDE_INT)
26338 0x01010101 << 32 | 0x01010101),
26339 NULL_RTX, 0);
26340 tmp2 = force_reg (DImode, tmp2);
26341 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
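/* A scalar model of the SImode popcntb path above (documentation sketch,
   not part of the port): popcntb leaves each byte's bit count in that
   byte; multiplying by 0x01010101 accumulates the four counts into the
   top byte, and the final shift extracts it.  */
#if 0
static unsigned int
popcount_model (unsigned int src)
{
  unsigned int counts = 0;
  int i;

  for (i = 0; i < 4; i++)                        /* model of popcntbsi2 */
    counts |= __builtin_popcount ((src >> (8 * i)) & 0xff) << (8 * i);
  return (counts * 0x01010101) >> 24;            /* sum bytes, extract */
}
#endif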
26346 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
26347 target, and SRC is the argument operand. */
26349 void
26350 rs6000_emit_parity (rtx dst, rtx src)
26352 enum machine_mode mode = GET_MODE (dst);
26353 rtx tmp;
26355 tmp = gen_reg_rtx (mode);
26357 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
26358 if (TARGET_CMPB)
26360 if (mode == SImode)
26362 emit_insn (gen_popcntbsi2 (tmp, src));
26363 emit_insn (gen_paritysi2_cmpb (dst, tmp));
26365 else
26367 emit_insn (gen_popcntbdi2 (tmp, src));
26368 emit_insn (gen_paritydi2_cmpb (dst, tmp));
26370 return;
26373 if (mode == SImode)
26375 /* Is mult+shift >= shift+xor+shift+xor? */
26376 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
26378 rtx tmp1, tmp2, tmp3, tmp4;
26380 tmp1 = gen_reg_rtx (SImode);
26381 emit_insn (gen_popcntbsi2 (tmp1, src));
26383 tmp2 = gen_reg_rtx (SImode);
26384 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
26385 tmp3 = gen_reg_rtx (SImode);
26386 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
26388 tmp4 = gen_reg_rtx (SImode);
26389 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
26390 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
26392 else
26393 rs6000_emit_popcount (tmp, src);
26394 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
26396 else
26398 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
26399 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
26401 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
26403 tmp1 = gen_reg_rtx (DImode);
26404 emit_insn (gen_popcntbdi2 (tmp1, src));
26406 tmp2 = gen_reg_rtx (DImode);
26407 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
26408 tmp3 = gen_reg_rtx (DImode);
26409 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
26411 tmp4 = gen_reg_rtx (DImode);
26412 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
26413 tmp5 = gen_reg_rtx (DImode);
26414 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
26416 tmp6 = gen_reg_rtx (DImode);
26417 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
26418 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
26420 else
26421 rs6000_emit_popcount (tmp, src);
26422 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
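/* A scalar model of the SImode shift/xor fallback above (documentation
   sketch, not part of the port): parity needs only the low bit of the
   total bit count, and XOR-folding the per-byte counts preserves it.  */
#if 0
static unsigned int
parity_model (unsigned int src)
{
  unsigned int t = 0;
  int i;

  for (i = 0; i < 4; i++)                        /* model of popcntbsi2 */
    t |= __builtin_popcount ((src >> (8 * i)) & 0xff) << (8 * i);
  t ^= t >> 16;                                  /* fold high half in */
  t ^= t >> 8;                                   /* fold second byte in */
  return t & 1;                                  /* parity is the low bit */
}
#endif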
26426 /* Expand an Altivec constant permutation. Return true if we match
26427 an efficient implementation; false to fall back to VPERM. */
26429 bool
26430 altivec_expand_vec_perm_const (rtx operands[4])
26432 struct altivec_perm_insn {
26433 enum insn_code impl;
26434 unsigned char perm[16];
26436 static const struct altivec_perm_insn patterns[] = {
26437 { CODE_FOR_altivec_vpkuhum,
26438 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
26439 { CODE_FOR_altivec_vpkuwum,
26440 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
26441 { CODE_FOR_altivec_vmrghb,
26442 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
26443 { CODE_FOR_altivec_vmrghh,
26444 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
26445 { CODE_FOR_altivec_vmrghw,
26446 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
26447 { CODE_FOR_altivec_vmrglb,
26448 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
26449 { CODE_FOR_altivec_vmrglh,
26450 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
26451 { CODE_FOR_altivec_vmrglw,
26452 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } }
26455 unsigned int i, j, elt, which;
26456 unsigned char perm[16];
26457 rtx target, op0, op1, sel, x;
26458 bool one_vec;
26460 target = operands[0];
26461 op0 = operands[1];
26462 op1 = operands[2];
26463 sel = operands[3];
26465 /* Unpack the constant selector. */
26466 for (i = which = 0; i < 16; ++i)
26468 rtx e = XVECEXP (sel, 0, i);
26469 elt = INTVAL (e) & 31;
26470 which |= (elt < 16 ? 1 : 2);
26471 perm[i] = elt;
26474 /* Simplify the constant selector based on operands. */
26475 switch (which)
26477 default:
26478 gcc_unreachable ();
26480 case 3:
26481 one_vec = false;
26482 if (!rtx_equal_p (op0, op1))
26483 break;
26484 /* FALLTHRU */
26486 case 2:
26487 for (i = 0; i < 16; ++i)
26488 perm[i] &= 15;
26489 op0 = op1;
26490 one_vec = true;
26491 break;
26493 case 1:
26494 op1 = op0;
26495 one_vec = true;
26496 break;
26499 /* Look for splat patterns. */
26500 if (one_vec)
26502 elt = perm[0];
26504 for (i = 0; i < 16; ++i)
26505 if (perm[i] != elt)
26506 break;
26507 if (i == 16)
26509 emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
26510 return true;
26513 if (elt % 2 == 0)
26515 for (i = 0; i < 16; i += 2)
26516 if (perm[i] != elt || perm[i + 1] != elt + 1)
26517 break;
26518 if (i == 16)
26520 x = gen_reg_rtx (V8HImode);
26521 emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
26522 GEN_INT (elt / 2)));
26523 emit_move_insn (target, gen_lowpart (V16QImode, x));
26524 return true;
26528 if (elt % 4 == 0)
26530 for (i = 0; i < 16; i += 4)
26531 if (perm[i] != elt
26532 || perm[i + 1] != elt + 1
26533 || perm[i + 2] != elt + 2
26534 || perm[i + 3] != elt + 3)
26535 break;
26536 if (i == 16)
26538 x = gen_reg_rtx (V4SImode);
26539 emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
26540 GEN_INT (elt / 4)));
26541 emit_move_insn (target, gen_lowpart (V16QImode, x));
26542 return true;
26547 /* Look for merge and pack patterns. */
26548 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
26550 bool swapped;
26552 elt = patterns[j].perm[0];
26553 if (perm[0] == elt)
26554 swapped = false;
26555 else if (perm[0] == elt + 16)
26556 swapped = true;
26557 else
26558 continue;
26559 for (i = 1; i < 16; ++i)
26561 elt = patterns[j].perm[i];
26562 if (swapped)
26563 elt = (elt >= 16 ? elt - 16 : elt + 16);
26564 else if (one_vec && elt >= 16)
26565 elt -= 16;
26566 if (perm[i] != elt)
26567 break;
26569 if (i == 16)
26571 enum insn_code icode = patterns[j].impl;
26572 enum machine_mode omode = insn_data[icode].operand[0].mode;
26573 enum machine_mode imode = insn_data[icode].operand[1].mode;
26575 if (swapped)
26576 x = op0, op0 = op1, op1 = x;
26577 if (imode != V16QImode)
26579 op0 = gen_lowpart (imode, op0);
26580 op1 = gen_lowpart (imode, op1);
26582 if (omode == V16QImode)
26583 x = target;
26584 else
26585 x = gen_reg_rtx (omode);
26586 emit_insn (GEN_FCN (icode) (x, op0, op1));
26587 if (omode != V16QImode)
26588 emit_move_insn (target, gen_lowpart (V16QImode, x));
26589 return true;
26593 return false;
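/* Worked examples (illustrative): a constant selector of
   { 5, 5, 5, ..., 5 } reads every byte from element 5 of one input, so
   the splat logic above emits vspltb with elt = 5; the selector
   { 0, 16, 1, 17, ..., 7, 23 } interleaves the high halves of the two
   inputs byte by byte and matches the CODE_FOR_altivec_vmrghb row of
   the pattern table.  */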
26596 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
26597 Return true if we match an efficient implementation. */
26599 static bool
26600 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
26601 unsigned char perm0, unsigned char perm1)
26603 rtx x;
26605 /* If both selectors come from the same operand, fold to single op. */
26606 if ((perm0 & 2) == (perm1 & 2))
26608 if (perm0 & 2)
26609 op0 = op1;
26610 else
26611 op1 = op0;
26613 /* If both operands are equal, fold to simpler permutation. */
26614 if (rtx_equal_p (op0, op1))
26616 perm0 = perm0 & 1;
26617 perm1 = (perm1 & 1) + 2;
26619 /* If the first selector comes from the second operand, swap. */
26620 else if (perm0 & 2)
26622 if (perm1 & 2)
26623 return false;
26624 perm0 -= 2;
26625 perm1 += 2;
26626 x = op0, op0 = op1, op1 = x;
26628 /* If the second selector does not come from the second operand, fail. */
26629 else if ((perm1 & 2) == 0)
26630 return false;
26632 /* Success! */
26633 if (target != NULL)
26635 enum machine_mode vmode, dmode;
26636 rtvec v;
26638 vmode = GET_MODE (target);
26639 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
26640 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
26642 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
26643 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
26644 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
26645 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26647 return true;
26650 bool
26651 rs6000_expand_vec_perm_const (rtx operands[4])
26653 rtx target, op0, op1, sel;
26654 unsigned char perm0, perm1;
26656 target = operands[0];
26657 op0 = operands[1];
26658 op1 = operands[2];
26659 sel = operands[3];
26661 /* Unpack the constant selector. */
26662 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
26663 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
26665 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
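/* Worked example (illustrative): for a two-element vector, the selector
   (2, 1) requests { op1[0], op0[1] }.  Since perm0 selects from the
   second operand and perm1 from the first, the helper above swaps the
   operands, rewrites the selectors to (0, 3), and emits a vec_select of
   the concatenation of the swapped operands.  */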
26668 /* Test whether a constant permutation is supported. */
26670 static bool
26671 rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
26672 const unsigned char *sel)
26674 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
26675 if (TARGET_ALTIVEC)
26676 return true;
26678 /* Check for ps_merge* or evmerge* insns. */
26679 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
26680 || (TARGET_SPE && vmode == V2SImode))
26682 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
26683 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
26684 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
26687 return false;
26690 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
26692 static void
26693 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
26694 enum machine_mode vmode, unsigned nelt, rtx perm[])
26696 enum machine_mode imode;
26697 rtx x;
26699 imode = vmode;
26700 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
26702 imode = GET_MODE_INNER (vmode);
26703 imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
26704 imode = mode_for_vector (imode, nelt);
26707 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
26708 x = expand_vec_perm (vmode, op0, op1, x, target);
26709 if (x != target)
26710 emit_move_insn (target, x);
26713 /* Expand an extract even operation. */
26715 void
26716 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
26718 enum machine_mode vmode = GET_MODE (target);
26719 unsigned i, nelt = GET_MODE_NUNITS (vmode);
26720 rtx perm[16];
26722 for (i = 0; i < nelt; i++)
26723 perm[i] = GEN_INT (i * 2);
26725 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
26728 /* Expand a vector interleave operation. */
26730 void
26731 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
26733 enum machine_mode vmode = GET_MODE (target);
26734 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
26735 rtx perm[16];
26737 high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
26738 for (i = 0; i < nelt / 2; i++)
26740 perm[i * 2] = GEN_INT (i + high);
26741 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
26744 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
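/* For example (illustrative): with nelt = 4, selecting the high half on a
   big-endian target gives high = 0 and the loop above builds the selector
   { 0, 4, 1, 5 }, pairing the first two elements of op0 with the first two
   of op1; rs6000_expand_extract_even instead builds { 0, 2, 4, 6 }.  */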
26747 /* Return an RTX representing where to find the function value of a
26748 function returning MODE. */
26749 static rtx
26750 rs6000_complex_function_value (enum machine_mode mode)
26752 unsigned int regno;
26753 rtx r1, r2;
26754 enum machine_mode inner = GET_MODE_INNER (mode);
26755 unsigned int inner_bytes = GET_MODE_SIZE (inner);
26757 if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26758 regno = FP_ARG_RETURN;
26759 else
26761 regno = GP_ARG_RETURN;
26763 /* 32-bit is OK since it'll go in r3/r4. */
26764 if (TARGET_32BIT && inner_bytes >= 4)
26765 return gen_rtx_REG (mode, regno);
26768 if (inner_bytes >= 8)
26769 return gen_rtx_REG (mode, regno);
26771 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
26772 const0_rtx);
26773 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
26774 GEN_INT (inner_bytes));
26775 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
26778 /* Target hook for TARGET_FUNCTION_VALUE.
26780 On the SPE, both FPs and vectors are returned in r3.
26782 On RS/6000 an integer value is in r3 and a floating-point value is in
26783 fp1, unless -msoft-float. */
26785 static rtx
26786 rs6000_function_value (const_tree valtype,
26787 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
26788 bool outgoing ATTRIBUTE_UNUSED)
26790 enum machine_mode mode;
26791 unsigned int regno;
26793 /* Special handling for structs in darwin64. */
26794 if (TARGET_MACHO
26795 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
26797 CUMULATIVE_ARGS valcum;
26798 rtx valret;
26800 valcum.words = 0;
26801 valcum.fregno = FP_ARG_MIN_REG;
26802 valcum.vregno = ALTIVEC_ARG_MIN_REG;
26803 /* Do a trial code generation as if this were going to be passed as
26804 an argument; if any part goes in memory, we return NULL. */
26805 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
26806 if (valret)
26807 return valret;
26808 /* Otherwise fall through to standard ABI rules. */
26811 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
26813 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
26814 return gen_rtx_PARALLEL (DImode,
26815 gen_rtvec (2,
26816 gen_rtx_EXPR_LIST (VOIDmode,
26817 gen_rtx_REG (SImode, GP_ARG_RETURN),
26818 const0_rtx),
26819 gen_rtx_EXPR_LIST (VOIDmode,
26820 gen_rtx_REG (SImode,
26821 GP_ARG_RETURN + 1),
26822 GEN_INT (4))));
26824 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
26826 return gen_rtx_PARALLEL (DCmode,
26827 gen_rtvec (4,
26828 gen_rtx_EXPR_LIST (VOIDmode,
26829 gen_rtx_REG (SImode, GP_ARG_RETURN),
26830 const0_rtx),
26831 gen_rtx_EXPR_LIST (VOIDmode,
26832 gen_rtx_REG (SImode,
26833 GP_ARG_RETURN + 1),
26834 GEN_INT (4)),
26835 gen_rtx_EXPR_LIST (VOIDmode,
26836 gen_rtx_REG (SImode,
26837 GP_ARG_RETURN + 2),
26838 GEN_INT (8)),
26839 gen_rtx_EXPR_LIST (VOIDmode,
26840 gen_rtx_REG (SImode,
26841 GP_ARG_RETURN + 3),
26842 GEN_INT (12))));
26845 mode = TYPE_MODE (valtype);
26846 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
26847 || POINTER_TYPE_P (valtype))
26848 mode = TARGET_32BIT ? SImode : DImode;
26850 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26851 /* _Decimal128 must use an even/odd register pair. */
26852 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
26853 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
26854 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
26855 regno = FP_ARG_RETURN;
26856 else if (TREE_CODE (valtype) == COMPLEX_TYPE
26857 && targetm.calls.split_complex_arg)
26858 return rs6000_complex_function_value (mode);
26859 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
26860 return register is used in both cases, and we won't see V2DImode/V2DFmode
26861 for pure altivec, combine the two cases. */
26862 else if (TREE_CODE (valtype) == VECTOR_TYPE
26863 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
26864 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
26865 regno = ALTIVEC_ARG_RETURN;
26866 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
26867 && (mode == DFmode || mode == DCmode
26868 || mode == TFmode || mode == TCmode))
26869 return spe_build_register_parallel (mode, GP_ARG_RETURN);
26870 else
26871 regno = GP_ARG_RETURN;
26873 return gen_rtx_REG (mode, regno);
26876 /* Define how to find the value returned by a library function
26877 assuming the value has mode MODE. */
26878 rtx
26879 rs6000_libcall_value (enum machine_mode mode)
26881 unsigned int regno;
26883 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
26885 /* A long long return value needs to be split under the 32-bit ABI with -mpowerpc64. */
26886 return gen_rtx_PARALLEL (DImode,
26887 gen_rtvec (2,
26888 gen_rtx_EXPR_LIST (VOIDmode,
26889 gen_rtx_REG (SImode, GP_ARG_RETURN),
26890 const0_rtx),
26891 gen_rtx_EXPR_LIST (VOIDmode,
26892 gen_rtx_REG (SImode,
26893 GP_ARG_RETURN + 1),
26894 GEN_INT (4))));
26897 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
26898 /* _Decimal128 must use an even/odd register pair. */
26899 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
26900 else if (SCALAR_FLOAT_MODE_P (mode)
26901 && TARGET_HARD_FLOAT && TARGET_FPRS
26902 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
26903 regno = FP_ARG_RETURN;
26904 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
26905 return register is used in both cases, and we won't see V2DImode/V2DFmode
26906 for pure altivec, combine the two cases. */
26907 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
26908 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
26909 regno = ALTIVEC_ARG_RETURN;
26910 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
26911 return rs6000_complex_function_value (mode);
26912 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
26913 && (mode == DFmode || mode == DCmode
26914 || mode == TFmode || mode == TCmode))
26915 return spe_build_register_parallel (mode, GP_ARG_RETURN);
26916 else
26917 regno = GP_ARG_RETURN;
26919 return gen_rtx_REG (mode, regno);
26923 /* Given FROM and TO register numbers, say whether this elimination is allowed.
26924 Frame pointer elimination is automatically handled.
26926 For the RS/6000, if frame pointer elimination is being done, we would like
26927 to convert ap into fp, not sp.
26929 We need r30 if -mminimal-toc was specified, and there are constant pool
26930 references. */
26932 static bool
26933 rs6000_can_eliminate (const int from, const int to)
26935 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
26936 ? ! frame_pointer_needed
26937 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
26938 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
26939 : true);
26942 /* Define the offset between two registers, FROM to be eliminated and its
26943 replacement TO, at the start of a routine. */
26944 HOST_WIDE_INT
26945 rs6000_initial_elimination_offset (int from, int to)
26947 rs6000_stack_t *info = rs6000_stack_info ();
26948 HOST_WIDE_INT offset;
26950 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
26951 offset = info->push_p ? 0 : -info->total_size;
26952 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
26954 offset = info->push_p ? 0 : -info->total_size;
26955 if (FRAME_GROWS_DOWNWARD)
26956 offset += info->fixed_size + info->vars_size + info->parm_size;
26958 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
26959 offset = FRAME_GROWS_DOWNWARD
26960 ? info->fixed_size + info->vars_size + info->parm_size
26961 : 0;
26962 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
26963 offset = info->total_size;
26964 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
26965 offset = info->push_p ? info->total_size : 0;
26966 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
26967 offset = 0;
26968 else
26969 gcc_unreachable ();
26971 return offset;
26974 static rtx
26975 rs6000_dwarf_register_span (rtx reg)
26977 rtx parts[8];
26978 int i, words;
26979 unsigned regno = REGNO (reg);
26980 enum machine_mode mode = GET_MODE (reg);
26982 if (TARGET_SPE
26983 && regno < 32
26984 && (SPE_VECTOR_MODE (GET_MODE (reg))
26985 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
26986 && mode != SFmode && mode != SDmode && mode != SCmode)))
26987 ;
26988 else
26989 return NULL_RTX;
26991 regno = REGNO (reg);
26993 /* The duality of the SPE register size wreaks all kinds of havoc.
26994 This is a way of distinguishing r0 in 32-bits from r0 in
26995 64-bits. */
26996 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
26997 gcc_assert (words <= 4);
26998 for (i = 0; i < words; i++, regno++)
27000 if (BYTES_BIG_ENDIAN)
27002 parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
27003 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
27005 else
27007 parts[2 * i] = gen_rtx_REG (SImode, regno);
27008 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
27012 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
27015 /* Fill in sizes for SPE register high parts in the table used by the unwinder. */
27017 static void
27018 rs6000_init_dwarf_reg_sizes_extra (tree address)
27020 if (TARGET_SPE)
27022 int i;
27023 enum machine_mode mode = TYPE_MODE (char_type_node);
27024 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
27025 rtx mem = gen_rtx_MEM (BLKmode, addr);
27026 rtx value = gen_int_mode (4, mode);
27028 for (i = 1201; i < 1232; i++)
27030 int column = DWARF_REG_TO_UNWIND_COLUMN (i);
27031 HOST_WIDE_INT offset
27032 = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
27034 emit_move_insn (adjust_address (mem, mode, offset), value);
27039 /* Map internal gcc register numbers to DWARF2 register numbers. */
27041 unsigned int
27042 rs6000_dbx_register_number (unsigned int regno)
27044 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
27045 return regno;
27046 if (regno == MQ_REGNO)
27047 return 100;
27048 if (regno == LR_REGNO)
27049 return 108;
27050 if (regno == CTR_REGNO)
27051 return 109;
27052 if (CR_REGNO_P (regno))
27053 return regno - CR0_REGNO + 86;
27054 if (regno == CA_REGNO)
27055 return 101; /* XER */
27056 if (ALTIVEC_REGNO_P (regno))
27057 return regno - FIRST_ALTIVEC_REGNO + 1124;
27058 if (regno == VRSAVE_REGNO)
27059 return 356;
27060 if (regno == VSCR_REGNO)
27061 return 67;
27062 if (regno == SPE_ACC_REGNO)
27063 return 99;
27064 if (regno == SPEFSCR_REGNO)
27065 return 612;
27066 /* SPE high reg number. We get these values of regno from
27067 rs6000_dwarf_register_span. */
27068 gcc_assert (regno >= 1200 && regno < 1232);
27069 return regno;
27072 /* Target hook for eh_return_filter_mode. */
27073 static enum machine_mode
27074 rs6000_eh_return_filter_mode (void)
27076 return TARGET_32BIT ? SImode : word_mode;
27079 /* Target hook for scalar_mode_supported_p. */
27080 static bool
27081 rs6000_scalar_mode_supported_p (enum machine_mode mode)
27083 if (DECIMAL_FLOAT_MODE_P (mode))
27084 return default_decimal_float_supported_p ();
27085 else
27086 return default_scalar_mode_supported_p (mode);
27089 /* Target hook for vector_mode_supported_p. */
27090 static bool
27091 rs6000_vector_mode_supported_p (enum machine_mode mode)
27094 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
27095 return true;
27097 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
27098 return true;
27100 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
27101 return true;
27103 else
27104 return false;
27107 /* Target hook for invalid_arg_for_unprototyped_fn. */
27108 static const char *
27109 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
27111 return (!rs6000_darwin64_abi
27112 && typelist == 0
27113 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
27114 && (funcdecl == NULL_TREE
27115 || (TREE_CODE (funcdecl) == FUNCTION_DECL
27116 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
27117 ? N_("AltiVec argument passed to unprototyped function")
27118 : NULL;
27121 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
27122 setup by using __stack_chk_fail_local hidden function instead of
27123 calling __stack_chk_fail directly. Otherwise it is better to call
27124 __stack_chk_fail directly. */
27126 static tree ATTRIBUTE_UNUSED
27127 rs6000_stack_protect_fail (void)
27129 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
27130 ? default_hidden_stack_protect_fail ()
27131 : default_external_stack_protect_fail ();
27134 void
27135 rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
27136 int num_operands ATTRIBUTE_UNUSED)
27138 if (rs6000_warn_cell_microcode)
27140 const char *temp;
27141 int insn_code_number = recog_memoized (insn);
27142 location_t location = locator_location (INSN_LOCATOR (insn));
27144 /* Punt on insns we cannot recognize. */
27145 if (insn_code_number < 0)
27146 return;
27148 temp = get_insn_template (insn_code_number, insn);
27150 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
27151 warning_at (location, OPT_mwarn_cell_microcode,
27152 "emitting microcode insn %s\t[%s] #%d",
27153 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27154 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
27155 warning_at (location, OPT_mwarn_cell_microcode,
27156 "emitting conditional microcode insn %s\t[%s] #%d",
27157 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
27162 /* Mask options that we want to support inside of attribute((target)) and
27163 #pragma GCC target operations. Note, we do not include things like
27164 64/32-bit, endianness, hard/soft floating point, etc. that would have
27165 different calling sequences. */
27167 struct rs6000_opt_mask {
27168 const char *name; /* option name */
27169 int mask; /* mask to set */
27170 bool invert; /* invert sense of mask */
27171 bool valid_target; /* option is a target option */
27174 static struct rs6000_opt_mask const rs6000_opt_masks[] =
27176 { "altivec", MASK_ALTIVEC, false, true },
27177 { "cmpb", MASK_CMPB, false, true },
27178 { "dlmzb", MASK_DLMZB, false, true },
27179 { "fprnd", MASK_FPRND, false, true },
27180 { "hard-dfp", MASK_DFP, false, true },
27181 { "isel", MASK_ISEL, false, true },
27182 { "mfcrf", MASK_MFCRF, false, true },
27183 { "mfpgpr", MASK_MFPGPR, false, true },
27184 { "mulhw", MASK_MULHW, false, true },
27185 { "multiple", MASK_MULTIPLE, false, true },
27186 { "update", MASK_NO_UPDATE, true , true },
27187 { "popcntb", MASK_POPCNTB, false, true },
27188 { "popcntd", MASK_POPCNTD, false, true },
27189 { "powerpc-gfxopt", MASK_PPC_GFXOPT, false, true },
27190 { "powerpc-gpopt", MASK_PPC_GPOPT, false, true },
27191 { "recip-precision", MASK_RECIP_PRECISION, false, true },
27192 { "string", MASK_STRING, false, true },
27193 { "vsx", MASK_VSX, false, true },
27194 #ifdef MASK_64BIT
27195 #if TARGET_AIX_OS
27196 { "aix64", MASK_64BIT, false, false },
27197 { "aix32", MASK_64BIT, true, false },
27198 #else
27199 { "64", MASK_64BIT, false, false },
27200 { "32", MASK_64BIT, true, false },
27201 #endif
27202 #endif
27203 #ifdef MASK_EABI
27204 { "eabi", MASK_EABI, false, false },
27205 #endif
27206 #ifdef MASK_LITTLE_ENDIAN
27207 { "little", MASK_LITTLE_ENDIAN, false, false },
27208 { "big", MASK_LITTLE_ENDIAN, true, false },
27209 #endif
27210 #ifdef MASK_RELOCATABLE
27211 { "relocatable", MASK_RELOCATABLE, false, false },
27212 #endif
27213 #ifdef MASK_STRICT_ALIGN
27214 { "strict-align", MASK_STRICT_ALIGN, false, false },
27215 #endif
27216 { "power", MASK_POWER, false, false },
27217 { "power2", MASK_POWER2, false, false },
27218 { "powerpc", MASK_POWERPC, false, false },
27219 { "soft-float", MASK_SOFT_FLOAT, false, false },
27220 { "string", MASK_STRING, false, false },
27223 /* Builtin mask mapping for printing the flags. */
27224 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
27226 { "altivec", RS6000_BTM_ALTIVEC, false, false },
27227 { "vsx", RS6000_BTM_VSX, false, false },
27228 { "spe", RS6000_BTM_SPE, false, false },
27229 { "paired", RS6000_BTM_PAIRED, false, false },
27230 { "fre", RS6000_BTM_FRE, false, false },
27231 { "fres", RS6000_BTM_FRES, false, false },
27232 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
27233 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
27234 { "popcntd", RS6000_BTM_POPCNTD, false, false },
27235 { "powerpc", RS6000_BTM_POWERPC, false, false },
27236 { "cell", RS6000_BTM_CELL, false, false },
27239 /* Option variables that we want to support inside attribute((target)) and
27240 #pragma GCC target operations. */
27242 struct rs6000_opt_var {
27243 const char *name; /* option name */
27244 size_t global_offset; /* offset of the option in global_options. */
27245 size_t target_offset; /* offset of the option in target options. */
27248 static struct rs6000_opt_var const rs6000_opt_vars[] =
27250 { "friz",
27251 offsetof (struct gcc_options, x_TARGET_FRIZ),
27252 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
27253 { "avoid-indexed-addresses",
27254 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
27255 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
27256 { "paired",
27257 offsetof (struct gcc_options, x_rs6000_paired_float),
27258 offsetof (struct cl_target_option, x_rs6000_paired_float), },
27259 { "longcall",
27260 offsetof (struct gcc_options, x_rs6000_default_long_calls),
27261 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
27264 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
27265 parsing. Return true if there were no errors. */
27267 static bool
27268 rs6000_inner_target_options (tree args, bool attr_p)
27270 bool ret = true;
27272 if (args == NULL_TREE)
27275 else if (TREE_CODE (args) == STRING_CST)
27277 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27278 char *q;
27280 while ((q = strtok (p, ",")) != NULL)
27282 bool error_p = false;
27283 bool not_valid_p = false;
27284 const char *cpu_opt = NULL;
27286 p = NULL;
27287 if (strncmp (q, "cpu=", 4) == 0)
27289 int cpu_index = rs6000_cpu_name_lookup (q+4);
27290 if (cpu_index >= 0)
27291 rs6000_cpu_index = cpu_index;
27292 else
27294 error_p = true;
27295 cpu_opt = q+4;
27298 else if (strncmp (q, "tune=", 5) == 0)
27300 int tune_index = rs6000_cpu_name_lookup (q+5);
27301 if (tune_index >= 0)
27302 rs6000_tune_index = tune_index;
27303 else
27305 error_p = true;
27306 cpu_opt = q+5;
27309 else
27311 size_t i;
27312 bool invert = false;
27313 char *r = q;
27315 error_p = true;
27316 if (strncmp (r, "no-", 3) == 0)
27318 invert = true;
27319 r += 3;
27322 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27323 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
27325 int mask = rs6000_opt_masks[i].mask;
27327 if (!rs6000_opt_masks[i].valid_target)
27328 not_valid_p = true;
27329 else
27331 error_p = false;
27332 target_flags_explicit |= mask;
27334 /* VSX needs altivec, so -mvsx automagically sets
27335 altivec. */
27336 if (mask == MASK_VSX && !invert)
27337 mask |= MASK_ALTIVEC;
27339 if (rs6000_opt_masks[i].invert)
27340 invert = !invert;
27342 if (invert)
27343 target_flags &= ~mask;
27344 else
27345 target_flags |= mask;
27347 break;
27350 if (error_p && !not_valid_p)
27352 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27353 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
27355 size_t j = rs6000_opt_vars[i].global_offset;
27356 *((int *) ((char *)&global_options + j)) = !invert;
27357 error_p = false;
27358 break;
27363 if (error_p)
27365 const char *eprefix, *esuffix;
27367 ret = false;
27368 if (attr_p)
27370 eprefix = "__attribute__((__target__(";
27371 esuffix = ")))";
27373 else
27375 eprefix = "#pragma GCC target ";
27376 esuffix = "";
27379 if (cpu_opt)
27380 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
27381 q, esuffix);
27382 else if (not_valid_p)
27383 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
27384 else
27385 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
27390 else if (TREE_CODE (args) == TREE_LIST)
27394 tree value = TREE_VALUE (args);
27395 if (value)
27397 bool ret2 = rs6000_inner_target_options (value, attr_p);
27398 if (!ret2)
27399 ret = false;
27401 args = TREE_CHAIN (args);
27403 while (args != NULL_TREE);
27406 else
27407 gcc_unreachable ();
27409 return ret;
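/* Illustrative usage of the option strings parsed above (a sketch; the
   function name is hypothetical):  */
#if 0
#pragma GCC target ("cpu=power7,vsx")      /* -mvsx also enables altivec */

__attribute__((__target__("no-multiple,tune=power6")))
extern void tuned_fn (void);               /* per-function override */
#endif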
27412 /* Print out the target options as a list for -mdebug=target. */
27414 static void
27415 rs6000_debug_target_options (tree args, const char *prefix)
27417 if (args == NULL_TREE)
27418 fprintf (stderr, "%s<NULL>", prefix);
27420 else if (TREE_CODE (args) == STRING_CST)
27422 char *p = ASTRDUP (TREE_STRING_POINTER (args));
27423 char *q;
27425 while ((q = strtok (p, ",")) != NULL)
27427 p = NULL;
27428 fprintf (stderr, "%s\"%s\"", prefix, q);
27429 prefix = ", ";
27433 else if (TREE_CODE (args) == TREE_LIST)
27437 tree value = TREE_VALUE (args);
27438 if (value)
27440 rs6000_debug_target_options (value, prefix);
27441 prefix = ", ";
27443 args = TREE_CHAIN (args);
27445 while (args != NULL_TREE);
27448 else
27449 gcc_unreachable ();
27451 return;
27455 /* Hook to validate attribute((target("..."))). */
27457 static bool
27458 rs6000_valid_attribute_p (tree fndecl,
27459 tree ARG_UNUSED (name),
27460 tree args,
27461 int flags)
27463 struct cl_target_option cur_target;
27464 bool ret;
27465 tree old_optimize = build_optimization_node ();
27466 tree new_target, new_optimize;
27467 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27469 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
27471 if (TARGET_DEBUG_TARGET)
27473 tree tname = DECL_NAME (fndecl);
27474 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
27475 if (tname)
27476 fprintf (stderr, "function: %.*s\n",
27477 (int) IDENTIFIER_LENGTH (tname),
27478 IDENTIFIER_POINTER (tname));
27479 else
27480 fprintf (stderr, "function: unknown\n");
27482 fprintf (stderr, "args:");
27483 rs6000_debug_target_options (args, " ");
27484 fprintf (stderr, "\n");
27486 if (flags)
27487 fprintf (stderr, "flags: 0x%x\n", flags);
27489 fprintf (stderr, "--------------------\n");
27492 old_optimize = build_optimization_node ();
27493 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
27495 /* If the function changed the optimization levels as well as setting target
27496 options, start with the optimizations specified. */
27497 if (func_optimize && func_optimize != old_optimize)
27498 cl_optimization_restore (&global_options,
27499 TREE_OPTIMIZATION (func_optimize));
27501 /* The target attributes may also change some optimization flags, so update
27502 the optimization options if necessary. */
27503 cl_target_option_save (&cur_target, &global_options);
27504 rs6000_cpu_index = rs6000_tune_index = -1;
27505 ret = rs6000_inner_target_options (args, true);
27507 /* Set up any additional state. */
27508 if (ret)
27510 ret = rs6000_option_override_internal (false);
27511 new_target = build_target_option_node ();
27513 else
27514 new_target = NULL;
27516 new_optimize = build_optimization_node ();
27518 if (!new_target)
27519 ret = false;
27521 else if (fndecl)
27523 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
27525 if (old_optimize != new_optimize)
27526 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
27529 cl_target_option_restore (&global_options, &cur_target);
27531 if (old_optimize != new_optimize)
27532 cl_optimization_restore (&global_options,
27533 TREE_OPTIMIZATION (old_optimize));
27535 return ret;
27539 /* Hook to validate the current #pragma GCC target and set the state, and
27540 update the macros based on what was changed. If ARGS is NULL, then
27541 POP_TARGET is used to reset the options. */
27543 bool
27544 rs6000_pragma_target_parse (tree args, tree pop_target)
27546 tree prev_tree = build_target_option_node ();
27547 tree cur_tree;
27548 struct cl_target_option *prev_opt, *cur_opt;
27549 unsigned prev_bumask, cur_bumask, diff_bumask;
27550 int prev_flags, cur_flags, diff_flags;
27552 if (TARGET_DEBUG_TARGET)
27554 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
27555 fprintf (stderr, "args:");
27556 rs6000_debug_target_options (args, " ");
27557 fprintf (stderr, "\n");
27559 if (pop_target)
27561 fprintf (stderr, "pop_target:\n");
27562 debug_tree (pop_target);
27564 else
27565 fprintf (stderr, "pop_target: <NULL>\n");
27567 fprintf (stderr, "--------------------\n");
27570 if (! args)
27572 cur_tree = ((pop_target)
27573 ? pop_target
27574 : target_option_default_node);
27575 cl_target_option_restore (&global_options,
27576 TREE_TARGET_OPTION (cur_tree));
27578 else
27580 rs6000_cpu_index = rs6000_tune_index = -1;
27581 if (!rs6000_inner_target_options (args, false)
27582 || !rs6000_option_override_internal (false)
27583 || (cur_tree = build_target_option_node ()) == NULL_TREE)
27585 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
27586 fprintf (stderr, "invalid pragma\n");
27588 return false;
27592 target_option_current_node = cur_tree;
27594 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
27595 change the macros that are defined. */
27596 if (rs6000_target_modify_macros_ptr)
27598 prev_opt = TREE_TARGET_OPTION (prev_tree);
27599 prev_bumask = prev_opt->x_rs6000_builtin_mask;
27600 prev_flags = prev_opt->x_target_flags;
27602 cur_opt = TREE_TARGET_OPTION (cur_tree);
27603 cur_flags = cur_opt->x_target_flags;
27604 cur_bumask = cur_opt->x_rs6000_builtin_mask;
27606 diff_bumask = (prev_bumask ^ cur_bumask);
27607 diff_flags = (prev_flags ^ cur_flags);
27609 if ((diff_flags != 0) || (diff_bumask != 0))
27611 /* Delete old macros. */
27612 rs6000_target_modify_macros_ptr (false,
27613 prev_flags & diff_flags,
27614 prev_bumask & diff_bumask);
27616 /* Define new macros. */
27617 rs6000_target_modify_macros_ptr (true,
27618 cur_flags & diff_flags,
27619 cur_bumask & diff_bumask);
27623 return true;
27627 /* Remember the last target of rs6000_set_current_function. */
27628 static GTY(()) tree rs6000_previous_fndecl;
27630 /* Establish appropriate back-end context for processing the function
27631 FNDECL. The argument might be NULL to indicate processing at top
27632 level, outside of any function scope. */
27633 static void
27634 rs6000_set_current_function (tree fndecl)
27636 tree old_tree = (rs6000_previous_fndecl
27637 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
27638 : NULL_TREE);
27640 tree new_tree = (fndecl
27641 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
27642 : NULL_TREE);
27644 if (TARGET_DEBUG_TARGET)
27646 bool print_final = false;
27647 fprintf (stderr, "\n==================== rs6000_set_current_function");
27649 if (fndecl)
27650 fprintf (stderr, ", fndecl %s (%p)",
27651 (DECL_NAME (fndecl)
27652 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
27653 : "<unknown>"), (void *)fndecl);
27655 if (rs6000_previous_fndecl)
27656 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
27658 fprintf (stderr, "\n");
27659 if (new_tree)
27661 fprintf (stderr, "\nnew fndecl target specific options:\n");
27662 debug_tree (new_tree);
27663 print_final = true;
27666 if (old_tree)
27668 fprintf (stderr, "\nold fndecl target specific options:\n");
27669 debug_tree (old_tree);
27670 print_final = true;
27673 if (print_final)
27674 fprintf (stderr, "--------------------\n");
27677 /* Only change the context if the function changes. This hook is called
27678 several times in the course of compiling a function, and we don't want to
27679 slow things down too much or call target_reinit when it isn't safe. */
27680 if (fndecl && fndecl != rs6000_previous_fndecl)
27682 rs6000_previous_fndecl = fndecl;
27683 if (old_tree == new_tree)
27686 else if (new_tree)
27688 cl_target_option_restore (&global_options,
27689 TREE_TARGET_OPTION (new_tree));
27690 target_reinit ();
27693 else if (old_tree)
27695 struct cl_target_option *def
27696 = TREE_TARGET_OPTION (target_option_current_node);
27698 cl_target_option_restore (&global_options, def);
27699 target_reinit ();
27705 /* Save the current options */
27707 static void
27708 rs6000_function_specific_save (struct cl_target_option *ptr)
27710 ptr->rs6000_target_flags_explicit = target_flags_explicit;
27713 /* Restore the current options */
27715 static void
27716 rs6000_function_specific_restore (struct cl_target_option *ptr)
27718 target_flags_explicit = ptr->rs6000_target_flags_explicit;
27719 (void) rs6000_option_override_internal (false);
27722 /* Print the current options */
27724 static void
27725 rs6000_function_specific_print (FILE *file, int indent,
27726 struct cl_target_option *ptr)
27728 size_t i;
27729 int flags = ptr->x_target_flags;
27730 unsigned bu_mask = ptr->x_rs6000_builtin_mask;
27732 /* Print the various mask options. */
27733 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
27734 if ((flags & rs6000_opt_masks[i].mask) != 0)
27736 flags &= ~ rs6000_opt_masks[i].mask;
27737 fprintf (file, "%*s-m%s%s\n", indent, "",
27738 rs6000_opt_masks[i].invert ? "no-" : "",
27739 rs6000_opt_masks[i].name);
27742 /* Print the various options that are variables. */
27743 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
27745 size_t j = rs6000_opt_vars[i].target_offset;
27746 if (((signed char *) ptr)[j])
27747 fprintf (file, "%*s-m%s\n", indent, "",
27748 rs6000_opt_vars[i].name);
27751 /* Print the various builtin flags. */
27752 fprintf (file, "%*sbuiltin mask = 0x%x\n", indent, "", bu_mask);
27753 for (i = 0; i < ARRAY_SIZE (rs6000_builtin_mask_names); i++)
27754 if ((bu_mask & rs6000_builtin_mask_names[i].mask) != 0)
27756 fprintf (file, "%*s%s builtins supported\n", indent, "",
27757 rs6000_builtin_mask_names[i].name);
27762 /* Hook to determine if one function can safely inline another. */
27764 static bool
27765 rs6000_can_inline_p (tree caller, tree callee)
27767 bool ret = false;
27768 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
27769 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
27771 /* If callee has no option attributes, then it is ok to inline. */
27772 if (!callee_tree)
27773 ret = true;
27775 /* If caller has no option attributes, but callee does then it is not ok to
27776 inline. */
27777 else if (!caller_tree)
27778 ret = false;
27780 else
27782 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
27783 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
27785 /* Callee's options should be a subset of the caller's, i.e. a vsx function
27786 can inline an altivec function but a non-vsx function can't inline a
27787 vsx function. */
27788 if ((caller_opts->x_target_flags & callee_opts->x_target_flags)
27789 == callee_opts->x_target_flags)
27790 ret = true;
27793 if (TARGET_DEBUG_TARGET)
27794 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
27795 (DECL_NAME (caller)
27796 ? IDENTIFIER_POINTER (DECL_NAME (caller))
27797 : "<unknown>"),
27798 (DECL_NAME (callee)
27799 ? IDENTIFIER_POINTER (DECL_NAME (callee))
27800 : "<unknown>"),
27801 (ret ? "can" : "cannot"));
27803 return ret;
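/* Concrete example (illustrative): a caller compiled with -mvsx carries
   both MASK_VSX and MASK_ALTIVEC in its target flags, so it may inline a
   callee marked __attribute__((target("altivec"))); inlining in the other
   direction is rejected because the callee's flags would not be a subset
   of the caller's.  */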
27806 /* Allocate a stack temp and fix up the address so it meets the particular
27807 memory requirements (either offsettable or REG+REG addressing). */
27809 rtx
27810 rs6000_allocate_stack_temp (enum machine_mode mode,
27811 bool offsettable_p,
27812 bool reg_reg_p)
27814 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
27815 rtx addr = XEXP (stack, 0);
27816 int strict_p = (reload_in_progress || reload_completed);
27818 if (!legitimate_indirect_address_p (addr, strict_p))
27820 if (offsettable_p
27821 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p))
27822 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
27824 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
27825 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
27828 return stack;
27831 /* Given a memory reference, if it does not use reg or reg+reg addressing,
27832 convert it to such a form to deal with memory reference instructions like
27833 STFIWX that only take reg+reg addressing. */
27835 rtx
27836 rs6000_address_for_fpconvert (rtx x)
27838 int strict_p = (reload_in_progress || reload_completed);
27839 rtx addr;
27841 gcc_assert (MEM_P (x));
27842 addr = XEXP (x, 0);
27843 if (! legitimate_indirect_address_p (addr, strict_p)
27844 && ! legitimate_indexed_address_p (addr, strict_p))
27846 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
27848 rtx reg = XEXP (addr, 0);
27849 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
27850 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
27851 gcc_assert (REG_P (reg));
27852 emit_insn (gen_add3_insn (reg, reg, size_rtx));
27853 addr = reg;
27855 else if (GET_CODE (addr) == PRE_MODIFY)
27857 rtx reg = XEXP (addr, 0);
27858 rtx expr = XEXP (addr, 1);
27859 gcc_assert (REG_P (reg));
27860 gcc_assert (GET_CODE (expr) == PLUS);
27861 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
27862 addr = reg;
27865 x = replace_equiv_address (x, copy_addr_to_reg (addr));
27868 return x;
27871 /* Given a memory reference, if it is not in the form for altivec memory
27872 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
27873 convert to the altivec format. */
27875 rtx
27876 rs6000_address_for_altivec (rtx x)
27878 gcc_assert (MEM_P (x));
27879 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
27881 rtx addr = XEXP (x, 0);
27882 int strict_p = (reload_in_progress || reload_completed);
27884 if (!legitimate_indexed_address_p (addr, strict_p)
27885 && !legitimate_indirect_address_p (addr, strict_p))
27886 addr = copy_to_mode_reg (Pmode, addr);
27888 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
27889 x = change_address (x, GET_MODE (x), addr);
27892 return x;
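/* The AND with -16 above clears the low four address bits in the RTL form,
   mirroring the AltiVec lvx/stvx instructions, which ignore the low four
   bits of the effective address.  */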
27895 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
27897 On the RS/6000, all integer constants are acceptable, most won't be valid
27898 for particular insns, though. Only easy FP constants are acceptable. */
27900 static bool
27901 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
27903 if (rs6000_tls_referenced_p (x))
27904 return false;
27906 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
27907 || GET_MODE (x) == VOIDmode
27908 || (TARGET_POWERPC64 && mode == DImode)
27909 || easy_fp_constant (x, mode)
27910 || easy_vector_constant (x, mode));
27914 /* A function pointer under AIX is a pointer to a data area whose first word
27915 contains the actual address of the function, whose second word contains a
27916 pointer to its TOC, and whose third word contains a value to place in the
27917 static chain register (r11). Note that if we load the static chain, our
27918 "trampoline" need not have any executable code. */
27920 void
27921 rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag)
27923 rtx func_addr;
27924 rtx toc_reg;
27925 rtx sc_reg;
27926 rtx stack_ptr;
27927 rtx stack_toc_offset;
27928 rtx stack_toc_mem;
27929 rtx func_toc_offset;
27930 rtx func_toc_mem;
27931 rtx func_sc_offset;
27932 rtx func_sc_mem;
27933 rtx insn;
27934 rtx (*call_func) (rtx, rtx, rtx, rtx);
27935 rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx);
27937 stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27938 toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
27940 /* Load up address of the actual function. */
27941 func_desc = force_reg (Pmode, func_desc);
27942 func_addr = gen_reg_rtx (Pmode);
27943 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
27945 if (TARGET_32BIT)
27948 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT);
27949 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT);
27950 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT);
27951 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
27953 call_func = gen_call_indirect_aix32bit;
27954 call_value_func = gen_call_value_indirect_aix32bit;
27956 else
27958 call_func = gen_call_indirect_aix32bit_nor11;
27959 call_value_func = gen_call_value_indirect_aix32bit_nor11;
27962 else
27964 stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT);
27965 func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT);
27966 func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT);
27967 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
27969 call_func = gen_call_indirect_aix64bit;
27970 call_value_func = gen_call_value_indirect_aix64bit;
27972 else
27974 call_func = gen_call_indirect_aix64bit_nor11;
27975 call_value_func = gen_call_value_indirect_aix64bit_nor11;
27979 /* Reserved spot to store the TOC. */
27980 stack_toc_mem = gen_frame_mem (Pmode,
27981 gen_rtx_PLUS (Pmode,
27982 stack_ptr,
27983 stack_toc_offset));
27985 gcc_assert (cfun);
27986 gcc_assert (cfun->machine);
27988 /* Can we optimize saving the TOC in the prologue or do we need to do it at
27989 every call? */
27990 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
27991 cfun->machine->save_toc_in_prologue = true;
27993 else
27995 MEM_VOLATILE_P (stack_toc_mem) = 1;
27996 emit_move_insn (stack_toc_mem, toc_reg);
27999 /* Calculate the address to load the TOC of the called function. We don't
28000 actually load this until the split after reload. */
28001 func_toc_mem = gen_rtx_MEM (Pmode,
28002 gen_rtx_PLUS (Pmode,
28003 func_desc,
28004 func_toc_offset));
28006 /* If we have a static chain, load it up. */
28007 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28009 func_sc_mem = gen_rtx_MEM (Pmode,
28010 gen_rtx_PLUS (Pmode,
28011 func_desc,
28012 func_sc_offset));
28014 sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
28015 emit_move_insn (sc_reg, func_sc_mem);
28018 /* Create the call. */
28019 if (value)
28020 insn = call_value_func (value, func_addr, flag, func_toc_mem,
28021 stack_toc_mem);
28022 else
28023 insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem);
28025 emit_call_insn (insn);
28028 /* Return whether we always need to update the saved TOC pointer when we
28029 update the stack pointer. */
28031 static bool
28032 rs6000_save_toc_in_prologue_p (void)
28034 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
28037 #ifdef HAVE_GAS_HIDDEN
28038 # define USE_HIDDEN_LINKONCE 1
28039 #else
28040 # define USE_HIDDEN_LINKONCE 0
28041 #endif
28043 /* Fills in the label name that should be used for a 476 link stack thunk. */
28045 void
28046 get_ppc476_thunk_name (char name[32])
28048 gcc_assert (TARGET_LINK_STACK);
28050 if (USE_HIDDEN_LINKONCE)
28051 sprintf (name, "__ppc476.get_thunk");
28052 else
28053 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
28056 /* This function emits the simple thunk routine that is used to preserve
28057 the link stack on the 476 cpu. */
28059 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
28060 static void
28061 rs6000_code_end (void)
28063 char name[32];
28064 tree decl;
28066 if (!TARGET_LINK_STACK)
28067 return;
28069 get_ppc476_thunk_name (name);
28071 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
28072 build_function_type_list (void_type_node, NULL_TREE));
28073 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
28074 NULL_TREE, void_type_node);
28075 TREE_PUBLIC (decl) = 1;
28076 TREE_STATIC (decl) = 1;
28078 if (USE_HIDDEN_LINKONCE)
28080 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
28081 targetm.asm_out.unique_section (decl, 0);
28082 switch_to_section (get_named_section (decl, NULL, 0));
28083 DECL_WEAK (decl) = 1;
28084 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
28085 targetm.asm_out.globalize_label (asm_out_file, name);
28086 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
28087 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
28089 else
28091 switch_to_section (text_section);
28092 ASM_OUTPUT_LABEL (asm_out_file, name);
28095 DECL_INITIAL (decl) = make_node (BLOCK);
28096 current_function_decl = decl;
28097 init_function_start (decl);
28098 first_function_block_is_cold = false;
28099 /* Make sure unwind info is emitted for the thunk if needed. */
28100 final_start_function (emit_barrier (), asm_out_file, 1);
28102 fputs ("\tblr\n", asm_out_file);
28104 final_end_function ();
28105 init_insn_lengths ();
28106 free_after_compilation (cfun);
28107 set_cfun (NULL);
28108 current_function_decl = NULL;
28111 /* Add r30 to hard reg set if the prologue sets it up and it is not
28112 pic_offset_table_rtx. */
28114 static void
28115 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
28117 if (!TARGET_SINGLE_PIC_BASE
28118 && TARGET_TOC
28119 && TARGET_MINIMAL_TOC
28120 && get_pool_size () != 0)
28121 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
28124 struct gcc_target targetm = TARGET_INITIALIZER;
28126 #include "gt-rs6000.h"